commit 5e089a2cd1440989d8bb439cb24d86fc3e4ad36a Author: Connor Date: Thu Apr 30 15:07:27 2020 +0800 init course framework Signed-off-by: Connor Co-authored-by: Nick Cameron Co-authored-by: linning Co-authored-by: YangKeao Co-authored-by: andylokandy Co-authored-by: Iosmanthus Teng diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 00000000..46160f4c --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,20 @@ +name: Go +on: [push,pull_request] +jobs: + + build: + name: Build & Test + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.13 + uses: actions/setup-go@v1 + with: + go-version: 1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v1 + + - name: Build & Test + run: make ci diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..12a8fd59 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +.idea* +.vscode* +node/node +unikv/unikv +bin/* +_tools \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..97d12e7d --- /dev/null +++ b/Makefile @@ -0,0 +1,65 @@ +SHELL := /bin/bash +PROJECT=tinykv +GOPATH ?= $(shell go env GOPATH) + +# Ensure GOPATH is set before running build process. +ifeq "$(GOPATH)" "" + $(error Please set the environment variable GOPATH before running `make`) +endif + +GO := GO111MODULE=on go +GOBUILD := $(GO) build $(BUILD_FLAG) -tags codes +GOTEST := $(GO) test -p 8 + +TEST_LDFLAGS := "" + +PACKAGE_LIST := go list ./...| grep -vE "cmd" +PACKAGES := $$($(PACKAGE_LIST)) + +# Targets +.PHONY: clean test proto kv scheduler dev + +default: kv scheduler + +dev: default test + +test: + @echo "Running tests in native mode." + @export TZ='Asia/Shanghai'; \ + $(GOTEST) -cover $(PACKAGES) + +CURDIR := $(shell pwd) +export PATH := $(CURDIR)/bin/:$(PATH) +proto: + mkdir -p $(CURDIR)/bin + (cd proto && ./generate_go.sh) + GO111MODULE=on go build ./proto/pkg/... + +kv: + $(GOBUILD) -o bin/tinykv-server kv/main.go + +scheduler: + $(GOBUILD) -o bin/tinyscheduler-server scheduler/main.go + +ci: default test + @echo "Checking formatting" + @test -z "$$(gofmt -s -l $$(find . -name '*.go' -type f -print) | tee /dev/stderr)" + @echo "Running Go vet" + @go vet ./... + +format: + @gofmt -s -w `find . -name '*.go' -type f ! -path '*/_tools/*' -print` + +lab1: + go test -count=1 ./kv/server -run 1 + +lab2: lab2a lab2b lab2c + +lab2a: + go test -count=1 ./raft -run 2A + +lab2b: + go test -count=1 ./kv/test_raftstore -run 2B + +lab2c: + go test -count=1 ./raft ./kv/test_raftstore -run 2C diff --git a/README.md b/README.md new file mode 100644 index 00000000..7de7162d --- /dev/null +++ b/README.md @@ -0,0 +1,49 @@ +# The TinyKV LAB +This is a series of labs on a key-value storage system built with the Raft consensus algorithm. These labs are inspired by the famous [MIT 6.824](http://nil.csail.mit.edu/6.824/2018/index.html) course, but aim to be closer to industry implementations. The whole lab is pruned from [TiKV](github.com/tikv/tikv) and re-written in Go. After completing this course, you will have the knowledge to implement a basic key-value storage service with distributed transactions and fault-tolerance and better understanding of TiKV implementation. 
+ +The whole project is a skeleton code for a kv server and a scheduler server at initial, and you need to finish the core logic step by step: +- LAB1: build a standalone key-value server +- LAB2: build a fault tolerant key-value server with Raft +- LAB3: support multi Raft group and balance scheduling on top of LAB2 +- LAB4: support distributed transaction on top of LAB3 + +**Important note: This course is still in developing, and the document is incomplete.** Any feedback and contribution is greatly appreciated. Please see help wanted issues if you want to join in the development. + +## Build +``` +make +``` + +## Test +``` +make test +``` + +## Run(Not runnable now) + +Put the binary of `tinyscheduler-server`, `tinykv-server` and `tidb-server` into a single dir. + +Under the binary dir, run the following commands: + +``` +mkdir -p data +``` + +``` +./tinyscheduler-server +``` + +``` +./tinykv-server --db-path=data +``` + +``` +./tinysql-server --store=tikv --path="127.0.0.1:2379" +``` + +## Documentation(Incomplete) + +This repo contains a single module: tinykv. Each package is documented either in a doc.go file or, if it is a single +file package, in the single file. + +See [doc.go](doc.go) for top-level documentation. diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..9a0e789c --- /dev/null +++ b/go.mod @@ -0,0 +1,39 @@ +module github.com/pingcap-incubator/tinykv + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/Connor1996/badger v1.5.1-0.20200306031920-9bbcbd8ba570 + github.com/coocood/badger v1.5.1-0.20191220113928-eaffd0ec7a8c // indirect + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f + github.com/docker/go-units v0.4.0 + github.com/gogo/protobuf v1.2.1 + github.com/golang/protobuf v1.3.2 + github.com/google/btree v1.0.0 + github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 + github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect + github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 // indirect + github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 + github.com/opentracing/opentracing-go v1.0.2 + github.com/petar/GoLLRB v0.0.0-20190514000832-33fb24c13b99 + github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 + github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 + github.com/pingcap/errors v0.11.4 + github.com/pingcap/kvproto v0.0.0-20190821201150-798d27658fae + github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596 + github.com/pingcap/parser v0.0.0-20190903084634-0daf3f706c76 + github.com/pingcap/tidb v1.1.0-beta.0.20190904060835-0872b65ff1f9 + github.com/pkg/errors v0.8.1 + github.com/shirou/gopsutil v2.18.10+incompatible + github.com/sirupsen/logrus v1.2.0 + github.com/stretchr/testify v1.3.0 + go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf + go.uber.org/zap v1.9.1 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect + google.golang.org/grpc v1.17.0 + gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/stretchr/testify.v1 v1.2.2 // indirect +) + +go 1.13 diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..5f978da8 --- /dev/null +++ b/go.sum @@ -0,0 +1,417 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod 
h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Connor1996/badger v1.5.0 h1:GIETn+enyBZZEs6SNXCGZafzf4W9bf83Ec/Rtf7+ExU= +github.com/Connor1996/badger v1.5.0/go.mod h1:i5tVv6WOnUfrDvUwyNe70leuRyNIBfTpXX9I8CoxQjQ= +github.com/Connor1996/badger v1.5.1-0.20200220043901-cee19f7bcf4f h1:xNCYfucq8ErQ0obR9SirnxbCXtzwQkORy8KhtKuf0/Q= +github.com/Connor1996/badger v1.5.1-0.20200220043901-cee19f7bcf4f/go.mod h1:eDy3lZfjgEs4EC8pePI7y/Qx509ylx/S94y/dimtkxc= +github.com/Connor1996/badger v1.5.1-0.20200302104252-6a1b02b1cb4c h1:5BlmQypu3/umx1JZw5sE5lv1zdPMRl1NQLUYCvoRgUk= +github.com/Connor1996/badger v1.5.1-0.20200302104252-6a1b02b1cb4c/go.mod h1:eDy3lZfjgEs4EC8pePI7y/Qx509ylx/S94y/dimtkxc= +github.com/Connor1996/badger v1.5.1-0.20200306031920-9bbcbd8ba570 h1:fDz4LQTBQynUkcYDLzVQ8nz4HKoRu4zga7/MGXV/e8s= +github.com/Connor1996/badger v1.5.1-0.20200306031920-9bbcbd8ba570/go.mod h1:eDy3lZfjgEs4EC8pePI7y/Qx509ylx/S94y/dimtkxc= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY= +github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= +github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3 h1:T7Bw4H6z3WAZ2khw+gfKdYmbKHyy5xiHtk9IHfZqm7g= +github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd 
h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coocood/badger v1.5.1-0.20191220113928-eaffd0ec7a8c h1:3LUmJxDuX+9aSQl0Szun1ZkfARHD/2NOgcRd2ncyOZI= +github.com/coocood/badger v1.5.1-0.20191220113928-eaffd0ec7a8c/go.mod h1:nWOxoEl8pfb73mZNB38uYvFfNOykho6REd5J6VerzjM= +github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64 h1:W1SHiII3e0jVwvaQFglwu3kS9NLxOeTpvik7MbKCyuQ= +github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64/go.mod h1:F86k/6c7aDUdwSUevnLpHS/3Q9hzYCE99jGk2xsHnt0= +github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2 h1:NnLfQ77q0G4k2Of2c1ceQ0ec6MkLQyDp+IGdVM0D8XM= +github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2/go.mod h1:7qG7YFnOALvsx6tKTNmQot8d7cGFXM9TidzvRFLWYwM= +github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE= +github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/ristretto v0.0.0-20191010170704-2ba187ef9534 h1:9G6fVccQriMJu4nXwpwLDoy9y31t/KUSLAbPcoBgv+4= +github.com/dgraph-io/ristretto v0.0.0-20191010170704-2ba187ef9534/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= +github.com/dgrijalva/jwt-go v3.2.0+incompatible 
h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= +github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= 
+github.com/grpc-ecosystem/grpc-gateway v1.5.1 h1:3scN4iuXkNOyP98jF55Lv8a9j1o/IwvnDIZ0LHJK1nk= +github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 h1:rhqTjzJlm7EbkELJDKMTU7udov+Se0xZkWmugr6zGok= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618 h1:MK144iBQF9hTSwBW/9eJm034bVoG30IshVm688T2hi8= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 h1:WQM1NildKThwdP7qWrNAFGzp4ijNLw8RlgENkaI4MJs= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3 
h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/myesui/uuid v1.0.0 h1:xCBmH4l5KuvLYc5L7AS7SZg9/jKdIFubM7OVoLqaQUI= +github.com/myesui/uuid v1.0.0/go.mod h1:2CDfNgU0LR8mIdO8vdWd8i9gWWxLlcoIGGpSNgafq84= +github.com/ncw/directio v1.0.4 h1:CojwI07mCEmRkajgx42Pf8jyCwTs1ji9/Ij9/PJG12k= +github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY= +github.com/ngaut/log v0.0.0-20180314031856-b8e36e7ba5ac h1:wyheT2lPXRQqYPWY2IVW5BTLrbqCsnhL61zK2R5goLA= +github.com/ngaut/log v0.0.0-20180314031856-b8e36e7ba5ac/go.mod h1:ueVCjKQllPmX7uEvCYnZD5b8qjidGf1TCH61arVe4SU= +github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= +github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= +github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= +github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= 
+github.com/petar/GoLLRB v0.0.0-20190514000832-33fb24c13b99 h1:KcEvVBAvyHkUdFAygKAzwB6LAcZ6LS32WHmRD2VyXMI= +github.com/petar/GoLLRB v0.0.0-20190514000832-33fb24c13b99/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= +github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= +github.com/pingcap/errors v0.10.1/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4= +github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8= +github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= +github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/kvproto v0.0.0-20190516013202-4cf58ad90b6c/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= +github.com/pingcap/kvproto v0.0.0-20190821201150-798d27658fae h1:WR4d5ga8zXT+QDWYFzzyA+PJMMszR0kQxyYMh6dvHPg= +github.com/pingcap/kvproto v0.0.0-20190821201150-798d27658fae/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= +github.com/pingcap/log v0.0.0-20190214045112-b37da76f67a7/go.mod h1:xsfkWVaFVV5B8e1K9seWfyJWFrIhbtUTAD8NV1Pq3+w= +github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596 h1:t2OQTpPJnrPDGlvA+3FwJptMTt6MEPdzK1Wt99oaefQ= +github.com/pingcap/log v0.0.0-20190307075452-bd41d9273596/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= +github.com/pingcap/parser v0.0.0-20190903084634-0daf3f706c76 h1:q8d5NIRT/Urmb5woYWhlrMER8nDV33tjyvJMqODI2Rk= +github.com/pingcap/parser v0.0.0-20190903084634-0daf3f706c76/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/pd v0.0.0-20190712044914-75a1f9f3062b/go.mod h1:3DlDlFT7EF64A1bmb/tulZb6wbPSagm5G4p1AlhaEDs= +github.com/pingcap/tidb v1.1.0-beta.0.20190904060835-0872b65ff1f9 h1:Fg4wHf1wd50v0RR+GPIIGrcnCVI/LSXKUYoB5ON1t6k= +github.com/pingcap/tidb v1.1.0-beta.0.20190904060835-0872b65ff1f9/go.mod h1:vLe4ZQRrNZ98B0W6BMZJ2MFlGuLNhMO0gYLL7o7QHiE= +github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= +github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tipb v0.0.0-20190806070524-16909e03435e h1:H7meq8QPmWGImOkHTQYAWw82zwIqndJaCDPVUknOHbM= +github.com/pingcap/tipb v0.0.0-20190806070524-16909e03435e/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 
h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28= +github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v2.18.10+incompatible h1:cy84jW6EVRPa5g9HAHrlbxMSIjBhDSX0OFYyMYminYs= +github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= +github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= +github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/twinj/uuid v1.0.0 h1:fzz7COZnDrXGTAOHGuUGYd6sG+JMq+AoE7+Jlu0przk= +github.com/twinj/uuid v1.0.0/go.mod h1:mMgcE1RHFUFqe5AfiwlINXisXfDGro23fWdPUfOMjRY= +github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= +github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= +github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.2 h1:JON3E2/GPW2iDNGoSAusl1KDf5TRQ8k8q7Tp097pZGs= +github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43 h1:BasDe+IErOQKrMVXab7UayvSlIpiyGwRvuX3EKYY7UA= +github.com/ugorji/go/codec 
v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= +github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= +github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= +github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= +github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf h1:rmttwKPEgG/l4UscTDYtaJgeUsedKPKSyFfNQLI6q+I= +go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190109145017-48ac38b7c8cb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190130214255-bb1329dc71a0 h1:iRpjPej1fPzmfoBhMFkp3HdqzF+ytPmAwiQhJGV0zGw= +golang.org/x/tools v0.0.0-20190130214255-bb1329dc71a0/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190108161440-ae2f86662275 h1:9oFlwfEGIvmxXTcY53ygNyxIQtWciRHjrnUvZJCYXYU= +google.golang.org/genproto v0.0.0-20190108161440-ae2f86662275/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 
+google.golang.org/grpc v1.16.0 h1:dz5IJGuC2BB7qXR5AyHNwAUBhZscK2xVez7mznh72sY= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/stretchr/testify.v1 v1.2.2 h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M= +gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ= +sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/kv/config/config.go b/kv/config/config.go new file mode 100644 index 00000000..572c3869 --- /dev/null +++ b/kv/config/config.go @@ -0,0 +1,99 @@ +package config + +import ( + "fmt" + "time" + + "github.com/pingcap-incubator/tinykv/log" +) + +type Config struct { + StoreAddr string + Raft bool + SchedulerAddr string + LogLevel string + + DBPath string // Directory to store the data in. Should exist and be writable. + + // raft_base_tick_interval is a base tick interval (ms). 
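+	// A rough worked example using the defaults in NewDefaultConfig below
+	// (illustrative, not a requirement): RaftBaseTickInterval = 1s,
+	// RaftHeartbeatTicks = 2 and RaftElectionTimeoutTicks = 10 mean heartbeats
+	// are sent about every 2s and an election starts after roughly 10s
+	// without hearing from a leader.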
+ RaftBaseTickInterval time.Duration + RaftHeartbeatTicks int + RaftElectionTimeoutTicks int + + // Interval to gc unnecessary raft log (ms). + RaftLogGCTickInterval time.Duration + // When entry count exceed this value, gc will be forced trigger. + RaftLogGcCountLimit uint64 + + // Interval (ms) to check region whether need to be split or not. + SplitRegionCheckTickInterval time.Duration + // delay time before deleting a stale peer + SchedulerHeartbeatTickInterval time.Duration + SchedulerStoreHeartbeatTickInterval time.Duration + + // When region [a,e) size meets regionMaxSize, it will be split into + // several regions [a,b), [b,c), [c,d), [d,e). And the size of [a,b), + // [b,c), [c,d) will be regionSplitSize (maybe a little larger). + RegionMaxSize uint64 + RegionSplitSize uint64 +} + +func (c *Config) Validate() error { + if c.RaftHeartbeatTicks == 0 { + return fmt.Errorf("heartbeat tick must greater than 0") + } + + if c.RaftElectionTimeoutTicks != 10 { + log.Warnf("Election timeout ticks needs to be same across all the cluster, " + + "otherwise it may lead to inconsistency.") + } + + if c.RaftElectionTimeoutTicks <= c.RaftHeartbeatTicks { + return fmt.Errorf("election tick must be greater than heartbeat tick.") + } + + return nil +} + +const ( + KB uint64 = 1024 + MB uint64 = 1024 * 1024 +) + +func NewDefaultConfig() *Config { + return &Config{ + SchedulerAddr: "127.0.0.1:2379", + StoreAddr: "127.0.0.1:20160", + LogLevel: "info", + RaftBaseTickInterval: 1 * time.Second, + RaftHeartbeatTicks: 2, + RaftElectionTimeoutTicks: 10, + RaftLogGCTickInterval: 10 * time.Second, + // Assume the average size of entries is 1k. + RaftLogGcCountLimit: 128000, + SplitRegionCheckTickInterval: 10 * time.Second, + SchedulerHeartbeatTickInterval: 100 * time.Millisecond, + SchedulerStoreHeartbeatTickInterval: 10 * time.Second, + RegionMaxSize: 144 * MB, + RegionSplitSize: 96 * MB, + DBPath: "/tmp/badger", + } +} + +func NewTestConfig() *Config { + return &Config{ + LogLevel: "info", + RaftBaseTickInterval: 10 * time.Millisecond, + RaftHeartbeatTicks: 2, + RaftElectionTimeoutTicks: 10, + RaftLogGCTickInterval: 50 * time.Millisecond, + // Assume the average size of entries is 1k. + RaftLogGcCountLimit: 128000, + SplitRegionCheckTickInterval: 100 * time.Millisecond, + SchedulerHeartbeatTickInterval: 100 * time.Millisecond, + SchedulerStoreHeartbeatTickInterval: 500 * time.Millisecond, + RegionMaxSize: 144 * MB, + RegionSplitSize: 96 * MB, + DBPath: "/tmp/badger", + } +} diff --git a/kv/coprocessor/rowcodec/common.go b/kv/coprocessor/rowcodec/common.go new file mode 100644 index 00000000..53f91a4b --- /dev/null +++ b/kv/coprocessor/rowcodec/common.go @@ -0,0 +1,231 @@ +package rowcodec + +import ( + "encoding/binary" + "fmt" + "reflect" + "strings" + "unsafe" + + "github.com/juju/errors" +) + +// CodecVer is the constant number that represent the new row format. +const CodecVer = 128 + +var invalidCodecVer = errors.New("invalid codec version") + +// First byte in the encoded value which specifies the encoding type. +const ( + NilFlag byte = 0 + BytesFlag byte = 1 + CompactBytesFlag byte = 2 + IntFlag byte = 3 + UintFlag byte = 4 + VarintFlag byte = 8 + VaruintFlag byte = 9 +) + +// row is the struct type used to access the a row. +type row struct { + // small: colID []byte, offsets []uint16, optimized for most cases. + // large: colID []uint32, offsets []uint32. + large bool + numNotNullCols uint16 + numNullCols uint16 + colIDs []byte + + // valFlags is used for converting new row format to old row format. 
+ // It can be removed once TiDB implemented the new row format. + valFlags []byte + offsets []uint16 + data []byte + + // for large row + colIDs32 []uint32 + offsets32 []uint32 +} + +// String implements the strings.Stringer interface. +func (r row) String() string { + var colValStrs []string + for i := 0; i < int(r.numNotNullCols); i++ { + var colID, offStart, offEnd int64 + if r.large { + colID = int64(r.colIDs32[i]) + if i != 0 { + offStart = int64(r.offsets32[i-1]) + } + offEnd = int64(r.offsets32[i]) + } else { + colID = int64(r.colIDs[i]) + if i != 0 { + offStart = int64(r.offsets[i-1]) + } + offEnd = int64(r.offsets[i]) + } + colValData := r.data[offStart:offEnd] + valFlag := r.valFlags[i] + var colValStr string + if valFlag == BytesFlag { + colValStr = fmt.Sprintf("(%d:'%s')", colID, colValData) + } else { + colValStr = fmt.Sprintf("(%d:%d)", colID, colValData) + } + colValStrs = append(colValStrs, colValStr) + } + return strings.Join(colValStrs, ",") +} + +func (r *row) getData(i int) []byte { + var start, end uint32 + if r.large { + if i > 0 { + start = r.offsets32[i-1] + } + end = r.offsets32[i] + } else { + if i > 0 { + start = uint32(r.offsets[i-1]) + } + end = uint32(r.offsets[i]) + } + return r.data[start:end] +} + +func (r *row) setRowData(rowData []byte) error { + if rowData[0] != CodecVer { + return invalidCodecVer + } + r.large = rowData[1]&1 > 0 + r.numNotNullCols = binary.LittleEndian.Uint16(rowData[2:]) + r.numNullCols = binary.LittleEndian.Uint16(rowData[4:]) + cursor := 6 + r.valFlags = rowData[cursor : cursor+int(r.numNotNullCols)] + cursor += int(r.numNotNullCols) + if r.large { + colIDsLen := int(r.numNotNullCols+r.numNullCols) * 4 + r.colIDs32 = bytesToU32Slice(rowData[cursor : cursor+colIDsLen]) + cursor += colIDsLen + offsetsLen := int(r.numNotNullCols) * 4 + r.offsets32 = bytesToU32Slice(rowData[cursor : cursor+offsetsLen]) + cursor += offsetsLen + } else { + colIDsLen := int(r.numNotNullCols + r.numNullCols) + r.colIDs = rowData[cursor : cursor+colIDsLen] + cursor += colIDsLen + offsetsLen := int(r.numNotNullCols) * 2 + r.offsets = bytes2U16Slice(rowData[cursor : cursor+offsetsLen]) + cursor += offsetsLen + } + r.data = rowData[cursor:] + return nil +} + +func bytesToU32Slice(b []byte) []uint32 { + if len(b) == 0 { + return nil + } + var u32s []uint32 + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u32s)) + hdr.Len = len(b) / 4 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&b[0])) + return u32s +} + +func bytes2U16Slice(b []byte) []uint16 { + if len(b) == 0 { + return nil + } + var u16s []uint16 + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u16s)) + hdr.Len = len(b) / 2 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&b[0])) + return u16s +} + +func u16SliceToBytes(u16s []uint16) []byte { + if len(u16s) == 0 { + return nil + } + var b []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + hdr.Len = len(u16s) * 2 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&u16s[0])) + return b +} + +func u32SliceToBytes(u32s []uint32) []byte { + if len(u32s) == 0 { + return nil + } + var b []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + hdr.Len = len(u32s) * 4 + hdr.Cap = hdr.Len + hdr.Data = uintptr(unsafe.Pointer(&u32s[0])) + return b +} + +func encodeInt(buf []byte, iVal int64) []byte { + var tmp [8]byte + if int64(int8(iVal)) == iVal { + buf = append(buf, byte(iVal)) + } else if int64(int16(iVal)) == iVal { + binary.LittleEndian.PutUint16(tmp[:], uint16(iVal)) + buf = append(buf, tmp[:2]...) 
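+		// The same pattern continues in the wider branches below: the value is
+		// written in the smallest little-endian width (1, 2, 4 or 8 bytes) that
+		// holds it, and decodeInt/decodeUint recover the width from len(val).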
+ } else if int64(int32(iVal)) == iVal { + binary.LittleEndian.PutUint32(tmp[:], uint32(iVal)) + buf = append(buf, tmp[:4]...) + } else { + binary.LittleEndian.PutUint64(tmp[:], uint64(iVal)) + buf = append(buf, tmp[:8]...) + } + return buf +} + +func decodeInt(val []byte) int64 { + switch len(val) { + case 1: + return int64(int8(val[0])) + case 2: + return int64(int16(binary.LittleEndian.Uint16(val))) + case 4: + return int64(int32(binary.LittleEndian.Uint32(val))) + default: + return int64(binary.LittleEndian.Uint64(val)) + } +} + +func encodeUint(buf []byte, uVal uint64) []byte { + var tmp [8]byte + if uint64(uint8(uVal)) == uVal { + buf = append(buf, byte(uVal)) + } else if uint64(uint16(uVal)) == uVal { + binary.LittleEndian.PutUint16(tmp[:], uint16(uVal)) + buf = append(buf, tmp[:2]...) + } else if uint64(uint32(uVal)) == uVal { + binary.LittleEndian.PutUint32(tmp[:], uint32(uVal)) + buf = append(buf, tmp[:4]...) + } else { + binary.LittleEndian.PutUint64(tmp[:], uint64(uVal)) + buf = append(buf, tmp[:8]...) + } + return buf +} + +func decodeUint(val []byte) uint64 { + switch len(val) { + case 1: + return uint64(val[0]) + case 2: + return uint64(binary.LittleEndian.Uint16(val)) + case 4: + return uint64(binary.LittleEndian.Uint32(val)) + default: + return binary.LittleEndian.Uint64(val) + } +} diff --git a/kv/coprocessor/rowcodec/decoder.go b/kv/coprocessor/rowcodec/decoder.go new file mode 100644 index 00000000..12df0e1d --- /dev/null +++ b/kv/coprocessor/rowcodec/decoder.go @@ -0,0 +1,254 @@ +package rowcodec + +import ( + "math" + "time" + + "github.com/juju/errors" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/types/json" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/codec" +) + +// Decoder decodes the row to chunk.Chunk. +type Decoder struct { + row + requestColIDs []int64 + handleColID int64 + requestTypes []*types.FieldType + origDefaults [][]byte + loc *time.Location +} + +// NewDecoder creates a NewDecoder. +// requestColIDs is the columnIDs to decode. tps is the field types for request columns. +// origDefault is the original default value in old format, if the column ID is not found in the row, +// the origDefault will be used. 
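+//
+// A minimal usage sketch (illustrative only; the column IDs, field types and
+// location below are made-up placeholders, not fixed by this package):
+//
+//	colIDs := []int64{1, 2} // 1 is also the handle column here
+//	tps := []*types.FieldType{
+//		types.NewFieldType(mysql.TypeLonglong),
+//		types.NewFieldType(mysql.TypeVarchar),
+//	}
+//	dec, err := NewDecoder(colIDs, 1, tps, make([][]byte, len(colIDs)), time.UTC)
+//	// then: dec.Decode(rowData, handle, chk) appends the requested columns to chk.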
+func NewDecoder(requestColIDs []int64, handleColID int64, tps []*types.FieldType, origDefaults [][]byte, + loc *time.Location) (*Decoder, error) { + xOrigDefaultVals := make([][]byte, len(origDefaults)) + for i := 0; i < len(origDefaults); i++ { + if len(origDefaults[i]) == 0 { + continue + } + xDefaultVal, err := convertDefaultValue(origDefaults[i]) + if err != nil { + return nil, err + } + xOrigDefaultVals[i] = xDefaultVal + } + return &Decoder{ + requestColIDs: requestColIDs, + handleColID: handleColID, + requestTypes: tps, + origDefaults: xOrigDefaultVals, + loc: loc, + }, nil +} + +func convertDefaultValue(defaultVal []byte) (colVal []byte, err error) { + var d types.Datum + _, d, err = codec.DecodeOne(defaultVal) + if err != nil { + return + } + switch d.Kind() { + case types.KindNull: + return nil, nil + case types.KindInt64: + return encodeInt(nil, d.GetInt64()), nil + case types.KindUint64: + return encodeUint(nil, d.GetUint64()), nil + case types.KindString, types.KindBytes: + return d.GetBytes(), nil + case types.KindFloat32: + return encodeUint(nil, uint64(math.Float32bits(d.GetFloat32()))), nil + case types.KindFloat64: + return encodeUint(nil, math.Float64bits(d.GetFloat64())), nil + default: + return defaultVal[1:], nil + } +} + +// Decode decodes a row to chunk. +func (decoder *Decoder) Decode(rowData []byte, handle int64, chk *chunk.Chunk) error { + err := decoder.setRowData(rowData) + if err != nil { + return err + } + for colIdx, colID := range decoder.requestColIDs { + if colID == decoder.handleColID { + chk.AppendInt64(colIdx, handle) + continue + } + // Search the column in not-null columns array. + i, j := 0, int(decoder.numNotNullCols) + var found bool + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + var v int64 + if decoder.large { + v = int64(decoder.colIDs32[h]) + } else { + v = int64(decoder.colIDs[h]) + } + if v < colID { + i = h + 1 + } else if v > colID { + j = h + } else { + found = true + colData := decoder.getData(h) + err := decoder.decodeColData(colIdx, colData, chk) + if err != nil { + return err + } + break + } + } + if found { + continue + } + defaultVal := decoder.origDefaults[colIdx] + if decoder.isNull(colID, defaultVal) { + chk.AppendNull(colIdx) + } else { + err := decoder.decodeColData(colIdx, defaultVal, chk) + if err != nil { + return err + } + } + } + return nil +} + +// ColumnIsNull returns if the column value is null. Mainly used for count column aggregation. +func (decoder *Decoder) ColumnIsNull(rowData []byte, colID int64, defaultVal []byte) (bool, error) { + err := decoder.setRowData(rowData) + if err != nil { + return false, err + } + // Search the column in not-null columns array. + i, j := 0, int(decoder.numNotNullCols) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + var v int64 + if decoder.large { + v = int64(decoder.colIDs32[h]) + } else { + v = int64(decoder.colIDs[h]) + } + if v < colID { + i = h + 1 + } else if v > colID { + j = h + } else { + return false, nil + } + } + return decoder.isNull(colID, defaultVal), nil +} + +func (decoder *Decoder) isNull(colID int64, defaultVal []byte) bool { + // Search the column in null columns array. 
+ i, j := int(decoder.numNotNullCols), int(decoder.numNotNullCols+decoder.numNullCols) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + var v int64 + if decoder.large { + v = int64(decoder.colIDs32[h]) + } else { + v = int64(decoder.colIDs[h]) + } + if v < colID { + i = h + 1 + } else if v > colID { + j = h + } else { + return true + } + } + return defaultVal == nil +} + +func (decoder *Decoder) decodeColData(colIdx int, colData []byte, chk *chunk.Chunk) error { + ft := decoder.requestTypes[colIdx] + switch ft.Tp { + case mysql.TypeLonglong, mysql.TypeLong, mysql.TypeInt24, mysql.TypeShort, mysql.TypeTiny, mysql.TypeYear: + if mysql.HasUnsignedFlag(ft.Flag) { + chk.AppendUint64(colIdx, decodeUint(colData)) + } else { + chk.AppendInt64(colIdx, decodeInt(colData)) + } + case mysql.TypeFloat: + _, fVal, err := codec.DecodeFloat(colData) + if err != nil { + return err + } + chk.AppendFloat32(colIdx, float32(fVal)) + case mysql.TypeDouble: + _, fVal, err := codec.DecodeFloat(colData) + if err != nil { + return err + } + chk.AppendFloat64(colIdx, fVal) + case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + chk.AppendBytes(colIdx, colData) + case mysql.TypeNewDecimal: + _, dec, _, _, err := codec.DecodeDecimal(colData) + if err != nil { + return err + } + chk.AppendMyDecimal(colIdx, dec) + case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp: + var t types.Time + t.Type = ft.Tp + t.Fsp = int8(ft.Decimal) + err := t.FromPackedUint(decodeUint(colData)) + if err != nil { + return err + } + if ft.Tp == mysql.TypeTimestamp && !t.IsZero() { + err = t.ConvertTimeZone(time.UTC, decoder.loc) + if err != nil { + return err + } + } + chk.AppendTime(colIdx, t) + case mysql.TypeDuration: + var dur types.Duration + dur.Duration = time.Duration(decodeInt(colData)) + dur.Fsp = int8(ft.Decimal) + chk.AppendDuration(colIdx, dur) + case mysql.TypeEnum: + // ignore error deliberately, to read empty enum value. + enum, err := types.ParseEnumValue(ft.Elems, decodeUint(colData)) + if err != nil { + enum = types.Enum{} + } + chk.AppendEnum(colIdx, enum) + case mysql.TypeSet: + set, err := types.ParseSetValue(ft.Elems, decodeUint(colData)) + if err != nil { + return err + } + chk.AppendSet(colIdx, set) + case mysql.TypeBit: + byteSize := (ft.Flen + 7) >> 3 + chk.AppendBytes(colIdx, types.NewBinaryLiteralFromUint(decodeUint(colData), byteSize)) + case mysql.TypeJSON: + var j json.BinaryJSON + j.TypeCode = colData[0] + j.Value = colData[1:] + chk.AppendJSON(colIdx, j) + default: + return errors.Errorf("unknown type %d", ft.Tp) + } + return nil +} diff --git a/kv/coprocessor/rowcodec/encoder.go b/kv/coprocessor/rowcodec/encoder.go new file mode 100644 index 00000000..38e04aa5 --- /dev/null +++ b/kv/coprocessor/rowcodec/encoder.go @@ -0,0 +1,368 @@ +package rowcodec + +import ( + "math" + "sort" + "time" + + "github.com/juju/errors" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/codec" +) + +// Encoder is used to encode a row. 
+type Encoder struct { + row + tempColIDs []int64 + values []types.Datum + tempData []byte +} + +func (encoder *Encoder) reset() { + encoder.large = false + encoder.numNotNullCols = 0 + encoder.numNullCols = 0 + encoder.data = encoder.data[:0] + encoder.tempColIDs = encoder.tempColIDs[:0] + encoder.values = encoder.values[:0] +} + +func (encoder *Encoder) addColumn(colID int64, d types.Datum) { + if colID > 255 { + encoder.large = true + } + if d.IsNull() { + encoder.numNullCols++ + } else { + encoder.numNotNullCols++ + } + encoder.tempColIDs = append(encoder.tempColIDs, colID) + encoder.values = append(encoder.values, d) +} + +// Encode encodes a row from a datums slice. +func (encoder *Encoder) Encode(colIDs []int64, values []types.Datum, buf []byte) ([]byte, error) { + encoder.reset() + for i, colID := range colIDs { + encoder.addColumn(colID, values[i]) + } + return encoder.build(buf[:0]) +} + +// EncodeFromOldRow encodes a row from an old-format row. +func (encoder *Encoder) EncodeFromOldRow(oldRow, buf []byte) ([]byte, error) { + encoder.reset() + for len(oldRow) > 1 { + var d types.Datum + var err error + oldRow, d, err = codec.DecodeOne(oldRow) + if err != nil { + return nil, err + } + colID := d.GetInt64() + oldRow, d, err = codec.DecodeOne(oldRow) + if err != nil { + return nil, err + } + encoder.addColumn(colID, d) + } + return encoder.build(buf[:0]) +} + +func (encoder *Encoder) build(buf []byte) ([]byte, error) { + r := &encoder.row + // Separate null and not-null column IDs. + numCols := len(encoder.tempColIDs) + nullIdx := numCols - int(r.numNullCols) + notNullIdx := 0 + if r.large { + encoder.initColIDs32() + encoder.initOffsets32() + } else { + encoder.initColIDs() + encoder.initOffsets() + } + for i, colID := range encoder.tempColIDs { + if encoder.values[i].IsNull() { + if r.large { + r.colIDs32[nullIdx] = uint32(colID) + } else { + r.colIDs[nullIdx] = byte(colID) + } + nullIdx++ + } else { + if r.large { + r.colIDs32[notNullIdx] = uint32(colID) + } else { + r.colIDs[notNullIdx] = byte(colID) + } + encoder.values[notNullIdx] = encoder.values[i] + notNullIdx++ + } + } + if r.large { + largeNotNullSorter := (*largeNotNullSorter)(encoder) + sort.Sort(largeNotNullSorter) + if r.numNullCols > 0 { + largeNullSorter := (*largeNullSorter)(encoder) + sort.Sort(largeNullSorter) + } + } else { + smallNotNullSorter := (*smallNotNullSorter)(encoder) + sort.Sort(smallNotNullSorter) + if r.numNullCols > 0 { + smallNullSorter := (*smallNullSorter)(encoder) + sort.Sort(smallNullSorter) + } + } + encoder.initValFlags() + for i := 0; i < notNullIdx; i++ { + d := encoder.values[i] + switch d.Kind() { + case types.KindInt64: + r.valFlags[i] = IntFlag + r.data = encodeInt(r.data, d.GetInt64()) + case types.KindUint64: + r.valFlags[i] = UintFlag + r.data = encodeUint(r.data, d.GetUint64()) + case types.KindString, types.KindBytes: + r.valFlags[i] = BytesFlag + r.data = append(r.data, d.GetBytes()...) + default: + var err error + encoder.tempData, err = codec.EncodeValue(defaultStmtCtx, encoder.tempData[:0], d) + if err != nil { + return nil, errors.Trace(err) + } + r.valFlags[i] = encoder.tempData[0] + r.data = append(r.data, encoder.tempData[1:]...) + } + if len(r.data) > math.MaxUint16 && !r.large { + // We need to convert the row to large row. 
+ encoder.initColIDs32() + for j := 0; j < numCols; j++ { + r.colIDs32[j] = uint32(r.colIDs[j]) + } + encoder.initOffsets32() + for j := 0; j <= i; j++ { + r.offsets32[j] = uint32(r.offsets[j]) + } + r.large = true + } + if r.large { + r.offsets32[i] = uint32(len(r.data)) + } else { + r.offsets[i] = uint16(len(r.data)) + } + } + if !r.large { + if len(r.data) >= math.MaxUint16 { + r.large = true + encoder.initColIDs32() + for i, val := range r.colIDs { + r.colIDs32[i] = uint32(val) + } + } else { + encoder.initOffsets() + for i, val := range r.offsets32 { + r.offsets[i] = uint16(val) + } + } + } + buf = append(buf, CodecVer) + flag := byte(0) + if r.large { + flag = 1 + } + buf = append(buf, flag) + buf = append(buf, byte(r.numNotNullCols), byte(r.numNotNullCols>>8)) + buf = append(buf, byte(r.numNullCols), byte(r.numNullCols>>8)) + buf = append(buf, r.valFlags...) + if r.large { + buf = append(buf, u32SliceToBytes(r.colIDs32)...) + buf = append(buf, u32SliceToBytes(r.offsets32)...) + } else { + buf = append(buf, r.colIDs...) + buf = append(buf, u16SliceToBytes(r.offsets)...) + } + buf = append(buf, r.data...) + return buf, nil +} + +func (encoder *Encoder) initValFlags() { + if cap(encoder.valFlags) >= int(encoder.numNotNullCols) { + encoder.valFlags = encoder.valFlags[:encoder.numNotNullCols] + } else { + encoder.valFlags = make([]byte, encoder.numNotNullCols) + } +} + +func (encoder *Encoder) initColIDs() { + numCols := int(encoder.numNotNullCols + encoder.numNullCols) + if cap(encoder.colIDs) >= numCols { + encoder.colIDs = encoder.colIDs[:numCols] + } else { + encoder.colIDs = make([]byte, numCols) + } +} + +func (encoder *Encoder) initColIDs32() { + numCols := int(encoder.numNotNullCols + encoder.numNullCols) + if cap(encoder.colIDs32) >= numCols { + encoder.colIDs32 = encoder.colIDs32[:numCols] + } else { + encoder.colIDs32 = make([]uint32, numCols) + } +} + +func (encoder *Encoder) initOffsets() { + if cap(encoder.offsets) >= int(encoder.numNotNullCols) { + encoder.offsets = encoder.offsets[:encoder.numNotNullCols] + } else { + encoder.offsets = make([]uint16, encoder.numNotNullCols) + } +} + +func (encoder *Encoder) initOffsets32() { + if cap(encoder.offsets32) >= int(encoder.numNotNullCols) { + encoder.offsets32 = encoder.offsets32[:encoder.numNotNullCols] + } else { + encoder.offsets32 = make([]uint32, encoder.numNotNullCols) + } +} + +type largeNotNullSorter Encoder + +func (s *largeNotNullSorter) Less(i, j int) bool { + return s.colIDs32[i] < s.colIDs32[j] +} + +func (s *largeNotNullSorter) Len() int { + return int(s.numNotNullCols) +} + +func (s *largeNotNullSorter) Swap(i, j int) { + s.colIDs32[i], s.colIDs32[j] = s.colIDs32[j], s.colIDs32[i] + s.values[i], s.values[j] = s.values[j], s.values[i] +} + +type smallNotNullSorter Encoder + +func (s *smallNotNullSorter) Less(i, j int) bool { + return s.colIDs[i] < s.colIDs[j] +} + +func (s *smallNotNullSorter) Len() int { + return int(s.numNotNullCols) +} + +func (s *smallNotNullSorter) Swap(i, j int) { + s.colIDs[i], s.colIDs[j] = s.colIDs[j], s.colIDs[i] + s.values[i], s.values[j] = s.values[j], s.values[i] +} + +type smallNullSorter Encoder + +func (s *smallNullSorter) Less(i, j int) bool { + nullCols := s.colIDs[s.numNotNullCols:] + return nullCols[i] < nullCols[j] +} + +func (s *smallNullSorter) Len() int { + return int(s.numNullCols) +} + +func (s *smallNullSorter) Swap(i, j int) { + nullCols := s.colIDs[s.numNotNullCols:] + nullCols[i], nullCols[j] = nullCols[j], nullCols[i] +} + +type largeNullSorter Encoder + +func (s 
*largeNullSorter) Less(i, j int) bool { + nullCols := s.colIDs32[s.numNotNullCols:] + return nullCols[i] < nullCols[j] +} + +func (s *largeNullSorter) Len() int { + return int(s.numNullCols) +} + +func (s *largeNullSorter) Swap(i, j int) { + nullCols := s.colIDs32[s.numNotNullCols:] + nullCols[i], nullCols[j] = nullCols[j], nullCols[i] +} + +var defaultStmtCtx = &stmtctx.StatementContext{ + TimeZone: time.Local, +} + +const ( + // Length of rowkey. + rowKeyLen = 19 + // Index of record flag 'r' in rowkey used by master tidb-server. + // The rowkey format is t{8 bytes id}_r{8 bytes handle} + recordPrefixIdx = 10 + // Index of record flag 'r' in rowkey whit shard byte. + shardedRecordPrefixIdx = 1 +) + +func IsRowKeyWithShardByte(key []byte) bool { + return len(key) == rowKeyLen && key[0] == 't' && key[shardedRecordPrefixIdx] == 'r' +} + +func IsRowKey(key []byte) bool { + return len(key) == rowKeyLen && key[0] == 't' && key[recordPrefixIdx] == 'r' +} + +// RowToOldRow converts a row to old-format row. +func RowToOldRow(rowData, buf []byte) ([]byte, error) { + if len(rowData) == 0 { + return rowData, nil + } + buf = buf[:0] + var r row + err := r.setRowData(rowData) + if err != nil { + return nil, err + } + if !r.large { + for i, colID := range r.colIDs { + buf = encodeOldOne(&r, buf, i, int64(colID)) + } + } else { + for i, colID := range r.colIDs32 { + buf = encodeOldOne(&r, buf, i, int64(colID)) + } + } + if len(buf) == 0 { + buf = append(buf, NilFlag) + } + return buf, nil +} + +func encodeOldOne(r *row, buf []byte, i int, colID int64) []byte { + buf = append(buf, VarintFlag) + buf = codec.EncodeVarint(buf, colID) + if i < int(r.numNotNullCols) { + val := r.getData(i) + switch r.valFlags[i] { + case BytesFlag: + buf = append(buf, CompactBytesFlag) + buf = codec.EncodeCompactBytes(buf, val) + case IntFlag: + buf = append(buf, VarintFlag) + buf = codec.EncodeVarint(buf, decodeInt(val)) + case UintFlag: + buf = append(buf, VaruintFlag) + buf = codec.EncodeUvarint(buf, decodeUint(val)) + default: + buf = append(buf, r.valFlags[i]) + buf = append(buf, val...) + } + } else { + buf = append(buf, NilFlag) + } + return buf +} diff --git a/kv/coprocessor/rowcodec/rowcodec_test.go b/kv/coprocessor/rowcodec/rowcodec_test.go new file mode 100644 index 00000000..bb0221b2 --- /dev/null +++ b/kv/coprocessor/rowcodec/rowcodec_test.go @@ -0,0 +1,160 @@ +package rowcodec + +import ( + "testing" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testSuite{}) + +type testSuite struct{} + +func (s *testSuite) TestRowCodec(c *C) { + colIDs := []int64{1, 2, 3} + tps := make([]*types.FieldType, 3) + for i := 0; i < 3; i++ { + tps[i] = types.NewFieldType(mysql.TypeLonglong) + } + sc := new(stmtctx.StatementContext) + oldRow, err := tablecodec.EncodeRow(sc, types.MakeDatums(1, 2, 3), colIDs, nil, nil) + c.Check(err, IsNil) + + var rb Encoder + newRow, err := rb.EncodeFromOldRow(oldRow, nil) + c.Check(err, IsNil) + rd, err := NewDecoder(colIDs, 0, tps, make([][]byte, 3), time.Local) + c.Assert(err, IsNil) + chk := chunk.NewChunkWithCapacity(tps, 1) + err = rd.Decode(newRow, -1, chk) + c.Assert(err, IsNil) + row := chk.GetRow(0) + for i := 0; i < 3; i++ { + c.Assert(row.GetInt64(i), Equals, int64(i)+1) + } +} + +func (s *testSuite) TestRowCodecIsNull(c *C) { + colIDs := []int64{1, 2} + tps := make([]*types.FieldType, 2) + for i := 0; i < 2; i++ { + tps[i] = types.NewFieldType(mysql.TypeLonglong) + } + var rb Encoder + newRow, err := rb.Encode(colIDs, types.MakeDatums(1, nil), nil) + c.Assert(err, IsNil) + rd, err := NewDecoder(colIDs, 0, tps, make([][]byte, 3), time.Local) + c.Assert(err, IsNil) + defaultVal := make([]byte, 1) + isNull, err := rd.ColumnIsNull(newRow, 1, defaultVal) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + isNull, err = rd.ColumnIsNull(newRow, 1, nil) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + isNull, err = rd.ColumnIsNull(newRow, 2, defaultVal) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) + isNull, err = rd.ColumnIsNull(newRow, 3, defaultVal) + c.Assert(err, IsNil) + c.Assert(isNull, IsFalse) + isNull, err = rd.ColumnIsNull(newRow, 3, nil) + c.Assert(err, IsNil) + c.Assert(isNull, IsTrue) +} + +func BenchmarkEncode(b *testing.B) { + b.ReportAllocs() + oldRow := types.MakeDatums(1, "abc", 1.1) + var xb Encoder + var buf []byte + colIDs := []int64{1, 2, 3} + var err error + for i := 0; i < b.N; i++ { + buf, err = xb.Encode(colIDs, oldRow, buf) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeFromOldRow(b *testing.B) { + b.ReportAllocs() + oldRow := types.MakeDatums(1, "abc", 1.1) + oldRowData, err := tablecodec.EncodeRow(new(stmtctx.StatementContext), oldRow, []int64{1, 2, 3}, nil, nil) + if err != nil { + b.Fatal(err) + } + var xb Encoder + var buf []byte + for i := 0; i < b.N; i++ { + buf, err = xb.EncodeFromOldRow(oldRowData, buf) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecode(b *testing.B) { + b.ReportAllocs() + oldRow := types.MakeDatums(1, "abc", 1.1) + colIDs := []int64{-1, 2, 3} + tps := []*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + types.NewFieldType(mysql.TypeString), + types.NewFieldType(mysql.TypeDouble), + } + var xb Encoder + xRowData, err := xb.Encode(colIDs, oldRow, nil) + if err != nil { + b.Fatal(err) + } + decoder, err := NewDecoder(colIDs, -1, tps, make([][]byte, 3), time.Local) + if err != nil { + b.Fatal(err) + } + chk := chunk.NewChunkWithCapacity(tps, 1) + for i := 0; i < b.N; i++ { + chk.Reset() + err = decoder.Decode(xRowData, 1, chk) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkIsNull(b *testing.B) { + b.ReportAllocs() + oldRow := types.MakeDatums(1, "abc", 1.1) + colIDs := []int64{-1, 2, 3} + tps := 
[]*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + types.NewFieldType(mysql.TypeString), + types.NewFieldType(mysql.TypeDouble), + } + var xb Encoder + xRowData, err := xb.Encode(colIDs, oldRow, nil) + if err != nil { + b.Fatal(err) + } + decoder, err := NewDecoder(colIDs, -1, tps, make([][]byte, 3), time.Local) + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + _, err = decoder.ColumnIsNull(xRowData, int64(i)%4, nil) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/kv/main.go b/kv/main.go new file mode 100644 index 00000000..36aca497 --- /dev/null +++ b/kv/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "flag" + "net" + _ "net/http/pprof" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/server" + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/kv/storage/raft_storage" + "github.com/pingcap-incubator/tinykv/kv/storage/standalone_storage" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +var ( + schedulerAddr = flag.String("scheduler", "", "scheduler address") + storeAddr = flag.String("addr", "", "store address") +) + +func main() { + flag.Parse() + conf := config.NewDefaultConfig() + if *schedulerAddr != "" { + conf.SchedulerAddr = *schedulerAddr + } + if *storeAddr != "" { + conf.StoreAddr = *storeAddr + } + log.SetLevelByString(conf.LogLevel) + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.Lshortfile) + log.Infof("conf %v", conf) + + var storage storage.Storage + if conf.Raft { + storage = raft_storage.NewRaftStorage(conf) + } else { + storage = standalone_storage.NewStandAloneStorage(conf) + } + if err := storage.Start(); err != nil { + log.Fatal(err) + } + server := server.NewServer(storage) + + var alivePolicy = keepalive.EnforcementPolicy{ + MinTime: 2 * time.Second, // If a client pings more than once every 2 seconds, terminate the connection + PermitWithoutStream: true, // Allow pings even when there are no active streams + } + + grpcServer := grpc.NewServer( + grpc.KeepaliveEnforcementPolicy(alivePolicy), + grpc.InitialWindowSize(1<<30), + grpc.InitialConnWindowSize(1<<30), + grpc.MaxRecvMsgSize(10*1024*1024), + ) + tinykvpb.RegisterTinyKvServer(grpcServer, server) + listenAddr := conf.StoreAddr[strings.IndexByte(conf.StoreAddr, ':'):] + l, err := net.Listen("tcp", listenAddr) + if err != nil { + log.Fatal(err) + } + handleSignal(grpcServer) + + err = grpcServer.Serve(l) + if err != nil { + log.Fatal(err) + } + log.Info("Server stopped.") +} + +func handleSignal(grpcServer *grpc.Server) { + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + go func() { + sig := <-sigCh + log.Infof("Got signal [%s] to exit.", sig) + grpcServer.Stop() + }() +} diff --git a/kv/raftstore/batch_system.go b/kv/raftstore/batch_system.go new file mode 100644 index 00000000..d0ef2f06 --- /dev/null +++ b/kv/raftstore/batch_system.go @@ -0,0 +1,313 @@ +package raftstore + +import ( + "bytes" + "sync" + "time" + + "github.com/Connor1996/badger" + "github.com/Connor1996/badger/y" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/runner" + 
"github.com/pingcap-incubator/tinykv/kv/raftstore/scheduler_client" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/btree" + "github.com/pingcap/errors" +) + +var _ btree.Item = ®ionItem{} + +type regionItem struct { + region *metapb.Region +} + +// Less returns true if the region start key is less than the other. +func (r *regionItem) Less(other btree.Item) bool { + left := r.region.GetStartKey() + right := other.(*regionItem).region.GetStartKey() + return bytes.Compare(left, right) < 0 +} + +type storeMeta struct { + /// region end key -> region ID + regionRanges *btree.BTree + /// region_id -> region + regions map[uint64]*metapb.Region + /// `MsgRequestVote` messages from newly split Regions shouldn't be dropped if there is no + /// such Region in this store now. So the messages are recorded temporarily and will be handled later. + pendingVotes []*rspb.RaftMessage +} + +func newStoreMeta() *storeMeta { + return &storeMeta{ + regionRanges: btree.New(2), + regions: map[uint64]*metapb.Region{}, + } +} + +func (m *storeMeta) setRegion(region *metapb.Region, peer *peer) { + m.regions[region.Id] = region + peer.SetRegion(region) +} + +// getOverlaps gets the regions which are overlapped with the specified region range. +func (m *storeMeta) getOverlapRegions(region *metapb.Region) []*metapb.Region { + item := ®ionItem{region: region} + var result *regionItem + // find is a helper function to find an item that contains the regions start key. + m.regionRanges.DescendLessOrEqual(item, func(i btree.Item) bool { + result = i.(*regionItem) + return false + }) + + if result == nil || engine_util.ExceedEndKey(region.GetStartKey(), result.region.GetEndKey()) { + result = item + } + + var overlaps []*metapb.Region + m.regionRanges.AscendGreaterOrEqual(result, func(i btree.Item) bool { + over := i.(*regionItem) + if engine_util.ExceedEndKey(over.region.GetStartKey(), region.GetEndKey()) { + return false + } + overlaps = append(overlaps, over.region) + return true + }) + return overlaps +} + +type GlobalContext struct { + cfg *config.Config + engine *engine_util.Engines + store *metapb.Store + storeMeta *storeMeta + snapMgr *snap.SnapManager + router *router + trans Transport + schedulerTaskSender chan<- worker.Task + regionTaskSender chan<- worker.Task + raftLogGCTaskSender chan<- worker.Task + splitCheckTaskSender chan<- worker.Task + schedulerClient scheduler_client.Client + tickDriverSender chan uint64 +} + +type Transport interface { + Send(msg *rspb.RaftMessage) error +} + +/// loadPeers loads peers in this store. It scans the db engine, loads all regions and their peers from it +/// WARN: This store should not be used before initialized. +func (bs *RaftBatchSystem) loadPeers() ([]*peer, error) { + // Scan region meta to get saved regions. 
+ startKey := meta.RegionMetaMinKey + endKey := meta.RegionMetaMaxKey + ctx := bs.ctx + kvEngine := ctx.engine.Kv + storeID := ctx.store.Id + + var totalCount, tombStoneCount int + var regionPeers []*peer + + t := time.Now() + kvWB := new(engine_util.WriteBatch) + raftWB := new(engine_util.WriteBatch) + err := kvEngine.View(func(txn *badger.Txn) error { + // get all regions from RegionLocalState + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + for it.Seek(startKey); it.Valid(); it.Next() { + item := it.Item() + if bytes.Compare(item.Key(), endKey) >= 0 { + break + } + regionID, suffix, err := meta.DecodeRegionMetaKey(item.Key()) + if err != nil { + return err + } + if suffix != meta.RegionStateSuffix { + continue + } + val, err := item.Value() + if err != nil { + return errors.WithStack(err) + } + totalCount++ + localState := new(rspb.RegionLocalState) + err = localState.Unmarshal(val) + if err != nil { + return errors.WithStack(err) + } + region := localState.Region + if localState.State == rspb.PeerState_Tombstone { + tombStoneCount++ + bs.clearStaleMeta(kvWB, raftWB, localState) + continue + } + + peer, err := createPeer(storeID, ctx.cfg, ctx.regionTaskSender, ctx.engine, region) + if err != nil { + return err + } + ctx.storeMeta.regionRanges.ReplaceOrInsert(®ionItem{region: region}) + ctx.storeMeta.regions[regionID] = region + // No need to check duplicated here, because we use region id as the key + // in DB. + regionPeers = append(regionPeers, peer) + } + return nil + }) + if err != nil { + return nil, err + } + kvWB.MustWriteToDB(ctx.engine.Kv) + raftWB.MustWriteToDB(ctx.engine.Raft) + + log.Infof("start store %d, region_count %d, tombstone_count %d, takes %v", + storeID, totalCount, tombStoneCount, time.Since(t)) + return regionPeers, nil +} + +func (bs *RaftBatchSystem) clearStaleMeta(kvWB, raftWB *engine_util.WriteBatch, originState *rspb.RegionLocalState) { + region := originState.Region + raftState, err := meta.GetRaftLocalState(bs.ctx.engine.Raft, region.Id) + if err != nil { + // it has been cleaned up. + return + } + err = ClearMeta(bs.ctx.engine, kvWB, raftWB, region.Id, raftState.LastIndex) + if err != nil { + panic(err) + } + if err := kvWB.SetMeta(meta.RegionStateKey(region.Id), originState); err != nil { + panic(err) + } +} + +type workers struct { + raftLogGCWorker *worker.Worker + schedulerWorker *worker.Worker + splitCheckWorker *worker.Worker + regionWorker *worker.Worker + wg *sync.WaitGroup +} + +type RaftBatchSystem struct { + ctx *GlobalContext + storeState *storeState + router *router + workers *workers + tickDriver *tickDriver + closeCh chan struct{} + wg *sync.WaitGroup +} + +func (bs *RaftBatchSystem) start( + meta *metapb.Store, + cfg *config.Config, + engines *engine_util.Engines, + trans Transport, + schedulerClient scheduler_client.Client, + snapMgr *snap.SnapManager) error { + y.Assert(bs.workers == nil) + // TODO: we can get cluster meta regularly too later. 
+ if err := cfg.Validate(); err != nil { + return err + } + err := snapMgr.Init() + if err != nil { + return err + } + wg := new(sync.WaitGroup) + bs.workers = &workers{ + splitCheckWorker: worker.NewWorker("split-check", wg), + regionWorker: worker.NewWorker("snapshot-worker", wg), + raftLogGCWorker: worker.NewWorker("raft-gc-worker", wg), + schedulerWorker: worker.NewWorker("scheduler-worker", wg), + wg: wg, + } + bs.ctx = &GlobalContext{ + cfg: cfg, + engine: engines, + store: meta, + storeMeta: newStoreMeta(), + snapMgr: snapMgr, + router: bs.router, + trans: trans, + schedulerTaskSender: bs.workers.schedulerWorker.Sender(), + regionTaskSender: bs.workers.regionWorker.Sender(), + splitCheckTaskSender: bs.workers.splitCheckWorker.Sender(), + raftLogGCTaskSender: bs.workers.raftLogGCWorker.Sender(), + schedulerClient: schedulerClient, + tickDriverSender: bs.tickDriver.newRegionCh, + } + regionPeers, err := bs.loadPeers() + if err != nil { + return err + } + + for _, peer := range regionPeers { + bs.router.register(peer) + } + bs.startWorkers(regionPeers) + return nil +} + +func (bs *RaftBatchSystem) startWorkers(peers []*peer) { + ctx := bs.ctx + workers := bs.workers + router := bs.router + bs.wg.Add(2) // raftWorker, storeWorker + rw := newRaftWorker(ctx, router) + go rw.run(bs.closeCh, bs.wg) + sw := newStoreWorker(ctx, bs.storeState) + go sw.run(bs.closeCh, bs.wg) + router.sendStore(message.Msg{Type: message.MsgTypeStoreStart, Data: ctx.store}) + for i := 0; i < len(peers); i++ { + regionID := peers[i].regionId + _ = router.send(regionID, message.Msg{RegionID: regionID, Type: message.MsgTypeStart}) + } + engines := ctx.engine + cfg := ctx.cfg + workers.splitCheckWorker.Start(runner.NewSplitCheckHandler(engines.Kv, NewRaftstoreRouter(router), cfg)) + workers.regionWorker.Start(runner.NewRegionTaskHandler(engines, ctx.snapMgr)) + workers.raftLogGCWorker.Start(runner.NewRaftLogGCTaskHandler()) + workers.schedulerWorker.Start(runner.NewSchedulerTaskHandler(ctx.store.Id, ctx.schedulerClient, NewRaftstoreRouter(router))) + go bs.tickDriver.run() +} + +func (bs *RaftBatchSystem) shutDown() { + close(bs.closeCh) + bs.wg.Wait() + bs.tickDriver.stop() + if bs.workers == nil { + return + } + workers := bs.workers + bs.workers = nil + workers.splitCheckWorker.Stop() + workers.regionWorker.Stop() + workers.raftLogGCWorker.Stop() + workers.schedulerWorker.Stop() + workers.wg.Wait() +} + +func CreateRaftBatchSystem(cfg *config.Config) (*RaftstoreRouter, *RaftBatchSystem) { + storeSender, storeState := newStoreState(cfg) + router := newRouter(storeSender) + raftBatchSystem := &RaftBatchSystem{ + router: router, + storeState: storeState, + tickDriver: newTickDriver(cfg.RaftBaseTickInterval, router, storeState.ticker), + closeCh: make(chan struct{}), + wg: new(sync.WaitGroup), + } + return NewRaftstoreRouter(router), raftBatchSystem +} diff --git a/kv/raftstore/bootstrap.go b/kv/raftstore/bootstrap.go new file mode 100644 index 00000000..ac9ee63c --- /dev/null +++ b/kv/raftstore/bootstrap.go @@ -0,0 +1,154 @@ +package raftstore + +import ( + "bytes" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap/errors" +) + +const ( + InitEpochVer uint64 = 1 + InitEpochConfVer uint64 = 1 +) + +func 
isRangeEmpty(engine *badger.DB, startKey, endKey []byte) (bool, error) {
+	var hasData bool
+	err := engine.View(func(txn *badger.Txn) error {
+		it := txn.NewIterator(badger.DefaultIteratorOptions)
+		defer it.Close()
+		it.Seek(startKey)
+		if it.Valid() {
+			item := it.Item()
+			if bytes.Compare(item.Key(), endKey) < 0 {
+				hasData = true
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return false, errors.WithStack(err)
+	}
+	return !hasData, err
+}
+
+func BootstrapStore(engines *engine_util.Engines, clusterID, storeID uint64) error {
+	ident := new(rspb.StoreIdent)
+	empty, err := isRangeEmpty(engines.Kv, meta.MinKey, meta.MaxKey)
+	if err != nil {
+		return err
+	}
+	if !empty {
+		return errors.New("kv store is not empty and has already had data.")
+	}
+	empty, err = isRangeEmpty(engines.Raft, meta.MinKey, meta.MaxKey)
+	if err != nil {
+		return err
+	}
+	if !empty {
+		return errors.New("raft store is not empty and has already had data.")
+	}
+	ident.ClusterId = clusterID
+	ident.StoreId = storeID
+	err = engine_util.PutMeta(engines.Kv, meta.StoreIdentKey, ident)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func PrepareBootstrap(engines *engine_util.Engines, storeID, regionID, peerID uint64) (*metapb.Region, error) {
+	region := &metapb.Region{
+		Id:       regionID,
+		StartKey: []byte{},
+		EndKey:   []byte{},
+		RegionEpoch: &metapb.RegionEpoch{
+			Version: InitEpochVer,
+			ConfVer: InitEpochConfVer,
+		},
+		Peers: []*metapb.Peer{
+			{
+				Id:      peerID,
+				StoreId: storeID,
+			},
+		},
+	}
+	err := PrepareBootstrapCluster(engines, region)
+	if err != nil {
+		return nil, err
+	}
+	return region, nil
+}
+
+func PrepareBootstrapCluster(engines *engine_util.Engines, region *metapb.Region) error {
+	state := new(rspb.RegionLocalState)
+	state.Region = region
+	kvWB := new(engine_util.WriteBatch)
+	kvWB.SetMeta(meta.PrepareBootstrapKey, state)
+	kvWB.SetMeta(meta.RegionStateKey(region.Id), state)
+	writeInitialApplyState(kvWB, region.Id)
+	err := engines.WriteKV(kvWB)
+	if err != nil {
+		return err
+	}
+	raftWB := new(engine_util.WriteBatch)
+	writeInitialRaftState(raftWB, region.Id)
+	err = engines.WriteRaft(raftWB)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func writeInitialApplyState(kvWB *engine_util.WriteBatch, regionID uint64) {
+	applyState := &rspb.RaftApplyState{
+		AppliedIndex: meta.RaftInitLogIndex,
+		TruncatedState: &rspb.RaftTruncatedState{
+			Index: meta.RaftInitLogIndex,
+			Term:  meta.RaftInitLogTerm,
+		},
+	}
+	kvWB.SetMeta(meta.ApplyStateKey(regionID), applyState)
+}
+
+func writeInitialRaftState(raftWB *engine_util.WriteBatch, regionID uint64) {
+	raftState := &rspb.RaftLocalState{
+		HardState: &eraftpb.HardState{
+			Term:   meta.RaftInitLogTerm,
+			Commit: meta.RaftInitLogIndex,
+		},
+		LastIndex: meta.RaftInitLogIndex,
+	}
+	raftWB.SetMeta(meta.RaftStateKey(regionID), raftState)
+}
+
+func ClearPrepareBootstrap(engines *engine_util.Engines, regionID uint64) error {
+	err := engines.Raft.Update(func(txn *badger.Txn) error {
+		return txn.Delete(meta.RaftStateKey(regionID))
+	})
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	wb := new(engine_util.WriteBatch)
+	wb.DeleteMeta(meta.PrepareBootstrapKey)
+	// should clear raft initial state too.
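+	// The raft state has already been deleted from the raft engine above; the
+	// region state and apply state are removed from the kv engine here.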
+ wb.DeleteMeta(meta.RegionStateKey(regionID)) + wb.DeleteMeta(meta.ApplyStateKey(regionID)) + err = engines.WriteKV(wb) + if err != nil { + return err + } + return nil +} + +func ClearPrepareBootstrapState(engines *engine_util.Engines) error { + err := engines.Kv.Update(func(txn *badger.Txn) error { + return txn.Delete(meta.PrepareBootstrapKey) + }) + return errors.WithStack(err) +} diff --git a/kv/raftstore/bootstrap_test.go b/kv/raftstore/bootstrap_test.go new file mode 100644 index 00000000..67b90209 --- /dev/null +++ b/kv/raftstore/bootstrap_test.go @@ -0,0 +1,38 @@ +package raftstore + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/stretchr/testify/require" +) + +func TestBootstrapStore(t *testing.T) { + engines := util.NewTestEngines() + defer engines.Destroy() + require.Nil(t, BootstrapStore(engines, 1, 1)) + require.NotNil(t, BootstrapStore(engines, 1, 1)) + _, err := PrepareBootstrap(engines, 1, 1, 1) + require.Nil(t, err) + region := new(metapb.Region) + require.Nil(t, engine_util.GetMeta(engines.Kv, meta.PrepareBootstrapKey, region)) + _, err = meta.GetRegionLocalState(engines.Kv, 1) + require.Nil(t, err) + _, err = meta.GetApplyState(engines.Kv, 1) + require.Nil(t, err) + _, err = meta.GetRaftLocalState(engines.Raft, 1) + require.Nil(t, err) + + require.Nil(t, ClearPrepareBootstrapState(engines)) + require.Nil(t, ClearPrepareBootstrap(engines, 1)) + empty, err := isRangeEmpty(engines.Kv, meta.RegionMetaPrefixKey(1), meta.RegionMetaPrefixKey(2)) + require.Nil(t, err) + require.True(t, empty) + + empty, err = isRangeEmpty(engines.Kv, meta.RegionRaftPrefixKey(1), meta.RegionRaftPrefixKey(2)) + require.Nil(t, err) + require.True(t, empty) +} diff --git a/kv/raftstore/cmd_resp.go b/kv/raftstore/cmd_resp.go new file mode 100644 index 00000000..cf5f97a5 --- /dev/null +++ b/kv/raftstore/cmd_resp.go @@ -0,0 +1,62 @@ +package raftstore + +import ( + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" +) + +func ensureRespHeader(resp *raft_cmdpb.RaftCmdResponse) { + header := resp.GetHeader() + if header == nil { + resp.Header = &raft_cmdpb.RaftResponseHeader{} + } +} + +func BindRespTerm(resp *raft_cmdpb.RaftCmdResponse, term uint64) { + if term == 0 { + return + } + ensureRespHeader(resp) + resp.Header.CurrentTerm = term +} + +func BindRespError(resp *raft_cmdpb.RaftCmdResponse, err error) { + ensureRespHeader(resp) + resp.Header.Error = util.RaftstoreErrToPbError(err) +} + +func ErrResp(err error) *raft_cmdpb.RaftCmdResponse { + resp := &raft_cmdpb.RaftCmdResponse{Header: &raft_cmdpb.RaftResponseHeader{}} + BindRespError(resp, err) + return resp +} + +func ErrRespWithTerm(err error, term uint64) *raft_cmdpb.RaftCmdResponse { + resp := ErrResp(err) + BindRespTerm(resp, term) + return resp +} + +func ErrRespStaleCommand(term uint64) *raft_cmdpb.RaftCmdResponse { + return ErrRespWithTerm(new(util.ErrStaleCommand), term) +} + +func ErrRespRegionNotFound(regionID uint64) *raft_cmdpb.RaftCmdResponse { + return &raft_cmdpb.RaftCmdResponse{ + Header: &raft_cmdpb.RaftResponseHeader{ + Error: &errorpb.Error{ + Message: "region is not found", + RegionNotFound: &errorpb.RegionNotFound{ + RegionId: regionID, + }, + }, + }, + } +} + +func 
newCmdRespForReq(req *raft_cmdpb.RaftCmdRequest) *raft_cmdpb.RaftCmdResponse {
+	return &raft_cmdpb.RaftCmdResponse{
+		Header: &raft_cmdpb.RaftResponseHeader{},
+	}
+}
diff --git a/kv/raftstore/message/callback.go b/kv/raftstore/message/callback.go
new file mode 100644
index 00000000..c7ac9541
--- /dev/null
+++ b/kv/raftstore/message/callback.go
@@ -0,0 +1,46 @@
+package message
+
+import (
+	"time"
+
+	"github.com/Connor1996/badger"
+	"github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb"
+)
+
+type Callback struct {
+	Resp *raft_cmdpb.RaftCmdResponse
+	Txn  *badger.Txn // used for GetSnap
+	done chan struct{}
+}
+
+func (cb *Callback) Done(resp *raft_cmdpb.RaftCmdResponse) {
+	if cb == nil {
+		return
+	}
+	if resp != nil {
+		cb.Resp = resp
+	}
+	cb.done <- struct{}{}
+}
+
+func (cb *Callback) WaitResp() *raft_cmdpb.RaftCmdResponse {
+	select {
+	case <-cb.done:
+		return cb.Resp
+	}
+}
+
+func (cb *Callback) WaitRespWithTimeout(timeout time.Duration) *raft_cmdpb.RaftCmdResponse {
+	select {
+	case <-cb.done:
+		return cb.Resp
+	case <-time.After(timeout):
+		return cb.Resp
+	}
+}
+
+func NewCallback() *Callback {
+	done := make(chan struct{}, 1)
+	cb := &Callback{done: done}
+	return cb
+}
diff --git a/kv/raftstore/message/msg.go b/kv/raftstore/message/msg.go
new file mode 100644
index 00000000..236beb85
--- /dev/null
+++ b/kv/raftstore/message/msg.go
@@ -0,0 +1,70 @@
+package message
+
+import (
+	"github.com/pingcap-incubator/tinykv/kv/raftstore/snap"
+	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
+	"github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb"
+)
+
+type MsgType int64
+
+const (
+	// just a placeholder
+	MsgTypeNull MsgType = 0
+	// message to start the ticker of peer
+	MsgTypeStart MsgType = 1
+	// message of base tick to drive the ticker
+	MsgTypeTick MsgType = 2
+	// message wraps a raft message that should be forwarded to the Raft module
+	// the raft message is from a peer on another store
+	MsgTypeRaftMessage MsgType = 3
+	// message wraps a raft command that may be a read/write request or admin request
+	// the raft command should be proposed to the Raft module
+	MsgTypeRaftCmd MsgType = 4
+	// message to trigger split region
+	// it first asks the Scheduler to allocate the new split region's ids, then schedules a
+	// MsgTypeRaftCmd with a split admin command
+	MsgTypeSplitRegion MsgType = 5
+	// message to update region approximate size
+	// it is sent by the split checker
+	MsgTypeRegionApproximateSize MsgType = 6
+	// message to trigger gc of generated snapshots
+	MsgTypeGcSnap MsgType = 7
+
+	// message wraps a raft message to a peer not existing on the Store.
+ // It is due to region split or add peer conf change + MsgTypeStoreRaftMessage MsgType = 101 + // message of store base tick to drive the store ticker, including store heartbeat + MsgTypeStoreTick MsgType = 106 + // message to start the ticker of store + MsgTypeStoreStart MsgType = 107 +) + +type Msg struct { + Type MsgType + RegionID uint64 + Data interface{} +} + +func NewMsg(tp MsgType, data interface{}) Msg { + return Msg{Type: tp, Data: data} +} + +func NewPeerMsg(tp MsgType, regionID uint64, data interface{}) Msg { + return Msg{Type: tp, RegionID: regionID, Data: data} +} + +type MsgGCSnap struct { + Snaps []snap.SnapKeyWithSending +} + +type MsgRaftCmd struct { + Request *raft_cmdpb.RaftCmdRequest + Callback *Callback +} + +type MsgSplitRegion struct { + RegionEpoch *metapb.RegionEpoch + SplitKey []byte + Callback *Callback +} diff --git a/kv/raftstore/message/raft_router.go b/kv/raftstore/message/raft_router.go new file mode 100644 index 00000000..67df8473 --- /dev/null +++ b/kv/raftstore/message/raft_router.go @@ -0,0 +1,12 @@ +package message + +import ( + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" +) + +type RaftRouter interface { + Send(regionID uint64, msg Msg) error + SendRaftMessage(msg *raft_serverpb.RaftMessage) error + SendRaftCommand(req *raft_cmdpb.RaftCmdRequest, cb *Callback) error +} diff --git a/kv/raftstore/meta/keys.go b/kv/raftstore/meta/keys.go new file mode 100644 index 00000000..8d311ada --- /dev/null +++ b/kv/raftstore/meta/keys.go @@ -0,0 +1,123 @@ +package meta + +import ( + "bytes" + "encoding/binary" + + "github.com/pingcap/errors" +) + +const ( + // local is in (0x01, 0x02) + LocalPrefix byte = 0x01 + + // We save two types region data in DB, for raft and other meta data. + // When the store starts, we should iterate all region meta data to + // construct peer, no need to travel large raft data, so we separate them + // with different prefixes. + RegionRaftPrefix byte = 0x02 + RegionMetaPrefix byte = 0x03 + RegionRaftPrefixLen = 11 // REGION_RAFT_PREFIX_KEY + region_id + suffix + RegionRaftLogLen = 19 // REGION_RAFT_PREFIX_KEY + region_id + suffix + index + + // Following are the suffix after the local prefix. + // For region id + RaftLogSuffix byte = 0x01 + RaftStateSuffix byte = 0x02 + ApplyStateSuffix byte = 0x03 + + // For region meta + RegionStateSuffix byte = 0x01 +) + +var ( + MinKey = []byte{} + MaxKey = []byte{255} + LocalMinKey = []byte{LocalPrefix} + LocalMaxKey = []byte{LocalPrefix + 1} + RegionMetaMinKey = []byte{LocalPrefix, RegionMetaPrefix} + RegionMetaMaxKey = []byte{LocalPrefix, RegionMetaPrefix + 1} + + // Following keys are all local keys, so the first byte must be 0x01. 
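+	// For reference, the region-related keys built below have the layout
+	// (region id is 8 bytes, big-endian):
+	//   region state key: 0x01 0x03 | region_id | 0x01
+	//   raft state key:   0x01 0x02 | region_id | 0x02
+	//   raft log key:     0x01 0x02 | region_id | 0x01 | log_index(8 bytes)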
+ PrepareBootstrapKey = []byte{LocalPrefix, 0x01} + StoreIdentKey = []byte{LocalPrefix, 0x02} +) + +func makeRegionPrefix(regionID uint64, suffix byte) []byte { + key := make([]byte, 11) + key[0] = LocalPrefix + key[1] = RegionRaftPrefix + binary.BigEndian.PutUint64(key[2:], regionID) + key[10] = suffix + return key +} + +func makeRegionKey(regionID uint64, suffix byte, subID uint64) []byte { + key := make([]byte, 19) + key[0] = LocalPrefix + key[1] = RegionRaftPrefix + binary.BigEndian.PutUint64(key[2:], regionID) + key[10] = suffix + binary.BigEndian.PutUint64(key[11:], subID) + return key +} + +func RegionRaftPrefixKey(regionID uint64) []byte { + key := make([]byte, 10) + key[0] = LocalPrefix + key[1] = RegionRaftPrefix + binary.BigEndian.PutUint64(key[2:], regionID) + return key +} + +func RaftLogKey(regionID, index uint64) []byte { + return makeRegionKey(regionID, RaftLogSuffix, index) +} + +func RaftStateKey(regionID uint64) []byte { + return makeRegionPrefix(regionID, RaftStateSuffix) +} + +func ApplyStateKey(regionID uint64) []byte { + return makeRegionPrefix(regionID, ApplyStateSuffix) +} + +func IsRaftStateKey(key []byte) bool { + return len(key) == 11 && key[0] == LocalPrefix && key[1] == RegionRaftPrefix +} + +func DecodeRegionMetaKey(key []byte) (uint64, byte, error) { + if len(RegionMetaMinKey)+8+1 != len(key) { + return 0, 0, errors.Errorf("invalid region meta key length for key %v", key) + } + if !bytes.HasPrefix(key, RegionMetaMinKey) { + return 0, 0, errors.Errorf("invalid region meta key prefix for key %v", key) + } + regionID := binary.BigEndian.Uint64(key[len(RegionMetaMinKey):]) + return regionID, key[len(key)-1], nil +} + +func RegionMetaPrefixKey(regionID uint64) []byte { + key := make([]byte, 10) + key[0] = LocalPrefix + key[1] = RegionMetaPrefix + binary.BigEndian.PutUint64(key[2:], regionID) + return key +} + +func RegionStateKey(regionID uint64) []byte { + key := make([]byte, 11) + key[0] = LocalPrefix + key[1] = RegionMetaPrefix + binary.BigEndian.PutUint64(key[2:], regionID) + key[10] = RegionStateSuffix + return key +} + +/// RaftLogIndex gets the log index from raft log key generated by `raft_log_key`. 
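+/// For example, RaftLogIndex(RaftLogKey(2, 7)) returns 7.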
+func RaftLogIndex(key []byte) (uint64, error) { + if len(key) != RegionRaftLogLen { + return 0, errors.Errorf("key %v is not a valid raft log key", key) + } + return binary.BigEndian.Uint64(key[RegionRaftLogLen-8:]), nil +} diff --git a/kv/raftstore/meta/values.go b/kv/raftstore/meta/values.go new file mode 100644 index 00000000..788089bf --- /dev/null +++ b/kv/raftstore/meta/values.go @@ -0,0 +1,119 @@ +package meta + +import ( + "github.com/Connor1996/badger" + "github.com/Connor1996/badger/y" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap/errors" +) + +func GetRegionLocalState(db *badger.DB, regionId uint64) (*rspb.RegionLocalState, error) { + regionLocalState := new(rspb.RegionLocalState) + if err := engine_util.GetMeta(db, RegionStateKey(regionId), regionLocalState); err != nil { + return regionLocalState, err + } + return regionLocalState, nil +} + +func GetRaftLocalState(db *badger.DB, regionId uint64) (*rspb.RaftLocalState, error) { + raftLocalState := new(rspb.RaftLocalState) + if err := engine_util.GetMeta(db, RaftStateKey(regionId), raftLocalState); err != nil { + return raftLocalState, err + } + return raftLocalState, nil +} + +func GetApplyState(db *badger.DB, regionId uint64) (*rspb.RaftApplyState, error) { + applyState := new(rspb.RaftApplyState) + if err := engine_util.GetMeta(db, ApplyStateKey(regionId), applyState); err != nil { + return nil, err + } + return applyState, nil +} + +func GetRaftEntry(db *badger.DB, regionId, idx uint64) (*eraftpb.Entry, error) { + entry := new(eraftpb.Entry) + if err := engine_util.GetMeta(db, RaftLogKey(regionId, idx), entry); err != nil { + return nil, err + } + return entry, nil +} + +const ( + // When we create a region peer, we should initialize its log term/index > 0, + // so that we can force the follower peer to sync the snapshot first. 
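+	// A peer replicated through a conf change starts with an empty log (index 0),
+	// so the leader's first append cannot match and a snapshot is sent instead.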
+ RaftInitLogTerm = 5 + RaftInitLogIndex = 5 +) + +func InitRaftLocalState(raftEngine *badger.DB, region *metapb.Region) (*rspb.RaftLocalState, error) { + raftState, err := GetRaftLocalState(raftEngine, region.Id) + if err != nil && err != badger.ErrKeyNotFound { + return nil, err + } + if err == badger.ErrKeyNotFound { + raftState = new(rspb.RaftLocalState) + raftState.HardState = new(eraftpb.HardState) + if len(region.Peers) > 0 { + // new split region + raftState.LastIndex = RaftInitLogIndex + raftState.HardState.Term = RaftInitLogTerm + raftState.HardState.Commit = RaftInitLogIndex + err = engine_util.PutMeta(raftEngine, RaftStateKey(region.Id), raftState) + if err != nil { + return raftState, err + } + } + } + return raftState, nil +} + +func InitApplyState(kvEngine *badger.DB, region *metapb.Region) (*rspb.RaftApplyState, error) { + applyState, err := GetApplyState(kvEngine, region.Id) + if err != nil && err != badger.ErrKeyNotFound { + return nil, err + } + if err == badger.ErrKeyNotFound { + applyState = new(rspb.RaftApplyState) + applyState.TruncatedState = new(rspb.RaftTruncatedState) + if len(region.Peers) > 0 { + applyState.AppliedIndex = RaftInitLogIndex + applyState.TruncatedState.Index = RaftInitLogIndex + applyState.TruncatedState.Term = RaftInitLogTerm + } + err = engine_util.PutMeta(kvEngine, ApplyStateKey(region.Id), applyState) + if err != nil { + return applyState, err + } + } + return applyState, nil +} + +func InitLastTerm(raftEngine *badger.DB, region *metapb.Region, + raftState *rspb.RaftLocalState, applyState *rspb.RaftApplyState) (uint64, error) { + lastIdx := raftState.LastIndex + if lastIdx == 0 { + return 0, nil + } else if lastIdx == RaftInitLogIndex { + return RaftInitLogTerm, nil + } else if lastIdx == applyState.TruncatedState.Index { + return applyState.TruncatedState.Term, nil + } else { + y.Assert(lastIdx > RaftInitLogIndex) + } + e, err := GetRaftEntry(raftEngine, region.Id, lastIdx) + if err != nil { + return 0, errors.Errorf("[region %s] entry at %d doesn't exist, may lost data.", region, lastIdx) + } + return e.Term, nil +} + +func WriteRegionState(kvWB *engine_util.WriteBatch, region *metapb.Region, state rspb.PeerState) { + regionState := new(rspb.RegionLocalState) + regionState.State = state + regionState.Region = region + kvWB.SetMeta(RegionStateKey(region.Id), regionState) +} diff --git a/kv/raftstore/node.go b/kv/raftstore/node.go new file mode 100644 index 00000000..703a245b --- /dev/null +++ b/kv/raftstore/node.go @@ -0,0 +1,208 @@ +package raftstore + +import ( + "context" + "time" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/scheduler_client" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap/errors" +) + +type Node struct { + clusterID uint64 + store *metapb.Store + cfg *config.Config + system *RaftBatchSystem + schedulerClient scheduler_client.Client +} + +func NewNode(system *RaftBatchSystem, cfg *config.Config, schedulerClient scheduler_client.Client) *Node { + return &Node{ + clusterID: 
schedulerClient.GetClusterID((context.TODO())), + store: &metapb.Store{ + Address: cfg.StoreAddr, + }, + cfg: cfg, + system: system, + schedulerClient: schedulerClient, + } +} + +func (n *Node) Start(ctx context.Context, engines *engine_util.Engines, trans Transport, snapMgr *snap.SnapManager) error { + storeID, err := n.checkStore(engines) + if err != nil { + return err + } + if storeID == util.InvalidID { + storeID, err = n.bootstrapStore(ctx, engines) + } + if err != nil { + return err + } + n.store.Id = storeID + + firstRegion, err := n.checkOrPrepareBootstrapCluster(ctx, engines, storeID) + if err != nil { + return err + } + newCluster := firstRegion != nil + if newCluster { + log.Infof("try bootstrap cluster, storeID: %d, region: %s", storeID, firstRegion) + newCluster, err = n.BootstrapCluster(ctx, engines, firstRegion) + if err != nil { + return err + } + } + + err = n.schedulerClient.PutStore(ctx, n.store) + if err != nil { + return err + } + if err = n.startNode(engines, trans, snapMgr); err != nil { + return err + } + + return nil +} + +func (n *Node) checkStore(engines *engine_util.Engines) (uint64, error) { + ident := new(raft_serverpb.StoreIdent) + err := engine_util.GetMeta(engines.Kv, meta.StoreIdentKey, ident) + if err != nil { + if err == badger.ErrKeyNotFound { + return 0, nil + } + return 0, err + } + + if ident.ClusterId != n.clusterID { + return 0, errors.Errorf("cluster ID mismatch, local %d != remote %d", ident.ClusterId, n.clusterID) + } + + if ident.StoreId == util.InvalidID { + return 0, errors.Errorf("invalid store ident %s", ident) + } + return ident.StoreId, nil +} + +func (n *Node) bootstrapStore(ctx context.Context, engines *engine_util.Engines) (uint64, error) { + storeID, err := n.allocID(ctx) + if err != nil { + return 0, err + } + err = BootstrapStore(engines, n.clusterID, storeID) + return storeID, err +} + +func (n *Node) allocID(ctx context.Context) (uint64, error) { + return n.schedulerClient.AllocID(ctx) +} + +func (n *Node) checkOrPrepareBootstrapCluster(ctx context.Context, engines *engine_util.Engines, storeID uint64) (*metapb.Region, error) { + var state raft_serverpb.RegionLocalState + if err := engine_util.GetMeta(engines.Kv, meta.PrepareBootstrapKey, &state); err == nil { + return state.Region, nil + } + bootstrapped, err := n.checkClusterBootstrapped(ctx) + if err != nil { + return nil, err + } + if bootstrapped { + return nil, nil + } + return n.prepareBootstrapCluster(ctx, engines, storeID) +} + +const ( + MaxCheckClusterBootstrappedRetryCount = 60 + CheckClusterBootstrapRetrySeconds = 3 +) + +func (n *Node) checkClusterBootstrapped(ctx context.Context) (bool, error) { + for i := 0; i < MaxCheckClusterBootstrappedRetryCount; i++ { + bootstrapped, err := n.schedulerClient.IsBootstrapped(ctx) + if err == nil { + return bootstrapped, nil + } + log.Warnf("check cluster bootstrapped failed, err: %v", err) + time.Sleep(time.Second * CheckClusterBootstrapRetrySeconds) + } + return false, errors.New("check cluster bootstrapped failed") +} + +func (n *Node) prepareBootstrapCluster(ctx context.Context, engines *engine_util.Engines, storeID uint64) (*metapb.Region, error) { + regionID, err := n.allocID(ctx) + if err != nil { + return nil, err + } + log.Infof("alloc first region id, regionID: %d, clusterID: %d, storeID: %d", regionID, n.clusterID, storeID) + peerID, err := n.allocID(ctx) + if err != nil { + return nil, err + } + log.Infof("alloc first peer id for first region, peerID: %d, regionID: %d", peerID, regionID) + + return 
PrepareBootstrap(engines, storeID, regionID, peerID) +} + +func (n *Node) BootstrapCluster(ctx context.Context, engines *engine_util.Engines, firstRegion *metapb.Region) (newCluster bool, err error) { + regionID := firstRegion.GetId() + for retry := 0; retry < MaxCheckClusterBootstrappedRetryCount; retry++ { + if retry != 0 { + time.Sleep(time.Second) + } + + res, err := n.schedulerClient.Bootstrap(ctx, n.store) + if err != nil { + log.Errorf("bootstrap cluster failed, clusterID: %d, err: %v", n.clusterID, err) + continue + } + resErr := res.GetHeader().GetError() + if resErr == nil { + log.Infof("bootstrap cluster ok, clusterID: %d", n.clusterID) + return true, ClearPrepareBootstrapState(engines) + } + if resErr.GetType() == schedulerpb.ErrorType_ALREADY_BOOTSTRAPPED { + region, _, err := n.schedulerClient.GetRegion(ctx, []byte{}) + if err != nil { + log.Errorf("get first region failed, err: %v", err) + continue + } + if region.GetId() == regionID { + return false, ClearPrepareBootstrapState(engines) + } + log.Infof("cluster is already bootstrapped, clusterID: %v", n.clusterID) + return false, ClearPrepareBootstrap(engines, regionID) + } + log.Errorf("bootstrap cluster, clusterID: %v, err: %v", n.clusterID, resErr) + } + return false, errors.New("bootstrap cluster failed") +} + +func (n *Node) startNode(engines *engine_util.Engines, trans Transport, snapMgr *snap.SnapManager) error { + log.Infof("start raft store node, storeID: %d", n.store.GetId()) + return n.system.start(n.store, n.cfg, engines, trans, n.schedulerClient, snapMgr) +} + +func (n *Node) stopNode(storeID uint64) { + log.Infof("stop raft store thread, storeID: %d", storeID) + n.system.shutDown() +} + +func (n *Node) Stop() { + n.stopNode(n.store.GetId()) +} + +func (n *Node) GetStoreID() uint64 { + return n.store.GetId() +} diff --git a/kv/raftstore/peer.go b/kv/raftstore/peer.go new file mode 100644 index 00000000..4767a110 --- /dev/null +++ b/kv/raftstore/peer.go @@ -0,0 +1,387 @@ +package raftstore + +import ( + "fmt" + "time" + + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/runner" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/raft" + "github.com/pingcap/errors" +) + +func NotifyStaleReq(term uint64, cb *message.Callback) { + cb.Done(ErrRespStaleCommand(term)) +} + +func NotifyReqRegionRemoved(regionId uint64, cb *message.Callback) { + regionNotFound := &util.ErrRegionNotFound{RegionId: regionId} + resp := ErrResp(regionNotFound) + cb.Done(resp) +} + +// If we create the peer actively, like bootstrap/split/merge region, we should +// use this function to create the peer. The region must contain the peer info +// for this store. 
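+// Peers created passively from raft messages (e.g. added by a conf change on
+// another store) go through replicatePeer below instead.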
+func createPeer(storeID uint64, cfg *config.Config, sched chan<- worker.Task, + engines *engine_util.Engines, region *metapb.Region) (*peer, error) { + metaPeer := util.FindPeer(region, storeID) + if metaPeer == nil { + return nil, errors.Errorf("find no peer for store %d in region %v", storeID, region) + } + log.Infof("region %v create peer with ID %d", region, metaPeer.Id) + return NewPeer(storeID, cfg, engines, region, sched, metaPeer) +} + +// The peer can be created from another node with raft membership changes, and we only +// know the region_id and peer_id when creating this replicated peer, the region info +// will be retrieved later after applying snapshot. +func replicatePeer(storeID uint64, cfg *config.Config, sched chan<- worker.Task, + engines *engine_util.Engines, regionID uint64, metaPeer *metapb.Peer) (*peer, error) { + // We will remove tombstone key when apply snapshot + log.Infof("[region %v] replicates peer with ID %d", regionID, metaPeer.GetId()) + region := &metapb.Region{ + Id: regionID, + RegionEpoch: &metapb.RegionEpoch{}, + } + return NewPeer(storeID, cfg, engines, region, sched, metaPeer) +} + +type proposal struct { + // index + term for unique identification + index uint64 + term uint64 + cb *message.Callback +} + +type peer struct { + // The ticker of the peer, used to trigger + // * raft tick + // * raft log gc + // * region heartbeat + // * split check + ticker *ticker + // Instance of the Raft module + RaftGroup *raft.RawNode + // The peer storage for the Raft module + peerStorage *PeerStorage + + // Record the meta information of the peer + Meta *metapb.Peer + regionId uint64 + // Tag which is useful for printing log + Tag string + + // Record the callback of the proposals + // (Used in 2B) + proposals []*proposal + + // Index of last scheduled compacted raft log. + // (Used in 2C) + LastCompactedIdx uint64 + + // Cache the peers information from other stores + // when sending raft messages to other peers, it's used to get the store id of target peer + // (Used in 3B conf change) + peerCache map[uint64]*metapb.Peer + // Record the instants of peers being added into the configuration. + // Remove them after they are not pending any more. + // (Used in 3B conf change) + PeersStartPendingTime map[uint64]time.Time + // Mark the peer as stopped, set when peer is destroyed + // (Used in 3B conf change) + stopped bool + + // An inaccurate difference in region size since last reset. + // split checker is triggered when it exceeds the threshold, it makes split checker not scan the data very often + // (Used in 3B split) + SizeDiffHint uint64 + // Approximate size of the region. 
+ // It's updated everytime the split checker scan the data + // (Used in 3B split) + ApproximateSize *uint64 +} + +func NewPeer(storeId uint64, cfg *config.Config, engines *engine_util.Engines, region *metapb.Region, regionSched chan<- worker.Task, + meta *metapb.Peer) (*peer, error) { + if meta.GetId() == util.InvalidID { + return nil, fmt.Errorf("invalid peer id") + } + tag := fmt.Sprintf("[region %v] %v", region.GetId(), meta.GetId()) + + ps, err := NewPeerStorage(engines, region, regionSched, tag) + if err != nil { + return nil, err + } + + appliedIndex := ps.AppliedIndex() + + raftCfg := &raft.Config{ + ID: meta.GetId(), + ElectionTick: cfg.RaftElectionTimeoutTicks, + HeartbeatTick: cfg.RaftHeartbeatTicks, + Applied: appliedIndex, + Storage: ps, + } + + raftGroup, err := raft.NewRawNode(raftCfg) + if err != nil { + return nil, err + } + p := &peer{ + Meta: meta, + regionId: region.GetId(), + RaftGroup: raftGroup, + peerStorage: ps, + peerCache: make(map[uint64]*metapb.Peer), + PeersStartPendingTime: make(map[uint64]time.Time), + Tag: tag, + ticker: newTicker(region.GetId(), cfg), + } + + // If this region has only one peer and I am the one, campaign directly. + if len(region.GetPeers()) == 1 && region.GetPeers()[0].GetStoreId() == storeId { + err = p.RaftGroup.Campaign() + if err != nil { + return nil, err + } + } + + return p, nil +} + +func (p *peer) insertPeerCache(peer *metapb.Peer) { + p.peerCache[peer.GetId()] = peer +} + +func (p *peer) removePeerCache(peerID uint64) { + delete(p.peerCache, peerID) +} + +func (p *peer) getPeerFromCache(peerID uint64) *metapb.Peer { + if peer, ok := p.peerCache[peerID]; ok { + return peer + } + for _, peer := range p.peerStorage.Region().GetPeers() { + if peer.GetId() == peerID { + p.insertPeerCache(peer) + return peer + } + } + return nil +} + +func (p *peer) nextProposalIndex() uint64 { + return p.RaftGroup.Raft.RaftLog.LastIndex() + 1 +} + +/// Tries to destroy itself. Returns a job (if needed) to do more cleaning tasks. +func (p *peer) MaybeDestroy() bool { + if p.stopped { + log.Infof("%v is being destroyed, skip", p.Tag) + return false + } + return true +} + +/// Does the real destroy worker.Task which includes: +/// 1. Set the region to tombstone; +/// 2. Clear data; +/// 3. Notify all pending requests. +func (p *peer) Destroy(engine *engine_util.Engines, keepData bool) error { + start := time.Now() + region := p.Region() + log.Infof("%v begin to destroy", p.Tag) + + // Set Tombstone state explicitly + kvWB := new(engine_util.WriteBatch) + raftWB := new(engine_util.WriteBatch) + if err := p.peerStorage.clearMeta(kvWB, raftWB); err != nil { + return err + } + meta.WriteRegionState(kvWB, region, rspb.PeerState_Tombstone) + // write kv rocksdb first in case of restart happen between two write + if err := kvWB.WriteToDB(engine.Kv); err != nil { + return err + } + if err := raftWB.WriteToDB(engine.Raft); err != nil { + return err + } + + if p.peerStorage.isInitialized() && !keepData { + // If we meet panic when deleting data and raft log, the dirty data + // will be cleared by a newer snapshot applying or restart. 
+ p.peerStorage.ClearData() + } + + for _, proposal := range p.proposals { + NotifyReqRegionRemoved(region.Id, proposal.cb) + } + p.proposals = nil + + log.Infof("%v destroy itself, takes %v", p.Tag, time.Now().Sub(start)) + return nil +} + +func (p *peer) isInitialized() bool { + return p.peerStorage.isInitialized() +} + +func (p *peer) storeID() uint64 { + return p.Meta.StoreId +} + +func (p *peer) Region() *metapb.Region { + return p.peerStorage.Region() +} + +/// Set the region of a peer. +/// +/// This will update the region of the peer, caller must ensure the region +/// has been preserved in a durable device. +func (p *peer) SetRegion(region *metapb.Region) { + p.peerStorage.SetRegion(region) +} + +func (p *peer) PeerId() uint64 { + return p.Meta.GetId() +} + +func (p *peer) LeaderId() uint64 { + return p.RaftGroup.Raft.Lead +} + +func (p *peer) IsLeader() bool { + return p.RaftGroup.Raft.State == raft.StateLeader +} + +func (p *peer) Send(trans Transport, msgs []eraftpb.Message) { + for _, msg := range msgs { + err := p.sendRaftMessage(msg, trans) + if err != nil { + log.Debugf("%v send message err: %v", p.Tag, err) + } + } +} + +/// Collects all pending peers and update `peers_start_pending_time`. +func (p *peer) CollectPendingPeers() []*metapb.Peer { + pendingPeers := make([]*metapb.Peer, 0, len(p.Region().GetPeers())) + truncatedIdx := p.peerStorage.truncatedIndex() + for id, progress := range p.RaftGroup.GetProgress() { + if id == p.Meta.GetId() { + continue + } + if progress.Match < truncatedIdx { + if peer := p.getPeerFromCache(id); peer != nil { + pendingPeers = append(pendingPeers, peer) + if _, ok := p.PeersStartPendingTime[id]; !ok { + now := time.Now() + p.PeersStartPendingTime[id] = now + log.Debugf("%v peer %v start pending at %v", p.Tag, id, now) + } + } + } + } + return pendingPeers +} + +func (p *peer) clearPeersStartPendingTime() { + for id := range p.PeersStartPendingTime { + delete(p.PeersStartPendingTime, id) + } +} + +/// Returns `true` if any new peer catches up with the leader in replicating logs. +/// And updates `PeersStartPendingTime` if needed. +func (p *peer) AnyNewPeerCatchUp(peerId uint64) bool { + if len(p.PeersStartPendingTime) == 0 { + return false + } + if !p.IsLeader() { + p.clearPeersStartPendingTime() + return false + } + if startPendingTime, ok := p.PeersStartPendingTime[peerId]; ok { + truncatedIdx := p.peerStorage.truncatedIndex() + progress, ok := p.RaftGroup.Raft.Prs[peerId] + if ok { + if progress.Match >= truncatedIdx { + delete(p.PeersStartPendingTime, peerId) + elapsed := time.Since(startPendingTime) + log.Debugf("%v peer %v has caught up logs, elapsed: %v", p.Tag, peerId, elapsed) + return true + } + } + } + return false +} + +func (p *peer) MaybeCampaign(parentIsLeader bool) bool { + // The peer campaigned when it was created, no need to do it again. + if len(p.Region().GetPeers()) <= 1 || !parentIsLeader { + return false + } + + // If last peer is the leader of the region before split, it's intuitional for + // it to become the leader of new split region. 
+ p.RaftGroup.Campaign() + return true +} + +func (p *peer) Term() uint64 { + return p.RaftGroup.Raft.Term +} + +func (p *peer) HeartbeatScheduler(ch chan<- worker.Task) { + ch <- &runner.SchedulerRegionHeartbeatTask{ + Region: p.Region(), + Peer: p.Meta, + PendingPeers: p.CollectPendingPeers(), + ApproximateSize: p.ApproximateSize, + } +} + +func (p *peer) sendRaftMessage(msg eraftpb.Message, trans Transport) error { + sendMsg := new(rspb.RaftMessage) + sendMsg.RegionId = p.regionId + // set current epoch + sendMsg.RegionEpoch = &metapb.RegionEpoch{ + ConfVer: p.Region().RegionEpoch.ConfVer, + Version: p.Region().RegionEpoch.Version, + } + + fromPeer := *p.Meta + toPeer := p.getPeerFromCache(msg.To) + if toPeer == nil { + return fmt.Errorf("failed to lookup recipient peer %v in region %v", msg.To, p.regionId) + } + log.Debugf("%v, send raft msg %v from %v to %v", p.Tag, msg.MsgType, fromPeer.Id, toPeer.Id) + + sendMsg.FromPeer = &fromPeer + sendMsg.ToPeer = toPeer + + // There could be two cases: + // 1. Target peer already exists but has not established communication with leader yet + // 2. Target peer is added newly due to member change or region split, but it's not + // created yet + // For both cases the region start key and end key are attached in RequestVote and + // Heartbeat message for the store of that peer to check whether to create a new peer + // when receiving these messages, or just to wait for a pending region split to perform + // later. + if p.peerStorage.isInitialized() && util.IsInitialMsg(&msg) { + sendMsg.StartKey = append([]byte{}, p.Region().StartKey...) + sendMsg.EndKey = append([]byte{}, p.Region().EndKey...) + } + sendMsg.Message = &msg + return trans.Send(sendMsg) +} diff --git a/kv/raftstore/peer_msg_handler.go b/kv/raftstore/peer_msg_handler.go new file mode 100644 index 00000000..b2dfd625 --- /dev/null +++ b/kv/raftstore/peer_msg_handler.go @@ -0,0 +1,567 @@ +package raftstore + +import ( + "fmt" + "time" + + "github.com/Connor1996/badger/y" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/runner" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/btree" + "github.com/pingcap/errors" +) + +type PeerTick int + +const ( + PeerTickRaft PeerTick = 0 + PeerTickRaftLogGC PeerTick = 1 + PeerTickSplitRegionCheck PeerTick = 2 + PeerTickSchedulerHeartbeat PeerTick = 3 +) + +type peerMsgHandler struct { + *peer + ctx *GlobalContext +} + +func newPeerMsgHandler(peer *peer, ctx *GlobalContext) *peerMsgHandler { + return &peerMsgHandler{ + peer: peer, + ctx: ctx, + } +} + +func (d *peerMsgHandler) HandleRaftReady() { + if d.stopped { + return + } + // Your Code Here (2B). 
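+	//
+	// One possible shape of this method (an illustrative sketch only, not the
+	// reference solution; it assumes the raft package's RawNode exposes an
+	// etcd-style HasReady/Ready/Advance API and that Ready carries outbound
+	// Messages and CommittedEntries):
+	//
+	//	if d.RaftGroup.HasReady() {
+	//		rd := d.RaftGroup.Ready()
+	//		// persist raft state, entries and a possible snapshot first
+	//		if _, err := d.peerStorage.SaveReadyState(&rd); err != nil {
+	//			panic(err)
+	//		}
+	//		// then send messages and apply the committed entries
+	//		d.Send(d.ctx.trans, rd.Messages)
+	//		// ... apply rd.CommittedEntries to the KV engine, answer proposals ...
+	//		d.RaftGroup.Advance(rd)
+	//	}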
+} + +func (d *peerMsgHandler) HandleMsg(msg message.Msg) { + switch msg.Type { + case message.MsgTypeRaftMessage: + raftMsg := msg.Data.(*rspb.RaftMessage) + if err := d.onRaftMsg(raftMsg); err != nil { + log.Errorf("%s handle raft message error %v", d.Tag, err) + } + case message.MsgTypeRaftCmd: + raftCMD := msg.Data.(*message.MsgRaftCmd) + d.proposeRaftCommand(raftCMD.Request, raftCMD.Callback) + case message.MsgTypeTick: + d.onTick() + case message.MsgTypeSplitRegion: + split := msg.Data.(*message.MsgSplitRegion) + log.Infof("%s on split with %v", d.Tag, split.SplitKey) + d.onPrepareSplitRegion(split.RegionEpoch, split.SplitKey, split.Callback) + case message.MsgTypeRegionApproximateSize: + d.onApproximateRegionSize(msg.Data.(uint64)) + case message.MsgTypeGcSnap: + gcSnap := msg.Data.(*message.MsgGCSnap) + d.onGCSnap(gcSnap.Snaps) + case message.MsgTypeStart: + d.startTicker() + } +} + +func (d *peerMsgHandler) preProposeRaftCommand(req *raft_cmdpb.RaftCmdRequest) error { + // Check store_id, make sure that the msg is dispatched to the right place. + if err := util.CheckStoreID(req, d.storeID()); err != nil { + return err + } + + // Check whether the store has the right peer to handle the request. + regionID := d.regionId + leaderID := d.LeaderId() + if !d.IsLeader() { + leader := d.getPeerFromCache(leaderID) + return &util.ErrNotLeader{RegionId: regionID, Leader: leader} + } + // peer_id must be the same as peer's. + if err := util.CheckPeerID(req, d.PeerId()); err != nil { + return err + } + // Check whether the term is stale. + if err := util.CheckTerm(req, d.Term()); err != nil { + return err + } + err := util.CheckRegionEpoch(req, d.Region(), true) + if errEpochNotMatching, ok := err.(*util.ErrEpochNotMatch); ok { + // Attach the region which might be split from the current region. But it doesn't + // matter if the region is not split from the current region. If the region meta + // received by the TiKV driver is newer than the meta cached in the driver, the meta is + // updated. + siblingRegion := d.findSiblingRegion() + if siblingRegion != nil { + errEpochNotMatching.Regions = append(errEpochNotMatching.Regions, siblingRegion) + } + return errEpochNotMatching + } + return err +} + +func (d *peerMsgHandler) proposeRaftCommand(msg *raft_cmdpb.RaftCmdRequest, cb *message.Callback) { + err := d.preProposeRaftCommand(msg) + if err != nil { + cb.Done(ErrResp(err)) + return + } + // Your Code Here (2B). 
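+	//
+	// A hedged sketch of what a 2B implementation might do here (illustrative
+	// only, not the reference solution; it assumes RawNode offers an etcd-style
+	// Propose(data []byte) method):
+	//
+	//	data, err := msg.Marshal()
+	//	if err != nil {
+	//		cb.Done(ErrResp(err))
+	//		return
+	//	}
+	//	// remember the callback under the (index, term) the entry will get,
+	//	// so HandleRaftReady can answer it once the entry is applied
+	//	d.proposals = append(d.proposals, &proposal{
+	//		index: d.nextProposalIndex(),
+	//		term:  d.Term(),
+	//		cb:    cb,
+	//	})
+	//	d.RaftGroup.Propose(data)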
+} + +func (d *peerMsgHandler) onTick() { + if d.stopped { + return + } + d.ticker.tickClock() + if d.ticker.isOnTick(PeerTickRaft) { + d.onRaftBaseTick() + } + if d.ticker.isOnTick(PeerTickRaftLogGC) { + d.onRaftGCLogTick() + } + if d.ticker.isOnTick(PeerTickSchedulerHeartbeat) { + d.onSchedulerHeartbeatTick() + } + if d.ticker.isOnTick(PeerTickSplitRegionCheck) { + d.onSplitRegionCheckTick() + } + d.ctx.tickDriverSender <- d.regionId +} + +func (d *peerMsgHandler) startTicker() { + d.ticker = newTicker(d.regionId, d.ctx.cfg) + d.ctx.tickDriverSender <- d.regionId + d.ticker.schedule(PeerTickRaft) + d.ticker.schedule(PeerTickRaftLogGC) + d.ticker.schedule(PeerTickSplitRegionCheck) + d.ticker.schedule(PeerTickSchedulerHeartbeat) +} + +func (d *peerMsgHandler) onRaftBaseTick() { + d.RaftGroup.Tick() + d.ticker.schedule(PeerTickRaft) +} + +func (d *peerMsgHandler) ScheduleCompactLog(firstIndex uint64, truncatedIndex uint64) { + raftLogGCTask := &runner.RaftLogGCTask{ + RaftEngine: d.ctx.engine.Raft, + RegionID: d.regionId, + StartIdx: d.LastCompactedIdx, + EndIdx: truncatedIndex + 1, + } + d.LastCompactedIdx = raftLogGCTask.EndIdx + d.ctx.raftLogGCTaskSender <- raftLogGCTask +} + +func (d *peerMsgHandler) onRaftMsg(msg *rspb.RaftMessage) error { + log.Debugf("%s handle raft message %s from %d to %d", + d.Tag, msg.GetMessage().GetMsgType(), msg.GetFromPeer().GetId(), msg.GetToPeer().GetId()) + if !d.validateRaftMessage(msg) { + return nil + } + if d.stopped { + return nil + } + if msg.GetIsTombstone() { + // we receive a message tells us to remove self. + d.handleGCPeerMsg(msg) + return nil + } + if d.checkMessage(msg) { + return nil + } + key, err := d.checkSnapshot(msg) + if err != nil { + return err + } + if key != nil { + // If the snapshot file is not used again, then it's OK to + // delete them here. If the snapshot file will be reused when + // receiving, then it will fail to pass the check again, so + // missing snapshot files should not be noticed. + s, err1 := d.ctx.snapMgr.GetSnapshotForApplying(*key) + if err1 != nil { + return err1 + } + d.ctx.snapMgr.DeleteSnapshot(*key, s, false) + return nil + } + d.insertPeerCache(msg.GetFromPeer()) + err = d.RaftGroup.Step(*msg.GetMessage()) + if err != nil { + return err + } + if d.AnyNewPeerCatchUp(msg.FromPeer.Id) { + d.HeartbeatScheduler(d.ctx.schedulerTaskSender) + } + return nil +} + +// return false means the message is invalid, and can be ignored. +func (d *peerMsgHandler) validateRaftMessage(msg *rspb.RaftMessage) bool { + regionID := msg.GetRegionId() + from := msg.GetFromPeer() + to := msg.GetToPeer() + log.Debugf("[region %d] handle raft message %s from %d to %d", regionID, msg, from.GetId(), to.GetId()) + if to.GetStoreId() != d.storeID() { + log.Warnf("[region %d] store not match, to store id %d, mine %d, ignore it", + regionID, to.GetStoreId(), d.storeID()) + return false + } + if msg.RegionEpoch == nil { + log.Errorf("[region %d] missing epoch in raft message, ignore it", regionID) + return false + } + return true +} + +/// Checks if the message is sent to the correct peer. +/// +/// Returns true means that the message can be dropped silently. +func (d *peerMsgHandler) checkMessage(msg *rspb.RaftMessage) bool { + fromEpoch := msg.GetRegionEpoch() + isVoteMsg := util.IsVoteMessage(msg.Message) + fromStoreID := msg.FromPeer.GetStoreId() + + // Let's consider following cases with three nodes [1, 2, 3] and 1 is leader: + // a. 1 removes 2, 2 may still send MsgAppendResponse to 1. 
+ // We should ignore this stale message and let 2 remove itself after + // applying the ConfChange log. + // b. 2 is isolated, 1 removes 2. When 2 rejoins the cluster, 2 will + // send stale MsgRequestVote to 1 and 3, at this time, we should tell 2 to gc itself. + // c. 2 is isolated but can communicate with 3. 1 removes 3. + // 2 will send stale MsgRequestVote to 3, 3 should ignore this message. + // d. 2 is isolated but can communicate with 3. 1 removes 2, then adds 4, remove 3. + // 2 will send stale MsgRequestVote to 3, 3 should tell 2 to gc itself. + // e. 2 is isolated. 1 adds 4, 5, 6, removes 3, 1. Now assume 4 is leader. + // After 2 rejoins the cluster, 2 may send stale MsgRequestVote to 1 and 3, + // 1 and 3 will ignore this message. Later 4 will send messages to 2 and 2 will + // rejoin the raft group again. + // f. 2 is isolated. 1 adds 4, 5, 6, removes 3, 1. Now assume 4 is leader, and 4 removes 2. + // unlike case e, 2 will be stale forever. + // TODO: for case f, if 2 is stale for a long time, 2 will communicate with scheduler and scheduler will + // tell 2 is stale, so 2 can remove itself. + region := d.Region() + if util.IsEpochStale(fromEpoch, region.RegionEpoch) && util.FindPeer(region, fromStoreID) == nil { + // The message is stale and not in current region. + handleStaleMsg(d.ctx.trans, msg, region.RegionEpoch, isVoteMsg) + return true + } + target := msg.GetToPeer() + if target.Id < d.PeerId() { + log.Infof("%s target peer ID %d is less than %d, msg maybe stale", d.Tag, target.Id, d.PeerId()) + return true + } else if target.Id > d.PeerId() { + if d.MaybeDestroy() { + log.Infof("%s is stale as received a larger peer %s, destroying", d.Tag, target) + d.destroyPeer() + d.ctx.router.sendStore(message.NewMsg(message.MsgTypeStoreRaftMessage, msg)) + } + return true + } + return false +} + +func handleStaleMsg(trans Transport, msg *rspb.RaftMessage, curEpoch *metapb.RegionEpoch, + needGC bool) { + regionID := msg.RegionId + fromPeer := msg.FromPeer + toPeer := msg.ToPeer + msgType := msg.Message.GetMsgType() + + if !needGC { + log.Infof("[region %d] raft message %s is stale, current %v ignore it", + regionID, msgType, curEpoch) + return + } + gcMsg := &rspb.RaftMessage{ + RegionId: regionID, + FromPeer: fromPeer, + ToPeer: toPeer, + RegionEpoch: curEpoch, + IsTombstone: true, + } + if err := trans.Send(gcMsg); err != nil { + log.Errorf("[region %d] send message failed %v", regionID, err) + } +} + +func (d *peerMsgHandler) handleGCPeerMsg(msg *rspb.RaftMessage) { + fromEpoch := msg.RegionEpoch + if !util.IsEpochStale(d.Region().RegionEpoch, fromEpoch) { + return + } + if !util.PeerEqual(d.Meta, msg.ToPeer) { + log.Infof("%s receive stale gc msg, ignore", d.Tag) + return + } + log.Infof("%s peer %s receives gc message, trying to remove", d.Tag, msg.ToPeer) + if d.MaybeDestroy() { + d.destroyPeer() + } +} + +// Returns `None` if the `msg` doesn't contain a snapshot or it contains a snapshot which +// doesn't conflict with any other snapshots or regions. Otherwise a `snap.SnapKey` is returned. 
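+// For example, if the snapshot's region overlaps a different existing region in
+// the store's meta, the key is returned and onRaftMsg deletes the snapshot file
+// instead of stepping the message into the Raft group.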
+func (d *peerMsgHandler) checkSnapshot(msg *rspb.RaftMessage) (*snap.SnapKey, error) { + if msg.Message.Snapshot == nil { + return nil, nil + } + regionID := msg.RegionId + snapshot := msg.Message.Snapshot + key := snap.SnapKeyFromRegionSnap(regionID, snapshot) + snapData := new(rspb.RaftSnapshotData) + err := snapData.Unmarshal(snapshot.Data) + if err != nil { + return nil, err + } + snapRegion := snapData.Region + peerID := msg.ToPeer.Id + var contains bool + for _, peer := range snapRegion.Peers { + if peer.Id == peerID { + contains = true + break + } + } + if !contains { + log.Infof("%s %s doesn't contains peer %d, skip", d.Tag, snapRegion, peerID) + return &key, nil + } + meta := d.ctx.storeMeta + if !util.RegionEqual(meta.regions[d.regionId], d.Region()) { + if !d.isInitialized() { + log.Infof("%s stale delegate detected, skip", d.Tag) + return &key, nil + } else { + panic(fmt.Sprintf("%s meta corrupted %s != %s", d.Tag, meta.regions[d.regionId], d.Region())) + } + } + + existRegions := meta.getOverlapRegions(snapRegion) + for _, existRegion := range existRegions { + if existRegion.GetId() == snapRegion.GetId() { + continue + } + log.Infof("%s region overlapped %s %s", d.Tag, existRegion, snapRegion) + return &key, nil + } + + // check if snapshot file exists. + _, err = d.ctx.snapMgr.GetSnapshotForApplying(key) + if err != nil { + return nil, err + } + return nil, nil +} + +func (d *peerMsgHandler) destroyPeer() { + log.Infof("%s starts destroy", d.Tag) + regionID := d.regionId + // We can't destroy a peer which is applying snapshot. + meta := d.ctx.storeMeta + isInitialized := d.isInitialized() + if err := d.Destroy(d.ctx.engine, false); err != nil { + // If not panic here, the peer will be recreated in the next restart, + // then it will be gc again. But if some overlap region is created + // before restarting, the gc action will delete the overlap region's + // data too. + panic(fmt.Sprintf("%s destroy peer %v", d.Tag, err)) + } + d.ctx.router.close(regionID) + d.stopped = true + if isInitialized && meta.regionRanges.Delete(®ionItem{region: d.Region()}) == nil { + panic(d.Tag + " meta corruption detected") + } + if _, ok := meta.regions[regionID]; !ok { + panic(d.Tag + " meta corruption detected") + } + delete(meta.regions, regionID) +} + +func (d *peerMsgHandler) findSiblingRegion() (result *metapb.Region) { + meta := d.ctx.storeMeta + item := ®ionItem{region: d.Region()} + meta.regionRanges.AscendGreaterOrEqual(item, func(i btree.Item) bool { + result = i.(*regionItem).region + return true + }) + return +} + +func (d *peerMsgHandler) onRaftGCLogTick() { + d.ticker.schedule(PeerTickRaftLogGC) + if !d.IsLeader() { + return + } + + appliedIdx := d.peerStorage.AppliedIndex() + firstIdx, _ := d.peerStorage.FirstIndex() + var compactIdx uint64 + if appliedIdx > firstIdx && appliedIdx-firstIdx >= d.ctx.cfg.RaftLogGcCountLimit { + compactIdx = appliedIdx + } else { + return + } + + y.Assert(compactIdx > 0) + compactIdx -= 1 + if compactIdx < firstIdx { + // In case compact_idx == first_idx before subtraction. + return + } + + term, err := d.RaftGroup.Raft.RaftLog.Term(compactIdx) + if err != nil { + log.Fatalf("appliedIdx: %d, firstIdx: %d, compactIdx: %d", appliedIdx, firstIdx, compactIdx) + panic(err) + } + + // Create a compact log request and notify directly. 
+ regionID := d.regionId + request := newCompactLogRequest(regionID, d.Meta, compactIdx, term) + d.proposeRaftCommand(request, nil) +} + +func (d *peerMsgHandler) onSplitRegionCheckTick() { + d.ticker.schedule(PeerTickSplitRegionCheck) + // To avoid frequent scan, we only add new scan tasks if all previous tasks + // have finished. + if len(d.ctx.splitCheckTaskSender) > 0 { + return + } + + if !d.IsLeader() { + return + } + if d.ApproximateSize != nil && d.SizeDiffHint < d.ctx.cfg.RegionSplitSize/8 { + return + } + d.ctx.splitCheckTaskSender <- &runner.SplitCheckTask{ + Region: d.Region(), + } + d.SizeDiffHint = 0 +} + +func (d *peerMsgHandler) onPrepareSplitRegion(regionEpoch *metapb.RegionEpoch, splitKey []byte, cb *message.Callback) { + if err := d.validateSplitRegion(regionEpoch, splitKey); err != nil { + cb.Done(ErrResp(err)) + return + } + region := d.Region() + d.ctx.schedulerTaskSender <- &runner.SchedulerAskSplitTask{ + Region: region, + SplitKey: splitKey, + Peer: d.Meta, + Callback: cb, + } +} + +func (d *peerMsgHandler) validateSplitRegion(epoch *metapb.RegionEpoch, splitKey []byte) error { + if len(splitKey) == 0 { + err := errors.Errorf("%s split key should not be empty", d.Tag) + log.Error(err) + return err + } + + if !d.IsLeader() { + // region on this store is no longer leader, skipped. + log.Infof("%s not leader, skip", d.Tag) + return &util.ErrNotLeader{ + RegionId: d.regionId, + Leader: d.getPeerFromCache(d.LeaderId()), + } + } + + region := d.Region() + latestEpoch := region.GetRegionEpoch() + + // This is a little difference for `check_region_epoch` in region split case. + // Here we just need to check `version` because `conf_ver` will be update + // to the latest value of the peer, and then send to Scheduler. + if latestEpoch.Version != epoch.Version { + log.Infof("%s epoch changed, retry later, prev_epoch: %s, epoch %s", + d.Tag, latestEpoch, epoch) + return &util.ErrEpochNotMatch{ + Message: fmt.Sprintf("%s epoch changed %s != %s, retry later", d.Tag, latestEpoch, epoch), + Regions: []*metapb.Region{region}, + } + } + return nil +} + +func (d *peerMsgHandler) onApproximateRegionSize(size uint64) { + d.ApproximateSize = &size +} + +func (d *peerMsgHandler) onSchedulerHeartbeatTick() { + d.ticker.schedule(PeerTickSchedulerHeartbeat) + + if !d.IsLeader() { + return + } + d.HeartbeatScheduler(d.ctx.schedulerTaskSender) +} + +func (d *peerMsgHandler) onGCSnap(snaps []snap.SnapKeyWithSending) { + compactedIdx := d.peerStorage.truncatedIndex() + compactedTerm := d.peerStorage.truncatedTerm() + for _, snapKeyWithSending := range snaps { + key := snapKeyWithSending.SnapKey + if snapKeyWithSending.IsSending { + snap, err := d.ctx.snapMgr.GetSnapshotForSending(key) + if err != nil { + log.Errorf("%s failed to load snapshot for %s %v", d.Tag, key, err) + continue + } + if key.Term < compactedTerm || key.Index < compactedIdx { + log.Infof("%s snap file %s has been compacted, delete", d.Tag, key) + d.ctx.snapMgr.DeleteSnapshot(key, snap, false) + } else if fi, err1 := snap.Meta(); err1 == nil { + modTime := fi.ModTime() + if time.Since(modTime) > 4*time.Hour { + log.Infof("%s snap file %s has been expired, delete", d.Tag, key) + d.ctx.snapMgr.DeleteSnapshot(key, snap, false) + } + } + } else if key.Term <= compactedTerm && + (key.Index < compactedIdx || key.Index == compactedIdx) { + log.Infof("%s snap file %s has been applied, delete", d.Tag, key) + a, err := d.ctx.snapMgr.GetSnapshotForApplying(key) + if err != nil { + log.Errorf("%s failed to load snapshot for %s %v", d.Tag, 
key, err) + continue + } + d.ctx.snapMgr.DeleteSnapshot(key, a, false) + } + } +} + +func newAdminRequest(regionID uint64, peer *metapb.Peer) *raft_cmdpb.RaftCmdRequest { + return &raft_cmdpb.RaftCmdRequest{ + Header: &raft_cmdpb.RaftRequestHeader{ + RegionId: regionID, + Peer: peer, + }, + } +} + +func newCompactLogRequest(regionID uint64, peer *metapb.Peer, compactIndex, compactTerm uint64) *raft_cmdpb.RaftCmdRequest { + req := newAdminRequest(regionID, peer) + req.AdminRequest = &raft_cmdpb.AdminRequest{ + CmdType: raft_cmdpb.AdminCmdType_CompactLog, + CompactLog: &raft_cmdpb.CompactLogRequest{ + CompactIndex: compactIndex, + CompactTerm: compactTerm, + }, + } + return req +} diff --git a/kv/raftstore/peer_storage.go b/kv/raftstore/peer_storage.go new file mode 100644 index 00000000..33605a36 --- /dev/null +++ b/kv/raftstore/peer_storage.go @@ -0,0 +1,449 @@ +package raftstore + +import ( + "bytes" + "fmt" + "time" + + "github.com/Connor1996/badger" + "github.com/Connor1996/badger/y" + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/runner" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/raft" + "github.com/pingcap/errors" +) + +type ApplySnapResult struct { + // PrevRegion is the region before snapshot applied + PrevRegion *metapb.Region + Region *metapb.Region +} + +var _ raft.Storage = new(PeerStorage) + +type PeerStorage struct { + // Tag which is useful for printing log + Tag string + // The underlying storage + Engines *engine_util.Engines + + // Cache for the persistent states + region *metapb.Region + raftState rspb.RaftLocalState + applyState rspb.RaftApplyState // (Should be updated too when applying committed entries) + lastTerm uint64 + + // States for generating snapshot + snapState snap.SnapState + regionSched chan<- worker.Task + snapTriedCnt int +} + +func NewPeerStorage(engines *engine_util.Engines, region *metapb.Region, regionSched chan<- worker.Task, tag string) (*PeerStorage, error) { + log.Debugf("%s creating storage for %s", tag, region.String()) + raftState, err := meta.InitRaftLocalState(engines.Raft, region) + if err != nil { + return nil, err + } + applyState, err := meta.InitApplyState(engines.Kv, region) + if err != nil { + return nil, err + } + if raftState.LastIndex < applyState.AppliedIndex { + panic(fmt.Sprintf("%s unexpected raft log index: lastIndex %d < appliedIndex %d", + tag, raftState.LastIndex, applyState.AppliedIndex)) + } + lastTerm, err := meta.InitLastTerm(engines.Raft, region, raftState, applyState) + if err != nil { + return nil, err + } + return &PeerStorage{ + Engines: engines, + region: region, + Tag: tag, + raftState: *raftState, + lastTerm: lastTerm, + regionSched: regionSched, + }, nil +} + +func (ps *PeerStorage) InitialState() (eraftpb.HardState, eraftpb.ConfState, error) { + raftState := ps.raftState + if raft.IsEmptyHardState(*raftState.HardState) { + y.AssertTruef(!ps.isInitialized(), + "peer for region %s is initialized but local state %+v has empty hard state", + ps.region, ps.raftState) + 
return eraftpb.HardState{}, eraftpb.ConfState{}, nil + } + return *raftState.HardState, util.ConfStateFromRegion(ps.region), nil +} + +func (ps *PeerStorage) Entries(low, high uint64) ([]eraftpb.Entry, error) { + if err := ps.checkRange(low, high); err != nil || low == high { + return nil, err + } + buf := make([]eraftpb.Entry, 0, high-low) + nextIndex := low + txn := ps.Engines.Raft.NewTransaction(false) + defer txn.Discard() + startKey := meta.RaftLogKey(ps.region.Id, low) + endKey := meta.RaftLogKey(ps.region.Id, high) + iter := txn.NewIterator(badger.DefaultIteratorOptions) + defer iter.Close() + for iter.Seek(startKey); iter.Valid(); iter.Next() { + item := iter.Item() + if bytes.Compare(item.Key(), endKey) >= 0 { + break + } + val, err := item.Value() + if err != nil { + return nil, err + } + var entry eraftpb.Entry + if err = entry.Unmarshal(val); err != nil { + return nil, err + } + // May meet gap or has been compacted. + if entry.Index != nextIndex { + break + } + nextIndex++ + buf = append(buf, entry) + } + // If we get the correct number of entries, returns. + if len(buf) == int(high-low) { + return buf, nil + } + // Here means we don't fetch enough entries. + return nil, raft.ErrUnavailable +} + +func (ps *PeerStorage) Term(idx uint64) (uint64, error) { + if idx == ps.truncatedIndex() { + return ps.truncatedTerm(), nil + } + if err := ps.checkRange(idx, idx+1); err != nil { + return 0, err + } + if ps.truncatedTerm() == ps.lastTerm || idx == ps.raftState.LastIndex { + return ps.lastTerm, nil + } + var entry eraftpb.Entry + if err := engine_util.GetMeta(ps.Engines.Raft, meta.RaftLogKey(ps.region.Id, idx), &entry); err != nil { + return 0, err + } + return entry.Term, nil +} + +func (ps *PeerStorage) LastIndex() (uint64, error) { + return ps.raftState.LastIndex, nil +} + +func (ps *PeerStorage) FirstIndex() (uint64, error) { + return ps.truncatedIndex() + 1, nil +} + +func (ps *PeerStorage) Snapshot() (eraftpb.Snapshot, error) { + var snapshot eraftpb.Snapshot + if ps.snapState.StateType == snap.SnapState_Generating { + select { + case s := <-ps.snapState.Receiver: + snapshot = *s + default: + return snapshot, raft.ErrSnapshotTemporarilyUnavailable + } + ps.snapState.StateType = snap.SnapState_Relax + if snapshot.GetMetadata() != nil { + ps.snapTriedCnt = 0 + if ps.validateSnap(&snapshot) { + return snapshot, nil + } + } else { + log.Warnf("%s failed to try generating snapshot, times: %d", ps.Tag, ps.snapTriedCnt) + } + } + + if ps.snapTriedCnt >= 5 { + err := errors.Errorf("failed to get snapshot after %d times", ps.snapTriedCnt) + ps.snapTriedCnt = 0 + return snapshot, err + } + + log.Infof("%s requesting snapshot", ps.Tag) + ps.snapTriedCnt++ + ps.ScheduleGenerateSnapshot() + + return snapshot, raft.ErrSnapshotTemporarilyUnavailable +} + +func (ps *PeerStorage) ScheduleGenerateSnapshot() { + ch := make(chan *eraftpb.Snapshot, 1) + ps.snapState = snap.SnapState{ + StateType: snap.SnapState_Generating, + Receiver: ch, + } + ps.regionSched <- &runner.RegionTaskGen{ + RegionId: ps.region.GetId(), + Notifier: ch, + } +} + +func (ps *PeerStorage) isInitialized() bool { + return len(ps.region.Peers) > 0 +} + +func (ps *PeerStorage) Region() *metapb.Region { + return ps.region +} + +func (ps *PeerStorage) SetRegion(region *metapb.Region) { + ps.region = region +} + +func (ps *PeerStorage) checkRange(low, high uint64) error { + if low > high { + return errors.Errorf("low %d is greater than high %d", low, high) + } else if low <= ps.truncatedIndex() { + return raft.ErrCompacted + } 
else if high > ps.raftState.LastIndex+1 { + return errors.Errorf("entries' high %d is out of bound, lastIndex %d", + high, ps.raftState.LastIndex) + } + return nil +} + +func (ps *PeerStorage) truncatedIndex() uint64 { + return ps.applyState.TruncatedState.Index +} + +func (ps *PeerStorage) truncatedTerm() uint64 { + return ps.applyState.TruncatedState.Term +} + +func (ps *PeerStorage) AppliedIndex() uint64 { + return ps.applyState.AppliedIndex +} + +func (ps *PeerStorage) validateSnap(snap *eraftpb.Snapshot) bool { + idx := snap.GetMetadata().GetIndex() + if idx < ps.truncatedIndex() { + log.Infof("%s snapshot is stale, generate again, snapIndex: %d, truncatedIndex: %d", ps.Tag, idx, ps.truncatedIndex()) + return false + } + var snapData rspb.RaftSnapshotData + if err := proto.UnmarshalMerge(snap.GetData(), &snapData); err != nil { + log.Errorf("%s failed to decode snapshot, it may be corrupted, err: %v", ps.Tag, err) + return false + } + snapEpoch := snapData.GetRegion().GetRegionEpoch() + latestEpoch := ps.region.GetRegionEpoch() + if snapEpoch.GetConfVer() < latestEpoch.GetConfVer() { + log.Infof("%s snapshot epoch is stale, snapEpoch: %s, latestEpoch: %s", ps.Tag, snapEpoch, latestEpoch) + return false + } + return true +} + +// Append the given entries to the raft log using previous last index or self.last_index. +// Return the new last index for later update. After we commit in engine, we can set last_index +// to the return one. +func (ps *PeerStorage) Append(entries []eraftpb.Entry, raftWB *engine_util.WriteBatch) error { + log.Debugf("%s append %d entries", ps.Tag, len(entries)) + prevLastIndex := ps.raftState.LastIndex + if len(entries) == 0 { + return nil + } + lastEntry := entries[len(entries)-1] + lastIndex := lastEntry.Index + lastTerm := lastEntry.Term + for _, entry := range entries { + err := raftWB.SetMeta(meta.RaftLogKey(ps.region.Id, entry.Index), &entry) + if err != nil { + return err + } + } + // Delete any previously appended log entries which never committed. + for i := lastIndex + 1; i <= prevLastIndex; i++ { + raftWB.DeleteMeta(meta.RaftLogKey(ps.region.Id, i)) + } + ps.raftState.LastIndex = lastIndex + ps.lastTerm = lastTerm + return nil +} + +func (ps *PeerStorage) clearMeta(kvWB, raftWB *engine_util.WriteBatch) error { + return ClearMeta(ps.Engines, kvWB, raftWB, ps.region.Id, ps.raftState.LastIndex) +} + +// Delete all data that is not covered by `new_region`. 
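+// For example (illustrative): if the peer previously covered ["a", "z") and the
+// new region only covers ["g", "t"), two RegionTaskDestroy tasks are scheduled,
+// one for ["a", "g") and one for ["t", "z").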
+func (ps *PeerStorage) clearExtraData(newRegion *metapb.Region) { + oldStartKey, oldEndKey := ps.region.GetStartKey(), ps.region.GetEndKey() + newStartKey, newEndKey := newRegion.GetStartKey(), newRegion.GetEndKey() + regionId := newRegion.Id + if bytes.Compare(oldStartKey, newStartKey) < 0 { + ps.regionSched <- &runner.RegionTaskDestroy{ + RegionId: regionId, + StartKey: oldStartKey, + EndKey: newStartKey, + } + } + if bytes.Compare(newEndKey, oldEndKey) < 0 { + ps.regionSched <- &runner.RegionTaskDestroy{ + RegionId: regionId, + StartKey: newEndKey, + EndKey: oldEndKey, + } + } +} + +func ClearMeta(engines *engine_util.Engines, kvWB, raftWB *engine_util.WriteBatch, regionID uint64, lastIndex uint64) error { + start := time.Now() + kvWB.DeleteMeta(meta.RegionStateKey(regionID)) + kvWB.DeleteMeta(meta.ApplyStateKey(regionID)) + + firstIndex := lastIndex + 1 + beginLogKey := meta.RaftLogKey(regionID, 0) + endLogKey := meta.RaftLogKey(regionID, firstIndex) + err := engines.Raft.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + it.Seek(beginLogKey) + if it.Valid() && bytes.Compare(it.Item().Key(), endLogKey) < 0 { + logIdx, err1 := meta.RaftLogIndex(it.Item().Key()) + if err1 != nil { + return err1 + } + firstIndex = logIdx + } + return nil + }) + if err != nil { + return err + } + for i := firstIndex; i <= lastIndex; i++ { + raftWB.DeleteMeta(meta.RaftLogKey(regionID, i)) + } + raftWB.DeleteMeta(meta.RaftStateKey(regionID)) + log.Infof( + "[region %d] clear peer 1 meta key 1 apply key 1 raft key and %d raft logs, takes %v", + regionID, + lastIndex+1-firstIndex, + time.Since(start), + ) + return nil +} + +// Apply the peer with given snapshot. +func (ps *PeerStorage) ApplySnapshot(snap *eraftpb.Snapshot, kvWB *engine_util.WriteBatch, raftWB *engine_util.WriteBatch) (*ApplySnapResult, error) { + log.Infof("%v begin to apply snapshot", ps.Tag) + + snapData := new(rspb.RaftSnapshotData) + if err := snapData.Unmarshal(snap.Data); err != nil { + return nil, err + } + + if snapData.Region.Id != ps.region.Id { + return nil, fmt.Errorf("mismatch region id %v != %v", snapData.Region.Id, ps.region.Id) + } + + if ps.isInitialized() { + // we can only delete the old data when the peer is initialized. + if err := ps.clearMeta(kvWB, raftWB); err != nil { + return nil, err + } + } + + ps.raftState.LastIndex = snap.Metadata.Index + ps.lastTerm = snap.Metadata.Term + + applyRes := &ApplySnapResult{ + PrevRegion: ps.region, + Region: snapData.Region, + } + // cleanup data before scheduling apply worker.Task + if ps.isInitialized() { + ps.clearExtraData(snapData.Region) + } + ps.region = snapData.Region + ps.applyState = rspb.RaftApplyState{ + AppliedIndex: snap.Metadata.Index, + // The snapshot only contains log which index > applied index, so + // here the truncate state's (index, term) is in snapshot metadata. + TruncatedState: &rspb.RaftTruncatedState{ + Index: snap.Metadata.Index, + Term: snap.Metadata.Term, + }, + } + kvWB.SetMeta(meta.ApplyStateKey(ps.region.GetId()), &ps.applyState) + ps.ScheduleApplyingSnapshotAndWait(snapData.Region, snap.Metadata) + meta.WriteRegionState(kvWB, snapData.Region, rspb.PeerState_Normal) + + log.Debugf("%v apply snapshot for region %v with state %v ok", ps.Tag, snapData.Region, ps.applyState) + return applyRes, nil +} + +/// Save memory states to disk. +/// Do not modify ready in this function, this is a requirement to advance the ready object properly later. 
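+/// Concretely, appended entries and the updated RaftLocalState go into the raft
+/// engine's write batch, while applying a snapshot additionally updates
+/// RaftApplyState and RegionLocalState in the KV engine's write batch; both
+/// batches are flushed before this function returns.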
+func (ps *PeerStorage) SaveReadyState(ready *raft.Ready) (*ApplySnapResult, error) { + kvWB, raftWB := new(engine_util.WriteBatch), new(engine_util.WriteBatch) + prevRaftState := ps.raftState + + var applyRes *ApplySnapResult = nil + var err error + if !raft.IsEmptySnap(&ready.Snapshot) { + applyRes, err = ps.ApplySnapshot(&ready.Snapshot, kvWB, raftWB) + if err != nil { + return nil, err + } + } + + if len(ready.Entries) != 0 { + if err := ps.Append(ready.Entries, raftWB); err != nil { + return nil, err + } + } + + if !raft.IsEmptyHardState(ready.HardState) { + ps.raftState.HardState = &ready.HardState + } + + if !proto.Equal(&prevRaftState, &ps.raftState) { + raftWB.SetMeta(meta.RaftStateKey(ps.region.GetId()), &ps.raftState) + } + + kvWB.MustWriteToDB(ps.Engines.Kv) + raftWB.MustWriteToDB(ps.Engines.Raft) + return applyRes, nil +} + +func (ps *PeerStorage) ScheduleApplyingSnapshotAndWait(snapRegion *metapb.Region, snapMeta *eraftpb.SnapshotMetadata) { + ch := make(chan bool) + ps.snapState = snap.SnapState{ + StateType: snap.SnapState_Applying, + } + ps.regionSched <- &runner.RegionTaskApply{ + RegionId: ps.region.Id, + Notifier: ch, + SnapMeta: snapMeta, + StartKey: snapRegion.GetStartKey(), + EndKey: snapRegion.GetEndKey(), + } + <-ch +} + +func (ps *PeerStorage) ClearData() { + ps.regionSched <- &runner.RegionTaskDestroy{ + RegionId: ps.region.GetId(), + StartKey: ps.region.GetStartKey(), + EndKey: ps.region.GetEndKey(), + } +} diff --git a/kv/raftstore/peer_storage_test.go b/kv/raftstore/peer_storage_test.go new file mode 100644 index 00000000..69aff99e --- /dev/null +++ b/kv/raftstore/peer_storage_test.go @@ -0,0 +1,240 @@ +package raftstore + +import ( + "bytes" + "testing" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/raft" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newTestPeerStorage(t *testing.T) *PeerStorage { + engines := util.NewTestEngines() + err := BootstrapStore(engines, 1, 1) + require.Nil(t, err) + region, err := PrepareBootstrap(engines, 1, 1, 1) + require.Nil(t, err) + peerStore, err := NewPeerStorage(engines, region, nil, "") + require.Nil(t, err) + return peerStore +} + +func newTestPeerStorageFromEnts(t *testing.T, ents []eraftpb.Entry) *PeerStorage { + peerStore := newTestPeerStorage(t) + kvWB := new(engine_util.WriteBatch) + raftWB := new(engine_util.WriteBatch) + require.Nil(t, peerStore.Append(ents[1:], raftWB)) + applyState := peerStore.applyState + applyState.TruncatedState = &rspb.RaftTruncatedState{ + Index: ents[0].Index, + Term: ents[0].Term, + } + applyState.AppliedIndex = ents[len(ents)-1].Index + kvWB.SetMeta(meta.ApplyStateKey(peerStore.region.GetId()), &applyState) + require.Nil(t, peerStore.Engines.WriteRaft(raftWB)) + peerStore.Engines.WriteKV(kvWB) + return peerStore +} + +func cleanUpTestData(peerStore *PeerStorage) { + if err := peerStore.Engines.Destroy(); err != nil { + panic(err) + } +} + +func newTestEntry(index, term uint64) eraftpb.Entry { + return eraftpb.Entry{ + Index: index, + Term: term, + Data: []byte{0}, + } +} + +func TestPeerStorageTerm(t *testing.T) { + ents := []eraftpb.Entry{ + newTestEntry(3, 3), newTestEntry(4, 4), newTestEntry(5, 5), + } + tests := 
[]struct { + idx uint64 + term uint64 + err error + }{ + {2, 0, raft.ErrCompacted}, + {3, 3, nil}, + {4, 4, nil}, + {5, 5, nil}, + } + for _, tt := range tests { + peerStore := newTestPeerStorageFromEnts(t, ents) + term, err := peerStore.Term(tt.idx) + if err != nil { + assert.Equal(t, err, tt.err) + } else { + assert.Equal(t, term, tt.term) + } + cleanUpTestData(peerStore) + } +} + +func appendEnts(t *testing.T, peerStore *PeerStorage, ents []eraftpb.Entry) { + raftWB := new(engine_util.WriteBatch) + require.Nil(t, peerStore.Append(ents, raftWB)) + raftWB.SetMeta(meta.RaftStateKey(peerStore.region.GetId()), &peerStore.raftState) + require.Nil(t, peerStore.Engines.WriteRaft(raftWB)) +} + +func getMetaKeyCount(t *testing.T, peerStore *PeerStorage) int { + regionID := peerStore.region.Id + count := 0 + metaStart := meta.RegionMetaPrefixKey(regionID) + metaEnd := meta.RegionMetaPrefixKey(regionID + 1) + err := peerStore.Engines.Kv.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + for it.Seek(metaStart); it.Valid(); it.Next() { + if bytes.Compare(it.Item().Key(), metaEnd) >= 0 { + break + } + count++ + } + return nil + }) + require.Nil(t, err) + raftStart := meta.RegionRaftPrefixKey(regionID) + raftEnd := meta.RegionRaftPrefixKey(regionID + 1) + err = peerStore.Engines.Kv.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + for it.Seek(metaStart); it.Valid(); it.Next() { + if bytes.Compare(it.Item().Key(), metaEnd) >= 0 { + break + } + count++ + } + return nil + }) + require.Nil(t, err) + err = peerStore.Engines.Raft.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.DefaultIteratorOptions) + defer it.Close() + for it.Seek(raftStart); it.Valid(); it.Next() { + if bytes.Compare(it.Item().Key(), raftEnd) >= 0 { + break + } + count++ + } + return nil + }) + require.Nil(t, err) + return count +} + +func TestPeerStorageClearMeta(t *testing.T) { + peerStore := newTestPeerStorageFromEnts(t, []eraftpb.Entry{ + newTestEntry(3, 3), + newTestEntry(4, 4), + }) + defer cleanUpTestData(peerStore) + appendEnts(t, peerStore, []eraftpb.Entry{ + newTestEntry(5, 5), + newTestEntry(6, 6), + }) + assert.Equal(t, 6, getMetaKeyCount(t, peerStore)) + kvWB := new(engine_util.WriteBatch) + raftWB := new(engine_util.WriteBatch) + require.Nil(t, peerStore.clearMeta(kvWB, raftWB)) + require.Nil(t, peerStore.Engines.WriteKV(kvWB)) + require.Nil(t, peerStore.Engines.WriteRaft(raftWB)) + assert.Equal(t, 0, getMetaKeyCount(t, peerStore)) +} + +func TestPeerStorageEntries(t *testing.T) { + ents := []eraftpb.Entry{ + newTestEntry(3, 3), + newTestEntry(4, 4), + newTestEntry(5, 5), + newTestEntry(6, 6), + } + tests := []struct { + low uint64 + high uint64 + entries []eraftpb.Entry + err error + }{ + {2, 6, nil, raft.ErrCompacted}, + {3, 4, nil, raft.ErrCompacted}, + {4, 5, []eraftpb.Entry{ + newTestEntry(4, 4), + }, nil}, + {4, 6, []eraftpb.Entry{ + newTestEntry(4, 4), + newTestEntry(5, 5), + }, nil}, + } + + for i, tt := range tests { + peerStore := newTestPeerStorageFromEnts(t, ents) + defer cleanUpTestData(peerStore) + entries, err := peerStore.Entries(tt.low, tt.high) + if err != nil { + assert.Equal(t, tt.err, err) + } else { + assert.Equal(t, tt.entries, entries, "%d", i) + } + } +} + +func TestPeerStorageAppend(t *testing.T) { + ents := []eraftpb.Entry{ + newTestEntry(3, 3), newTestEntry(4, 4), newTestEntry(5, 5)} + tests := []struct { + appends []eraftpb.Entry + results []eraftpb.Entry + }{ 
+ { + []eraftpb.Entry{newTestEntry(3, 3), newTestEntry(4, 4), newTestEntry(5, 5)}, + []eraftpb.Entry{newTestEntry(4, 4), newTestEntry(5, 5)}, + }, + { + []eraftpb.Entry{newTestEntry(3, 3), newTestEntry(4, 6), newTestEntry(5, 6)}, + []eraftpb.Entry{newTestEntry(4, 6), newTestEntry(5, 6)}, + }, + { + []eraftpb.Entry{ + newTestEntry(3, 3), + newTestEntry(4, 4), + newTestEntry(5, 5), + newTestEntry(6, 5), + }, + []eraftpb.Entry{newTestEntry(4, 4), newTestEntry(5, 5), newTestEntry(6, 5)}, + }, + // truncate incoming entries, truncate the existing entries and append + { + []eraftpb.Entry{newTestEntry(2, 3), newTestEntry(3, 3), newTestEntry(4, 5)}, + []eraftpb.Entry{newTestEntry(4, 5)}, + }, + // truncate the existing entries and append + {[]eraftpb.Entry{newTestEntry(4, 5)}, []eraftpb.Entry{newTestEntry(4, 5)}}, + // direct append + { + []eraftpb.Entry{newTestEntry(6, 5)}, + []eraftpb.Entry{newTestEntry(4, 4), newTestEntry(5, 5), newTestEntry(6, 5)}, + }, + } + for _, tt := range tests { + peerStore := newTestPeerStorageFromEnts(t, ents) + defer cleanUpTestData(peerStore) + appendEnts(t, peerStore, tt.appends) + li := peerStore.raftState.LastIndex + acutualEntries, err := peerStore.Entries(4, li+1) + require.Nil(t, err) + assert.Equal(t, tt.results, acutualEntries) + } +} diff --git a/kv/raftstore/raft_worker.go b/kv/raftstore/raft_worker.go new file mode 100644 index 00000000..cb6adf48 --- /dev/null +++ b/kv/raftstore/raft_worker.go @@ -0,0 +1,69 @@ +package raftstore + +import ( + "sync" + + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" +) + +// raftWorker is responsible for run raft commands and apply raft logs. +type raftWorker struct { + pr *router + + raftCh chan message.Msg + ctx *GlobalContext + + closeCh <-chan struct{} +} + +func newRaftWorker(ctx *GlobalContext, pm *router) *raftWorker { + return &raftWorker{ + raftCh: pm.peerSender, + ctx: ctx, + pr: pm, + } +} + +// run runs raft commands. +// On each loop, raft commands are batched by channel buffer. +// After commands are handled, we collect apply messages by peers, make a applyBatch, send it to apply channel. 
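+// For example (illustrative): if many messages are already queued in raftCh,
+// the loop receives one, then drains len(rw.raftCh) more in the same iteration,
+// so each touched peer's HandleRaftReady runs once per batch rather than once
+// per message.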
+func (rw *raftWorker) run(closeCh <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + var msgs []message.Msg + for { + msgs = msgs[:0] + select { + case <-closeCh: + return + case msg := <-rw.raftCh: + msgs = append(msgs, msg) + } + pending := len(rw.raftCh) + for i := 0; i < pending; i++ { + msgs = append(msgs, <-rw.raftCh) + } + peerStateMap := make(map[uint64]*peerState) + for _, msg := range msgs { + peerState := rw.getPeerState(peerStateMap, msg.RegionID) + if peerState == nil { + continue + } + newPeerMsgHandler(peerState.peer, rw.ctx).HandleMsg(msg) + } + for _, peerState := range peerStateMap { + newPeerMsgHandler(peerState.peer, rw.ctx).HandleRaftReady() + } + } +} + +func (rw *raftWorker) getPeerState(peersMap map[uint64]*peerState, regionID uint64) *peerState { + peer, ok := peersMap[regionID] + if !ok { + peer = rw.pr.get(regionID) + if peer == nil { + return nil + } + peersMap[regionID] = peer + } + return peer +} diff --git a/kv/raftstore/router.go b/kv/raftstore/router.go new file mode 100644 index 00000000..6cd3a448 --- /dev/null +++ b/kv/raftstore/router.go @@ -0,0 +1,104 @@ +package raftstore + +import ( + "sync" + "sync/atomic" + + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + + "github.com/pingcap/errors" +) + +// peerState contains the peer states that needs to run raft command and apply command. +type peerState struct { + closed uint32 + peer *peer +} + +// router routes a message to a peer. +type router struct { + peers sync.Map // regionID -> peerState + peerSender chan message.Msg + storeSender chan<- message.Msg +} + +func newRouter(storeSender chan<- message.Msg) *router { + pm := &router{ + peerSender: make(chan message.Msg, 40960), + storeSender: storeSender, + } + return pm +} + +func (pr *router) get(regionID uint64) *peerState { + v, ok := pr.peers.Load(regionID) + if ok { + return v.(*peerState) + } + return nil +} + +func (pr *router) register(peer *peer) { + id := peer.regionId + newPeer := &peerState{ + peer: peer, + } + pr.peers.Store(id, newPeer) +} + +func (pr *router) close(regionID uint64) { + v, ok := pr.peers.Load(regionID) + if ok { + ps := v.(*peerState) + atomic.StoreUint32(&ps.closed, 1) + pr.peers.Delete(regionID) + } +} + +func (pr *router) send(regionID uint64, msg message.Msg) error { + msg.RegionID = regionID + p := pr.get(regionID) + if p == nil || atomic.LoadUint32(&p.closed) == 1 { + return errPeerNotFound + } + pr.peerSender <- msg + return nil +} + +func (pr *router) sendStore(msg message.Msg) { + pr.storeSender <- msg +} + +var errPeerNotFound = errors.New("peer not found") + +type RaftstoreRouter struct { + router *router +} + +func NewRaftstoreRouter(router *router) *RaftstoreRouter { + return &RaftstoreRouter{router: router} +} + +func (r *RaftstoreRouter) Send(regionID uint64, msg message.Msg) error { + return r.router.send(regionID, msg) +} + +func (r *RaftstoreRouter) SendRaftMessage(msg *raft_serverpb.RaftMessage) error { + regionID := msg.RegionId + if r.router.send(regionID, message.NewPeerMsg(message.MsgTypeRaftMessage, regionID, msg)) != nil { + r.router.sendStore(message.NewPeerMsg(message.MsgTypeStoreRaftMessage, regionID, msg)) + } + return nil + +} + +func (r *RaftstoreRouter) SendRaftCommand(req *raft_cmdpb.RaftCmdRequest, cb *message.Callback) error { + cmd := &message.MsgRaftCmd{ + Request: req, + Callback: cb, + } + regionID := req.Header.RegionId + return 
r.router.send(regionID, message.NewPeerMsg(message.MsgTypeRaftCmd, regionID, cmd)) +} diff --git a/kv/raftstore/runner/raftlog_gc.go b/kv/raftstore/runner/raftlog_gc.go new file mode 100644 index 00000000..4a9bf2e5 --- /dev/null +++ b/kv/raftstore/runner/raftlog_gc.go @@ -0,0 +1,90 @@ +package runner + +import ( + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" +) + +type RaftLogGCTask struct { + RaftEngine *badger.DB + RegionID uint64 + StartIdx uint64 + EndIdx uint64 +} + +type raftLogGcTaskRes uint64 + +type raftLogGCTaskHandler struct { + taskResCh chan<- raftLogGcTaskRes +} + +func NewRaftLogGCTaskHandler() *raftLogGCTaskHandler { + return &raftLogGCTaskHandler{} +} + +// gcRaftLog does the GC job and returns the count of logs collected. +func (r *raftLogGCTaskHandler) gcRaftLog(raftDb *badger.DB, regionId, startIdx, endIdx uint64) (uint64, error) { + // Find the raft log idx range needed to be gc. + firstIdx := startIdx + if firstIdx == 0 { + firstIdx = endIdx + err := raftDb.View(func(txn *badger.Txn) error { + startKey := meta.RaftLogKey(regionId, 0) + ite := txn.NewIterator(badger.DefaultIteratorOptions) + defer ite.Close() + if ite.Seek(startKey); ite.Valid() { + var err error + if firstIdx, err = meta.RaftLogIndex(ite.Item().Key()); err != nil { + return err + } + } + return nil + }) + if err != nil { + return 0, err + } + } + + if firstIdx >= endIdx { + log.Infof("no need to gc, [regionId: %d]", regionId) + return 0, nil + } + + raftWb := engine_util.WriteBatch{} + for idx := firstIdx; idx < endIdx; idx += 1 { + key := meta.RaftLogKey(regionId, idx) + raftWb.DeleteMeta(key) + } + if raftWb.Len() != 0 { + if err := raftWb.WriteToDB(raftDb); err != nil { + return 0, err + } + } + return endIdx - firstIdx, nil +} + +func (r *raftLogGCTaskHandler) reportCollected(collected uint64) { + if r.taskResCh == nil { + return + } + r.taskResCh <- raftLogGcTaskRes(collected) +} + +func (r *raftLogGCTaskHandler) Handle(t worker.Task) { + logGcTask, ok := t.(*RaftLogGCTask) + if !ok { + log.Error("unsupported worker.Task: %+v", t) + return + } + log.Debugf("execute gc log. [regionId: %d, endIndex: %d]", logGcTask.RegionID, logGcTask.EndIdx) + collected, err := r.gcRaftLog(logGcTask.RaftEngine, logGcTask.RegionID, logGcTask.StartIdx, logGcTask.EndIdx) + if err != nil { + log.Errorf("failed to gc. [regionId: %d, collected: %d, err: %v]", logGcTask.RegionID, collected, err) + } else { + log.Debugf("collected log entries. 
[regionId: %d, entryCount: %d]", logGcTask.RegionID, collected) + } + r.reportCollected(collected) +} diff --git a/kv/raftstore/runner/region_task.go b/kv/raftstore/runner/region_task.go new file mode 100644 index 00000000..b971f85c --- /dev/null +++ b/kv/raftstore/runner/region_task.go @@ -0,0 +1,208 @@ +package runner + +import ( + "encoding/hex" + "fmt" + "time" + + "github.com/Connor1996/badger" + "github.com/juju/errors" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" +) + +// There're some tasks for region worker, such as: +// `TaskTypeRegionGen` which will cause the worker to generate a snapshot according to RegionId, +// `TaskTypeRegionApply` which will apply a snapshot to the region that id equals RegionId, +// `TaskTypeRegionDestroy` which will clean up the key range from StartKey to EndKey. + +type RegionTaskGen struct { + RegionId uint64 // specify the region which the task is for. + Notifier chan<- *eraftpb.Snapshot // when it finishes snapshot generating, it notifies notifier. +} + +type RegionTaskApply struct { + RegionId uint64 // specify the region which the task is for. + Notifier chan<- bool // when it finishes snapshot applying, it notifies notifier. + SnapMeta *eraftpb.SnapshotMetadata // the region meta information of the snapshot + StartKey []byte // `StartKey` and `EndKey` are origin region's range, it's used to clean up certain range of region before applying snapshot. + EndKey []byte +} + +type RegionTaskDestroy struct { + RegionId uint64 // specify the region which the task is for. + StartKey []byte // `StartKey` and `EndKey` are used to destroy certain range of region. + EndKey []byte +} + +type regionTaskHandler struct { + ctx *snapContext +} + +func NewRegionTaskHandler(engines *engine_util.Engines, mgr *snap.SnapManager) *regionTaskHandler { + return ®ionTaskHandler{ + ctx: &snapContext{ + engines: engines, + mgr: mgr, + }, + } +} + +func (r *regionTaskHandler) Handle(t worker.Task) { + switch t.(type) { + case *RegionTaskGen: + task := t.(*RegionTaskGen) + // It is safe for now to handle generating and applying snapshot concurrently, + // but it may not when merge is implemented. + r.ctx.handleGen(task.RegionId, task.Notifier) + case *RegionTaskApply: + task := t.(*RegionTaskApply) + r.ctx.handleApply(task.RegionId, task.Notifier, task.StartKey, task.EndKey, task.SnapMeta) + case *RegionTaskDestroy: + task := t.(*RegionTaskDestroy) + r.ctx.cleanUpRange(task.RegionId, task.StartKey, task.EndKey) + } +} + +type snapContext struct { + engines *engine_util.Engines + batchSize uint64 + mgr *snap.SnapManager +} + +// handleGen handles the task of generating snapshot of the Region. +func (snapCtx *snapContext) handleGen(regionId uint64, notifier chan<- *eraftpb.Snapshot) { + snap, err := doSnapshot(snapCtx.engines, snapCtx.mgr, regionId) + if err != nil { + log.Errorf("failed to generate snapshot!!!, [regionId: %d, err : %v]", regionId, err) + } else { + notifier <- snap + } +} + +// applySnap applies snapshot data of the Region. 
+func (snapCtx *snapContext) applySnap(regionId uint64, startKey, endKey []byte, snapMeta *eraftpb.SnapshotMetadata) error { + log.Infof("begin apply snap data. [regionId: %d]", regionId) + + // cleanUpOriginData clear up the region data before applying snapshot + snapCtx.cleanUpRange(regionId, startKey, endKey) + + snapKey := snap.SnapKey{RegionID: regionId, Index: snapMeta.Index, Term: snapMeta.Term} + snapCtx.mgr.Register(snapKey, snap.SnapEntryApplying) + defer snapCtx.mgr.Deregister(snapKey, snap.SnapEntryApplying) + + snapshot, err := snapCtx.mgr.GetSnapshotForApplying(snapKey) + if err != nil { + return errors.New(fmt.Sprintf("missing snapshot file %s", err)) + } + + t := time.Now() + applyOptions := snap.NewApplyOptions(snapCtx.engines.Kv, &metapb.Region{ + Id: regionId, + StartKey: startKey, + EndKey: endKey, + }) + if err := snapshot.Apply(*applyOptions); err != nil { + return err + } + + log.Infof("applying new data. [regionId: %d, timeTakes: %v]", regionId, time.Now().Sub(t)) + return nil +} + +// handleApply tries to apply the snapshot of the specified Region. It calls `applySnap` to do the actual work. +func (snapCtx *snapContext) handleApply(regionId uint64, notifier chan<- bool, startKey, endKey []byte, snapMeta *eraftpb.SnapshotMetadata) { + err := snapCtx.applySnap(regionId, startKey, endKey, snapMeta) + if err != nil { + notifier <- false + log.Fatalf("failed to apply snap!!!. err: %v", err) + } + notifier <- true +} + +// cleanUpRange cleans up the data within the range. +func (snapCtx *snapContext) cleanUpRange(regionId uint64, startKey, endKey []byte) { + if err := engine_util.DeleteRange(snapCtx.engines.Kv, startKey, endKey); err != nil { + log.Fatalf("failed to delete data in range, [regionId: %d, startKey: %s, endKey: %s, err: %v]", regionId, + hex.EncodeToString(startKey), hex.EncodeToString(endKey), err) + } else { + log.Infof("succeed in deleting data in range. [regionId: %d, startKey: %s, endKey: %s]", regionId, + hex.EncodeToString(startKey), hex.EncodeToString(endKey)) + } +} + +func getAppliedIdxTermForSnapshot(raft *badger.DB, kv *badger.Txn, regionId uint64) (uint64, uint64, error) { + applyState := new(rspb.RaftApplyState) + err := engine_util.GetMetaFromTxn(kv, meta.ApplyStateKey(regionId), applyState) + if err != nil { + return 0, 0, err + } + + idx := applyState.AppliedIndex + var term uint64 + if idx == applyState.TruncatedState.Index { + term = applyState.TruncatedState.Term + } else { + entry, err := meta.GetRaftEntry(raft, regionId, idx) + if err != nil { + return 0, 0, err + } else { + term = entry.GetTerm() + } + } + return idx, term, nil +} + +func doSnapshot(engines *engine_util.Engines, mgr *snap.SnapManager, regionId uint64) (*eraftpb.Snapshot, error) { + log.Debugf("begin to generate a snapshot. 
[regionId: %d]", regionId) + + txn := engines.Kv.NewTransaction(false) + + index, term, err := getAppliedIdxTermForSnapshot(engines.Raft, txn, regionId) + if err != nil { + return nil, err + } + + key := snap.SnapKey{RegionID: regionId, Index: index, Term: term} + mgr.Register(key, snap.SnapEntryGenerating) + defer mgr.Deregister(key, snap.SnapEntryGenerating) + + regionState := new(rspb.RegionLocalState) + err = engine_util.GetMetaFromTxn(txn, meta.RegionStateKey(regionId), regionState) + if err != nil { + panic(err) + } + if regionState.GetState() != rspb.PeerState_Normal { + return nil, errors.Errorf("snap job %d seems stale, skip", regionId) + } + + region := regionState.GetRegion() + confState := util.ConfStateFromRegion(region) + snapshot := &eraftpb.Snapshot{ + Metadata: &eraftpb.SnapshotMetadata{ + Index: key.Index, + Term: key.Term, + ConfState: &confState, + }, + } + s, err := mgr.GetSnapshotForBuilding(key) + if err != nil { + return nil, err + } + // Set snapshot data + snapshotData := &rspb.RaftSnapshotData{Region: region} + snapshotStatics := snap.SnapStatistics{} + err = s.Build(txn, region, snapshotData, &snapshotStatics, mgr) + if err != nil { + return nil, err + } + snapshot.Data, err = snapshotData.Marshal() + return snapshot, err +} diff --git a/kv/raftstore/runner/runner_test.go b/kv/raftstore/runner/runner_test.go new file mode 100644 index 00000000..9e385877 --- /dev/null +++ b/kv/raftstore/runner/runner_test.go @@ -0,0 +1,278 @@ +package runner + +import ( + "encoding/binary" + "io" + "io/ioutil" + "testing" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/codec" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// copySnapshot is a helper function to copy snapshot. +// Only used in tests. +func copySnapshot(to, from snap.Snapshot) error { + if !to.Exists() { + _, err := io.Copy(to, from) + if err != nil { + return errors.WithStack(err) + } + return to.Save() + } + return nil +} + +func newEnginesWithKVDb(t *testing.T, kv *badger.DB) *engine_util.Engines { + engines := new(engine_util.Engines) + engines.Kv = kv + var err error + engines.RaftPath, err = ioutil.TempDir("", "tinykv_raft") + require.Nil(t, err) + raftOpts := badger.DefaultOptions + raftOpts.Dir = engines.RaftPath + raftOpts.ValueDir = engines.RaftPath + raftOpts.ValueThreshold = 256 + engines.Raft, err = badger.Open(raftOpts) + require.Nil(t, err) + return engines +} + +func getTestDBForRegions(t *testing.T, path string, regions []uint64) *badger.DB { + db := openDB(t, path) + fillDBData(t, db) + for _, regionID := range regions { + // Put apply state into kv engine. + applyState := &rspb.RaftApplyState{ + AppliedIndex: 10, + TruncatedState: &rspb.RaftTruncatedState{ + Index: 10, + }, + } + require.Nil(t, engine_util.PutMeta(db, meta.ApplyStateKey(regionID), applyState)) + + // Put region info into kv engine. 
+ region := genTestRegion(regionID, 1, 1) + regionState := new(rspb.RegionLocalState) + regionState.Region = region + require.Nil(t, engine_util.PutMeta(db, meta.RegionStateKey(regionID), regionState)) + } + return db +} + +func genTestRegion(regionID, storeID, peerID uint64) *metapb.Region { + return &metapb.Region{ + Id: regionID, + StartKey: []byte(""), + EndKey: []byte(""), + RegionEpoch: &metapb.RegionEpoch{ + Version: 1, + ConfVer: 1, + }, + Peers: []*metapb.Peer{ + {StoreId: storeID, Id: peerID}, + }, + } +} + +func openDB(t *testing.T, dir string) *badger.DB { + opts := badger.DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + db, err := badger.Open(opts) + require.Nil(t, err) + return db +} + +func fillDBData(t *testing.T, db *badger.DB) { + // write some data for multiple cfs. + wb := new(engine_util.WriteBatch) + value := make([]byte, 32) + wb.SetCF(engine_util.CfDefault, []byte("key"), value) + wb.SetCF(engine_util.CfWrite, []byte("key"), value) + wb.SetCF(engine_util.CfLock, []byte("key"), value) + err := wb.WriteToDB(db) + require.Nil(t, err) +} + +func TestGcRaftLog(t *testing.T) { + engines := util.NewTestEngines() + defer engines.Destroy() + raftDb := engines.Raft + taskResCh := make(chan raftLogGcTaskRes, 1) + runner := raftLogGCTaskHandler{taskResCh: taskResCh} + + // generate raft logs + regionId := uint64(1) + raftWb := new(engine_util.WriteBatch) + for i := uint64(0); i < 100; i++ { + raftWb.SetMeta(meta.RaftLogKey(regionId, i), &eraftpb.Entry{Data: []byte("entry")}) + } + raftWb.WriteToDB(raftDb) + + type tempHolder struct { + raftLogGcTask worker.Task + expectedCollected uint64 + nonExistRange [2]uint64 + existRange [2]uint64 + } + + tbls := []tempHolder{ + { + raftLogGcTask: &RaftLogGCTask{ + RaftEngine: raftDb, + RegionID: regionId, + StartIdx: uint64(0), + EndIdx: uint64(10), + }, + expectedCollected: uint64(10), + nonExistRange: [...]uint64{0, 10}, + existRange: [...]uint64{10, 100}, + }, + + { + raftLogGcTask: &RaftLogGCTask{ + RaftEngine: raftDb, + RegionID: regionId, + StartIdx: uint64(0), + EndIdx: uint64(50), + }, + expectedCollected: uint64(40), + nonExistRange: [...]uint64{0, 50}, + existRange: [...]uint64{50, 100}, + }, + + { + raftLogGcTask: &RaftLogGCTask{ + RaftEngine: raftDb, + RegionID: regionId, + StartIdx: uint64(50), + EndIdx: uint64(50), + }, + expectedCollected: uint64(0), + nonExistRange: [...]uint64{0, 50}, + existRange: [...]uint64{50, 100}, + }, + + { + raftLogGcTask: &RaftLogGCTask{ + RaftEngine: raftDb, + RegionID: regionId, + StartIdx: uint64(50), + EndIdx: uint64(60), + }, + expectedCollected: uint64(10), + nonExistRange: [...]uint64{0, 60}, + existRange: [...]uint64{60, 100}, + }, + } + + for _, h := range tbls { + runner.Handle(h.raftLogGcTask) + res := <-taskResCh + assert.Equal(t, h.expectedCollected, uint64(res)) + raftLogMustNotExist(t, raftDb, 1, h.nonExistRange[0], h.nonExistRange[1]) + raftLogMustExist(t, raftDb, 1, h.existRange[0], h.existRange[1]) + } +} + +func raftLogMustNotExist(t *testing.T, db *badger.DB, regionId, startIdx, endIdx uint64) { + for i := startIdx; i < endIdx; i++ { + k := meta.RaftLogKey(regionId, i) + db.View(func(txn *badger.Txn) error { + _, err := txn.Get(k) + assert.Equal(t, err, badger.ErrKeyNotFound) + return nil + }) + } +} + +func raftLogMustExist(t *testing.T, db *badger.DB, regionId, startIdx, endIdx uint64) { + for i := startIdx; i < endIdx; i++ { + k := meta.RaftLogKey(regionId, i) + db.View(func(txn *badger.Txn) error { + item, err := txn.Get(k) + assert.Nil(t, err) + assert.NotNil(t, 
item) + return nil + }) + } +} + +func cleanUpTestEngineData(engines *engine_util.Engines) { + if err := engines.Destroy(); err != nil { + panic(err) + } +} + +type TaskResRouter struct { + ch chan<- message.Msg +} + +func (r *TaskResRouter) Send(regionID uint64, msg message.Msg) error { + r.ch <- msg + return nil +} + +func (r *TaskResRouter) SendRaftMessage(msg *rspb.RaftMessage) error { + return nil +} + +func (r *TaskResRouter) SendRaftCommand(req *raft_cmdpb.RaftCmdRequest, cb *message.Callback) error { + return nil +} + +func encodeKey(key []byte, ts uint64) []byte { + encodedKey := codec.EncodeBytes(key) + newKey := append(encodedKey, make([]byte, 8)...) + binary.BigEndian.PutUint64(newKey[len(newKey)-8:], ^ts) + return newKey +} + +func TestSplitCheck(t *testing.T) { + engines := util.NewTestEngines() + defer cleanUpTestEngineData(engines) + db := engines.Kv + taskResCh := make(chan message.Msg, 1) + + runner := &splitCheckHandler{ + engine: db, + router: &TaskResRouter{ch: taskResCh}, + checker: newSizeSplitChecker(100, 50), + } + + kvWb := new(engine_util.WriteBatch) + // the length of each kv pair is 21 + kvWb.SetCF(engine_util.CfDefault, encodeKey([]byte("k1"), 1), []byte("entry")) + kvWb.SetCF(engine_util.CfDefault, encodeKey([]byte("k1"), 2), []byte("entry")) + kvWb.SetCF(engine_util.CfDefault, encodeKey([]byte("k2"), 1), []byte("entry")) + kvWb.SetCF(engine_util.CfDefault, encodeKey([]byte("k2"), 2), []byte("entry")) + kvWb.SetCF(engine_util.CfDefault, encodeKey([]byte("k3"), 3), []byte("entry")) + kvWb.MustWriteToDB(db) + + task := &SplitCheckTask{ + Region: &metapb.Region{ + StartKey: []byte(""), + EndKey: []byte(""), + }, + } + + runner.Handle(task) + msg := <-taskResCh + split, ok := msg.Data.(*message.MsgSplitRegion) + assert.True(t, ok) + assert.Equal(t, split.SplitKey, codec.EncodeBytes([]byte("k2"))) +} diff --git a/kv/raftstore/runner/scheduler_task.go b/kv/raftstore/runner/scheduler_task.go new file mode 100644 index 00000000..d5a3f49f --- /dev/null +++ b/kv/raftstore/runner/scheduler_task.go @@ -0,0 +1,152 @@ +package runner + +import ( + "context" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/scheduler_client" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/shirou/gopsutil/disk" +) + +type SchedulerAskSplitTask struct { + Region *metapb.Region + SplitKey []byte + Peer *metapb.Peer + Callback *message.Callback +} + +type SchedulerRegionHeartbeatTask struct { + Region *metapb.Region + Peer *metapb.Peer + PendingPeers []*metapb.Peer + ApproximateSize *uint64 +} + +type SchedulerStoreHeartbeatTask struct { + Stats *schedulerpb.StoreStats + Engine *badger.DB + Path string +} + +type SchedulerTaskHandler struct { + storeID uint64 + SchedulerClient scheduler_client.Client + router message.RaftRouter +} + +func NewSchedulerTaskHandler(storeID uint64, SchedulerClient scheduler_client.Client, router message.RaftRouter) *SchedulerTaskHandler { + return &SchedulerTaskHandler{ + storeID: storeID, + SchedulerClient: SchedulerClient, + router: router, + } +} + +func (r *SchedulerTaskHandler) Handle(t worker.Task) { + switch t.(type) { + case *SchedulerAskSplitTask: + r.onAskSplit(t.(*SchedulerAskSplitTask)) + case 
*SchedulerRegionHeartbeatTask: + r.onHeartbeat(t.(*SchedulerRegionHeartbeatTask)) + case *SchedulerStoreHeartbeatTask: + r.onStoreHeartbeat(t.(*SchedulerStoreHeartbeatTask)) + default: + log.Error("unsupported worker.Task: %+v", t) + } +} + +func (r *SchedulerTaskHandler) Start() { + r.SchedulerClient.SetRegionHeartbeatResponseHandler(r.storeID, r.onRegionHeartbeatResponse) +} + +func (r *SchedulerTaskHandler) onRegionHeartbeatResponse(resp *schedulerpb.RegionHeartbeatResponse) { + if changePeer := resp.GetChangePeer(); changePeer != nil { + r.sendAdminRequest(resp.RegionId, resp.RegionEpoch, resp.TargetPeer, &raft_cmdpb.AdminRequest{ + CmdType: raft_cmdpb.AdminCmdType_ChangePeer, + ChangePeer: &raft_cmdpb.ChangePeerRequest{ + ChangeType: changePeer.ChangeType, + Peer: changePeer.Peer, + }, + }, message.NewCallback()) + } else if transferLeader := resp.GetTransferLeader(); transferLeader != nil { + r.sendAdminRequest(resp.RegionId, resp.RegionEpoch, resp.TargetPeer, &raft_cmdpb.AdminRequest{ + CmdType: raft_cmdpb.AdminCmdType_TransferLeader, + TransferLeader: &raft_cmdpb.TransferLeaderRequest{ + Peer: transferLeader.Peer, + }, + }, message.NewCallback()) + } +} + +func (r *SchedulerTaskHandler) onAskSplit(t *SchedulerAskSplitTask) { + resp, err := r.SchedulerClient.AskSplit(context.TODO(), t.Region) + if err != nil { + log.Error(err) + return + } + + aq := &raft_cmdpb.AdminRequest{ + CmdType: raft_cmdpb.AdminCmdType_Split, + Split: &raft_cmdpb.SplitRequest{ + SplitKey: t.SplitKey, + NewRegionId: resp.NewRegionId, + NewPeerIds: resp.NewPeerIds, + }, + } + r.sendAdminRequest(t.Region.GetId(), t.Region.GetRegionEpoch(), t.Peer, aq, t.Callback) +} + +func (r *SchedulerTaskHandler) onHeartbeat(t *SchedulerRegionHeartbeatTask) { + var size int64 + if t.ApproximateSize != nil { + size = int64(*t.ApproximateSize) + } + + req := &schedulerpb.RegionHeartbeatRequest{ + Region: t.Region, + Leader: t.Peer, + PendingPeers: t.PendingPeers, + ApproximateSize: uint64(size), + } + r.SchedulerClient.RegionHeartbeat(req) +} + +func (r *SchedulerTaskHandler) onStoreHeartbeat(t *SchedulerStoreHeartbeatTask) { + diskStat, err := disk.Usage(t.Path) + if err != nil { + log.Error(err) + return + } + + capacity := diskStat.Total + lsmSize, vlogSize := t.Engine.Size() + usedSize := t.Stats.UsedSize + uint64(lsmSize) + uint64(vlogSize) // t.Stats.UsedSize contains size of snapshot files. 
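+ // The capacity reported to the scheduler comes from disk.Usage, usedSize adds
+ // badger's LSM and value-log sizes on top of the snapshot files, and the
+ // available space is clamped at zero so an over-full store never underflows
+ // the unsigned subtraction below.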
+ available := uint64(0) + if capacity > usedSize { + available = capacity - usedSize + } + + t.Stats.Capacity = capacity + t.Stats.UsedSize = usedSize + t.Stats.Available = available + + r.SchedulerClient.StoreHeartbeat(context.TODO(), t.Stats) +} + +func (r *SchedulerTaskHandler) sendAdminRequest(regionID uint64, epoch *metapb.RegionEpoch, peer *metapb.Peer, req *raft_cmdpb.AdminRequest, callback *message.Callback) { + cmd := &raft_cmdpb.RaftCmdRequest{ + Header: &raft_cmdpb.RaftRequestHeader{ + RegionId: regionID, + Peer: peer, + RegionEpoch: epoch, + }, + AdminRequest: req, + } + r.router.SendRaftCommand(cmd, callback) +} diff --git a/kv/raftstore/runner/split_checker.go b/kv/raftstore/runner/split_checker.go new file mode 100644 index 00000000..12676122 --- /dev/null +++ b/kv/raftstore/runner/split_checker.go @@ -0,0 +1,134 @@ +package runner + +import ( + "encoding/hex" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/codec" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +type SplitCheckTask struct { + Region *metapb.Region +} + +type splitCheckHandler struct { + engine *badger.DB + router message.RaftRouter + checker *sizeSplitChecker +} + +func NewSplitCheckHandler(engine *badger.DB, router message.RaftRouter, conf *config.Config) *splitCheckHandler { + runner := &splitCheckHandler{ + engine: engine, + router: router, + checker: newSizeSplitChecker(conf.RegionMaxSize, conf.RegionSplitSize), + } + return runner +} + +/// run checks a region with split checkers to produce split keys and generates split admin command. +func (r *splitCheckHandler) Handle(t worker.Task) { + spCheckTask, ok := t.(*SplitCheckTask) + if !ok { + log.Error("unsupported worker.Task: %+v", t) + return + } + region := spCheckTask.Region + regionId := region.Id + log.Debugf("executing split check worker.Task: [regionId: %d, startKey: %s, endKey: %s]", regionId, + hex.EncodeToString(region.StartKey), hex.EncodeToString(region.EndKey)) + key := r.splitCheck(regionId, region.StartKey, region.EndKey) + if key != nil { + _, userKey, err := codec.DecodeBytes(key) + if err == nil { + // It's not a raw key. + // To make sure the keys of same user key locate in one Region, decode and then encode to truncate the timestamp + key = codec.EncodeBytes(userKey) + } + msg := message.Msg{ + Type: message.MsgTypeSplitRegion, + RegionID: regionId, + Data: &message.MsgSplitRegion{ + RegionEpoch: region.GetRegionEpoch(), + SplitKey: key, + }, + } + err = r.router.Send(regionId, msg) + if err != nil { + log.Warnf("failed to send check result: [regionId: %d, err: %v]", regionId, err) + } + } else { + log.Debugf("no need to send, split key not found: [regionId: %v]", regionId) + } +} + +/// SplitCheck gets the split keys by scanning the range. 
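+/// The sizeSplitChecker below accumulates len(key)+valueSize for every entry in
+/// CfDefault: the first key that pushes the running total past splitSize is
+/// recorded as the candidate split key, the scan stops once the total exceeds
+/// maxSize, and getSplitKey discards the candidate if the whole range stays
+/// under maxSize so small regions are never split.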
+func (r *splitCheckHandler) splitCheck(regionID uint64, startKey, endKey []byte) []byte { + txn := r.engine.NewTransaction(false) + defer txn.Discard() + + r.checker.reset() + it := engine_util.NewCFIterator(engine_util.CfDefault, txn) + defer it.Close() + for it.Seek(startKey); it.Valid(); it.Next() { + item := it.Item() + key := item.Key() + if engine_util.ExceedEndKey(key, endKey) { + // update region size + r.router.Send(regionID, message.Msg{ + Type: message.MsgTypeRegionApproximateSize, + Data: r.checker.currentSize, + }) + break + } + if r.checker.onKv(key, item) { + break + } + } + return r.checker.getSplitKey() +} + +type sizeSplitChecker struct { + maxSize uint64 + splitSize uint64 + + currentSize uint64 + splitKey []byte +} + +func newSizeSplitChecker(maxSize, splitSize uint64) *sizeSplitChecker { + return &sizeSplitChecker{ + maxSize: maxSize, + splitSize: splitSize, + } +} + +func (checker *sizeSplitChecker) reset() { + checker.currentSize = 0 + checker.splitKey = nil +} + +func (checker *sizeSplitChecker) onKv(key []byte, item engine_util.DBItem) bool { + valueSize := uint64(item.ValueSize()) + size := uint64(len(key)) + valueSize + checker.currentSize += size + if checker.currentSize > checker.splitSize && checker.splitKey == nil { + checker.splitKey = util.SafeCopy(key) + } + return checker.currentSize > checker.maxSize +} + +func (checker *sizeSplitChecker) getSplitKey() []byte { + // Make sure not to split when less than maxSize for last part + if checker.currentSize < checker.maxSize { + checker.splitKey = nil + } + return checker.splitKey +} diff --git a/kv/raftstore/scheduler_client/client.go b/kv/raftstore/scheduler_client/client.go new file mode 100644 index 00000000..d8f5db3f --- /dev/null +++ b/kv/raftstore/scheduler_client/client.go @@ -0,0 +1,554 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler_client + +import ( + "context" + "net/url" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/juju/errors" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "google.golang.org/grpc" +) + +// Client is a Scheduler client. +// It should not be used after calling Close(). 
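+//
+// A minimal usage sketch (the endpoint and tag below are placeholders):
+//
+//	c, err := scheduler_client.NewClient([]string{"127.0.0.1:2379"}, "[store 1]")
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer c.Close()
+//	id, _ := c.AllocID(context.TODO())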
+type Client interface { + GetClusterID(ctx context.Context) uint64 + AllocID(ctx context.Context) (uint64, error) + Bootstrap(ctx context.Context, store *metapb.Store) (*schedulerpb.BootstrapResponse, error) + IsBootstrapped(ctx context.Context) (bool, error) + PutStore(ctx context.Context, store *metapb.Store) error + GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) + GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) + GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) + AskSplit(ctx context.Context, region *metapb.Region) (*schedulerpb.AskSplitResponse, error) + StoreHeartbeat(ctx context.Context, stats *schedulerpb.StoreStats) error + RegionHeartbeat(*schedulerpb.RegionHeartbeatRequest) error + SetRegionHeartbeatResponseHandler(storeID uint64, h func(*schedulerpb.RegionHeartbeatResponse)) + Close() +} + +const ( + schedulerTimeout = time.Second + retryInterval = time.Second + maxInitClusterRetries = 100 + maxRetryCount = 10 +) + +var ( + // errFailInitClusterID is returned when failed to load clusterID from all supplied Scheduler addresses. + errFailInitClusterID = errors.New("[scheduler] failed to get cluster id") +) + +type client struct { + urls []string + clusterID uint64 + tag string + + connMu struct { + sync.RWMutex + clientConns map[string]*grpc.ClientConn + leader string + } + checkLeaderCh chan struct{} + + receiveRegionHeartbeatCh chan *schedulerpb.RegionHeartbeatResponse + regionCh chan *schedulerpb.RegionHeartbeatRequest + pendingRequest *schedulerpb.RegionHeartbeatRequest + + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + + heartbeatHandler atomic.Value +} + +// NewClient creates a Scheduler client. +func NewClient(pdAddrs []string, tag string) (Client, error) { + ctx, cancel := context.WithCancel(context.Background()) + urls := make([]string, 0, len(pdAddrs)) + for _, addr := range pdAddrs { + if strings.Contains(addr, "://") { + urls = append(urls, addr) + } else { + urls = append(urls, "http://"+addr) + } + } + log.Infof("[%s][scheduler] create scheduler client with endpoints %v", tag, urls) + + c := &client{ + urls: urls, + receiveRegionHeartbeatCh: make(chan *schedulerpb.RegionHeartbeatResponse, 1), + checkLeaderCh: make(chan struct{}, 1), + ctx: ctx, + cancel: cancel, + tag: tag, + regionCh: make(chan *schedulerpb.RegionHeartbeatRequest, 64), + } + c.connMu.clientConns = make(map[string]*grpc.ClientConn) + + var ( + err error + members *schedulerpb.GetMembersResponse + ) + for i := 0; i < maxRetryCount; i++ { + if members, err = c.updateLeader(); err == nil { + break + } + time.Sleep(retryInterval) + } + if err != nil { + return nil, err + } + + c.clusterID = members.GetHeader().GetClusterId() + log.Infof("[%s][scheduler] init cluster id %v", tag, c.clusterID) + c.wg.Add(2) + go c.checkLeaderLoop() + go c.heartbeatStreamLoop() + + return c, nil +} + +func (c *client) schedulerUpdateLeader() { + select { + case c.checkLeaderCh <- struct{}{}: + default: + } +} + +func (c *client) checkLeaderLoop() { + defer c.wg.Done() + + ctx, cancel := context.WithCancel(c.ctx) + defer cancel() + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for { + select { + case <-c.checkLeaderCh: + case <-ticker.C: + case <-ctx.Done(): + return + } + + if _, err := c.updateLeader(); err != nil { + log.Errorf("[scheduler] failed updateLeader, err: %s", err) + } + } +} + +func (c *client) updateLeader() (*schedulerpb.GetMembersResponse, error) { + for _, u := range 
c.urls { + ctx, cancel := context.WithTimeout(c.ctx, schedulerTimeout) + members, err := c.getMembers(ctx, u) + cancel() + if err != nil || members.GetLeader() == nil || len(members.GetLeader().GetClientUrls()) == 0 { + select { + case <-c.ctx.Done(): + return nil, err + default: + continue + } + } + + c.updateURLs(members.GetMembers(), members.GetLeader()) + return members, c.switchLeader(members.GetLeader().GetClientUrls()) + } + return nil, errors.Errorf("failed to get leader from %v", c.urls) +} + +func (c *client) updateURLs(members []*schedulerpb.Member, leader *schedulerpb.Member) { + urls := make([]string, 0, len(members)) + for _, m := range members { + if m.GetMemberId() == leader.GetMemberId() { + continue + } + urls = append(urls, m.GetClientUrls()...) + } + c.urls = append(urls, leader.GetClientUrls()...) +} + +func (c *client) switchLeader(addrs []string) error { + addr := addrs[0] + + c.connMu.RLock() + oldLeader := c.connMu.leader + c.connMu.RUnlock() + + if addr == oldLeader { + return nil + } + + log.Infof("[scheduler] switch leader, new-leader: %s, old-leader: %s", addr, oldLeader) + if _, err := c.getOrCreateConn(addr); err != nil { + return err + } + + c.connMu.Lock() + c.connMu.leader = addr + c.connMu.Unlock() + return nil +} + +func (c *client) getMembers(ctx context.Context, url string) (*schedulerpb.GetMembersResponse, error) { + cc, err := c.getOrCreateConn(url) + if err != nil { + return nil, err + } + return schedulerpb.NewSchedulerClient(cc).GetMembers(ctx, new(schedulerpb.GetMembersRequest)) +} + +func (c *client) getOrCreateConn(addr string) (*grpc.ClientConn, error) { + c.connMu.RLock() + conn, ok := c.connMu.clientConns[addr] + c.connMu.RUnlock() + if ok { + return conn, nil + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + cc, err := grpc.Dial(u.Host, grpc.WithInsecure()) + if err != nil { + return nil, err + } + c.connMu.Lock() + defer c.connMu.Unlock() + if old, ok := c.connMu.clientConns[addr]; ok { + cc.Close() + return old, nil + } + c.connMu.clientConns[addr] = cc + return cc, nil +} + +func (c *client) leaderClient() schedulerpb.SchedulerClient { + c.connMu.RLock() + defer c.connMu.RUnlock() + + return schedulerpb.NewSchedulerClient(c.connMu.clientConns[c.connMu.leader]) +} + +func (c *client) doRequest(ctx context.Context, f func(context.Context, schedulerpb.SchedulerClient) error) error { + var err error + for i := 0; i < maxRetryCount; i++ { + ctx1, cancel := context.WithTimeout(ctx, schedulerTimeout) + err = f(ctx1, c.leaderClient()) + cancel() + if err == nil { + return nil + } + + c.schedulerUpdateLeader() + select { + case <-time.After(retryInterval): + continue + case <-ctx.Done(): + return ctx.Err() + } + } + return errors.New("failed too many times") +} + +func (c *client) heartbeatStreamLoop() { + defer c.wg.Done() + + for { + select { + case <-c.ctx.Done(): + return + default: + } + + ctx, cancel := context.WithCancel(c.ctx) + c.connMu.RLock() + stream, err := c.leaderClient().RegionHeartbeat(ctx) + c.connMu.RUnlock() + if err != nil { + cancel() + c.schedulerUpdateLeader() + time.Sleep(retryInterval) + continue + } + + errCh := make(chan error, 1) + wg := &sync.WaitGroup{} + wg.Add(2) + + go c.reportRegionHeartbeat(ctx, stream, errCh, wg) + go c.receiveRegionHeartbeat(stream, errCh, wg) + select { + case err := <-errCh: + log.Warnf("[%s][scheduler] heartbeat stream get error: %s ", c.tag, err) + cancel() + c.schedulerUpdateLeader() + time.Sleep(retryInterval) + wg.Wait() + case <-c.ctx.Done(): + 
log.Info("cancel heartbeat stream loop") + cancel() + return + } + } +} + +func (c *client) receiveRegionHeartbeat(stream schedulerpb.Scheduler_RegionHeartbeatClient, errCh chan error, wg *sync.WaitGroup) { + defer wg.Done() + for { + resp, err := stream.Recv() + if err != nil { + errCh <- err + return + } + + if h := c.heartbeatHandler.Load(); h != nil { + h.(func(*schedulerpb.RegionHeartbeatResponse))(resp) + } + } +} + +func (c *client) reportRegionHeartbeat(ctx context.Context, stream schedulerpb.Scheduler_RegionHeartbeatClient, errCh chan error, wg *sync.WaitGroup) { + defer wg.Done() + for { + request, ok := c.getNextHeartbeatRequest(ctx) + if !ok { + return + } + + request.Header = c.requestHeader() + err := stream.Send(request) + if err != nil { + c.pendingRequest = request + errCh <- err + return + } + } +} + +func (c *client) getNextHeartbeatRequest(ctx context.Context) (*schedulerpb.RegionHeartbeatRequest, bool) { + if c.pendingRequest != nil { + req := c.pendingRequest + c.pendingRequest = nil + return req, true + } + + select { + case <-ctx.Done(): + return nil, false + case request, ok := <-c.regionCh: + if !ok { + return nil, false + } + return request, true + } +} + +func (c *client) Close() { + c.cancel() + c.wg.Wait() + c.connMu.Lock() + defer c.connMu.Unlock() + for _, cc := range c.connMu.clientConns { + cc.Close() + } +} + +func (c *client) GetClusterID(context.Context) uint64 { + return c.clusterID +} + +func (c *client) AllocID(ctx context.Context) (uint64, error) { + var resp *schedulerpb.AllocIDResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.AllocID(ctx, &schedulerpb.AllocIDRequest{ + Header: c.requestHeader(), + }) + return err1 + }) + if err != nil { + return 0, err + } + return resp.GetId(), nil +} + +func (c *client) Bootstrap(ctx context.Context, store *metapb.Store) (resp *schedulerpb.BootstrapResponse, err error) { + err = c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.Bootstrap(ctx, &schedulerpb.BootstrapRequest{ + Header: c.requestHeader(), + Store: store, + }) + return err1 + }) + return resp, err +} + +func (c *client) IsBootstrapped(ctx context.Context) (bool, error) { + var resp *schedulerpb.IsBootstrappedResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.IsBootstrapped(ctx, &schedulerpb.IsBootstrappedRequest{Header: c.requestHeader()}) + return err1 + }) + if err != nil { + return false, err + } + if herr := resp.Header.GetError(); herr != nil { + return false, errors.New(herr.String()) + } + return resp.Bootstrapped, nil +} + +func (c *client) PutStore(ctx context.Context, store *metapb.Store) error { + var resp *schedulerpb.PutStoreResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.PutStore(ctx, &schedulerpb.PutStoreRequest{ + Header: c.requestHeader(), + Store: store, + }) + return err1 + }) + if err != nil { + return err + } + if herr := resp.Header.GetError(); herr != nil { + return errors.New(herr.String()) + } + return nil +} + +func (c *client) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + var resp *schedulerpb.GetStoreResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = 
client.GetStore(ctx, &schedulerpb.GetStoreRequest{ + Header: c.requestHeader(), + StoreId: storeID, + }) + return err1 + }) + if err != nil { + return nil, err + } + if herr := resp.Header.GetError(); herr != nil { + return nil, errors.New(herr.String()) + } + return resp.Store, nil +} + +func (c *client) GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + var resp *schedulerpb.GetRegionResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.GetRegion(ctx, &schedulerpb.GetRegionRequest{ + Header: c.requestHeader(), + RegionKey: key, + }) + return err1 + }) + if err != nil { + return nil, nil, err + } + if herr := resp.Header.GetError(); herr != nil { + return nil, nil, errors.New(herr.String()) + } + return resp.Region, resp.Leader, nil +} + +func (c *client) GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) { + var resp *schedulerpb.GetRegionResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.GetRegionByID(ctx, &schedulerpb.GetRegionByIDRequest{ + Header: c.requestHeader(), + RegionId: regionID, + }) + return err1 + }) + if err != nil { + return nil, nil, err + } + if herr := resp.Header.GetError(); herr != nil { + return nil, nil, errors.New(herr.String()) + } + return resp.Region, resp.Leader, nil +} + +func (c *client) AskSplit(ctx context.Context, region *metapb.Region) (resp *schedulerpb.AskSplitResponse, err error) { + err = c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.AskSplit(ctx, &schedulerpb.AskSplitRequest{ + Header: c.requestHeader(), + Region: region, + }) + return err1 + }) + if err != nil { + return nil, err + } + if herr := resp.Header.GetError(); herr != nil { + return nil, errors.New(herr.String()) + } + return resp, nil +} + +func (c *client) StoreHeartbeat(ctx context.Context, stats *schedulerpb.StoreStats) error { + var resp *schedulerpb.StoreHeartbeatResponse + err := c.doRequest(ctx, func(ctx context.Context, client schedulerpb.SchedulerClient) error { + var err1 error + resp, err1 = client.StoreHeartbeat(ctx, &schedulerpb.StoreHeartbeatRequest{ + Header: c.requestHeader(), + Stats: stats, + }) + return err1 + }) + if err != nil { + return err + } + if herr := resp.Header.GetError(); herr != nil { + return errors.New(herr.String()) + } + return nil +} + +func (c *client) RegionHeartbeat(request *schedulerpb.RegionHeartbeatRequest) error { + c.regionCh <- request + return nil +} + +func (c *client) SetRegionHeartbeatResponseHandler(_ uint64, h func(*schedulerpb.RegionHeartbeatResponse)) { + if h == nil { + h = func(*schedulerpb.RegionHeartbeatResponse) {} + } + c.heartbeatHandler.Store(h) +} + +func (c *client) requestHeader() *schedulerpb.RequestHeader { + return &schedulerpb.RequestHeader{ + ClusterId: c.clusterID, + } +} diff --git a/kv/raftstore/snap/snap.go b/kv/raftstore/snap/snap.go new file mode 100644 index 00000000..65dc45d4 --- /dev/null +++ b/kv/raftstore/snap/snap.go @@ -0,0 +1,783 @@ +package snap + +import ( + "fmt" + "hash" + "hash/crc32" + "io" + "os" + "path/filepath" + "strings" + "sync/atomic" + "time" + + "github.com/Connor1996/badger" + "github.com/Connor1996/badger/table" + + "github.com/pingcap-incubator/tinykv/kv/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + 
"github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap/errors" +) + +type SnapStateType int + +const ( + SnapState_Relax SnapStateType = 0 + iota + SnapState_Generating + SnapState_Applying +) + +type SnapState struct { + StateType SnapStateType + Receiver chan *eraftpb.Snapshot +} + +const ( + snapGenPrefix = "gen" // Name prefix for the self-generated snapshot file. + snapRevPrefix = "rev" // Name prefix for the received snapshot file. + sstFileSuffix = ".sst" + tmpFileSuffix = ".tmp" + cloneFileSuffix = ".clone" + metaFileSuffix = ".meta" + deleteRetryMaxTime = 6 + deleteRetryDuration = 500 * time.Millisecond +) + +type ApplySnapAbortError string + +func (e ApplySnapAbortError) Error() string { + return string(e) +} + +var ( + errAbort = ApplySnapAbortError("abort") +) + +type SnapKeyWithSending struct { + SnapKey SnapKey + IsSending bool +} + +type SnapKey struct { + RegionID uint64 + Term uint64 + Index uint64 +} + +func (k SnapKey) String() string { + return fmt.Sprintf("%d_%d_%d", k.RegionID, k.Term, k.Index) +} + +func SnapKeyFromRegionSnap(regionID uint64, snap *eraftpb.Snapshot) SnapKey { + return SnapKey{ + RegionID: regionID, + Term: snap.Metadata.Term, + Index: snap.Metadata.Index, + } +} + +func SnapKeyFromSnap(snap *eraftpb.Snapshot) (SnapKey, error) { + data := new(rspb.RaftSnapshotData) + err := data.Unmarshal(snap.Data) + if err != nil { + return SnapKey{}, err + } + return SnapKeyFromRegionSnap(data.Region.Id, snap), nil +} + +type SnapStatistics struct { + Size uint64 + KVCount int +} + +type ApplyOptions struct { + DB *badger.DB + Region *metapb.Region +} + +func NewApplyOptions(db *badger.DB, region *metapb.Region) *ApplyOptions { + return &ApplyOptions{ + DB: db, + Region: region, + } +} + +// `Snapshot` is an interface for snapshot. +// It's used in these scenarios: +// 1. build local snapshot +// 2. read local snapshot and then replicate it to remote raftstores +// 3. receive snapshot from remote raftstore and write it to local storage +// 4. apply snapshot +// 5. snapshot gc +type Snapshot interface { + io.Reader + io.Writer + Build(dbSnap *badger.Txn, region *metapb.Region, snapData *rspb.RaftSnapshotData, stat *SnapStatistics, deleter SnapshotDeleter) error + Path() string + Exists() bool + Delete() + Meta() (os.FileInfo, error) + TotalSize() uint64 + Save() error + Apply(option ApplyOptions) error +} + +// `SnapshotDeleter` is a trait for deleting snapshot. +// It's used to ensure that the snapshot deletion happens under the protection of locking +// to avoid race case for concurrent read/write. +type SnapshotDeleter interface { + // DeleteSnapshot returns true if it successfully delete the specified snapshot. 
+ DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool +} + +func retryDeleteSnapshot(deleter SnapshotDeleter, key SnapKey, snap Snapshot) bool { + for i := 0; i < deleteRetryMaxTime; i++ { + if deleter.DeleteSnapshot(key, snap, true) { + return true + } + time.Sleep(deleteRetryDuration) + } + return false +} + +func genSnapshotMeta(cfFiles []*CFFile) (*rspb.SnapshotMeta, error) { + cfMetas := make([]*rspb.SnapshotCFFile, 0, len(engine_util.CFs)) + for _, cfFile := range cfFiles { + var found bool + for _, snapCF := range engine_util.CFs { + if snapCF == cfFile.CF { + found = true + break + } + } + if !found { + return nil, errors.Errorf("failed to encode invalid snapshot CF %s", cfFile.CF) + } + cfMeta := &rspb.SnapshotCFFile{ + Cf: cfFile.CF, + Size_: cfFile.Size, + Checksum: cfFile.Checksum, + } + cfMetas = append(cfMetas, cfMeta) + } + return &rspb.SnapshotMeta{ + CfFiles: cfMetas, + }, nil +} + +func checkFileSize(path string, expectedSize uint64) error { + size, err := util.GetFileSize(path) + if err != nil { + return err + } + if size != expectedSize { + return errors.Errorf("invalid size %d for snapshot cf file %s, expected %d", size, path, expectedSize) + } + return nil +} + +func checkFileChecksum(path string, expectedChecksum uint32) error { + checksum, err := util.CalcCRC32(path) + if err != nil { + return err + } + if checksum != expectedChecksum { + return errors.Errorf("invalid checksum %d for snapshot cf file %s, expected %d", + checksum, path, expectedChecksum) + } + return nil +} + +func checkFileSizeAndChecksum(path string, expectedSize uint64, expectedChecksum uint32) error { + err := checkFileSize(path, expectedSize) + if err == nil { + err = checkFileChecksum(path, expectedChecksum) + } + return err +} + +type CFFile struct { + CF string + Path string + TmpPath string + SstWriter *table.Builder + File *os.File + KVCount int + Size uint64 + WrittenSize uint64 + Checksum uint32 + WriteDigest hash.Hash32 +} + +type MetaFile struct { + Meta *rspb.SnapshotMeta + Path string + File *os.File + + // for writing snapshot + TmpPath string +} + +var _ Snapshot = new(Snap) + +type Snap struct { + key SnapKey + displayPath string + CFFiles []*CFFile + cfIndex int + + MetaFile *MetaFile + SizeTrack *int64 + holdTmpFiles bool +} + +func NewSnap(dir string, key SnapKey, sizeTrack *int64, isSending, toBuild bool, + deleter SnapshotDeleter) (*Snap, error) { + if !util.DirExists(dir) { + err := os.MkdirAll(dir, 0700) + if err != nil { + return nil, errors.WithStack(err) + } + } + var snapPrefix string + if isSending { + snapPrefix = snapGenPrefix + } else { + snapPrefix = snapRevPrefix + } + prefix := fmt.Sprintf("%s_%s", snapPrefix, key) + displayPath := getDisplayPath(dir, prefix) + cfFiles := make([]*CFFile, 0, len(engine_util.CFs)) + for _, cf := range engine_util.CFs { + fileName := fmt.Sprintf("%s_%s%s", prefix, cf, sstFileSuffix) + path := filepath.Join(dir, fileName) + tmpPath := path + tmpFileSuffix + cfFile := &CFFile{ + CF: cf, + Path: path, + TmpPath: tmpPath, + } + cfFiles = append(cfFiles, cfFile) + } + metaFileName := fmt.Sprintf("%s%s", prefix, metaFileSuffix) + metaFilePath := filepath.Join(dir, metaFileName) + metaTmpPath := metaFilePath + tmpFileSuffix + metaFile := &MetaFile{ + Path: metaFilePath, + TmpPath: metaTmpPath, + } + s := &Snap{ + key: key, + displayPath: displayPath, + CFFiles: cfFiles, + MetaFile: metaFile, + SizeTrack: sizeTrack, + } + + // load snapshot meta if meta file exists. 
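+ // If the meta cannot be read and the snapshot is being rebuilt (toBuild), the
+ // stale files are dropped via retryDeleteSnapshot and the build starts over;
+ // otherwise the load error is returned to the caller.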
+ if util.FileExists(metaFile.Path) { + err := s.loadSnapMeta() + if err != nil { + if !toBuild { + return nil, err + } + log.Warnf("failed to load existent snapshot meta when try to build %s: %v", s.Path(), err) + if !retryDeleteSnapshot(deleter, key, s) { + log.Warnf("failed to delete snapshot %s because it's already registered elsewhere", s.Path()) + return nil, err + } + } + } + return s, nil +} + +func NewSnapForBuilding(dir string, key SnapKey, sizeTrack *int64, deleter SnapshotDeleter) (*Snap, error) { + s, err := NewSnap(dir, key, sizeTrack, true, true, deleter) + if err != nil { + return nil, err + } + err = s.initForBuilding() + if err != nil { + return nil, err + } + return s, nil +} + +func NewSnapForSending(dir string, key SnapKey, sizeTrack *int64, deleter SnapshotDeleter) (*Snap, error) { + s, err := NewSnap(dir, key, sizeTrack, true, false, deleter) + if err != nil { + return nil, err + } + if !s.Exists() { + // Skip the initialization below if it doesn't exists. + return s, nil + } + for _, cfFile := range s.CFFiles { + // initialize cf file size and reader + if cfFile.Size > 0 { + cfFile.File, err = os.Open(cfFile.Path) + if err != nil { + return nil, errors.WithStack(err) + } + } + } + return s, nil +} + +func NewSnapForReceiving(dir string, key SnapKey, snapshotMeta *rspb.SnapshotMeta, + sizeTrack *int64, deleter SnapshotDeleter) (*Snap, error) { + s, err := NewSnap(dir, key, sizeTrack, false, false, deleter) + if err != nil { + return nil, err + } + err = s.setSnapshotMeta(snapshotMeta) + if err != nil { + return nil, err + } + if s.Exists() { + return s, nil + } + f, err := os.OpenFile(s.MetaFile.TmpPath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return nil, err + } + s.MetaFile.File = f + s.holdTmpFiles = true + + for _, cfFile := range s.CFFiles { + if cfFile.Size == 0 { + continue + } + f, err = os.OpenFile(cfFile.TmpPath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return nil, err + } + cfFile.File = f + cfFile.WriteDigest = crc32.NewIEEE() + } + return s, nil +} + +func NewSnapForApplying(dir string, key SnapKey, sizeTrack *int64, deleter SnapshotDeleter) (*Snap, error) { + return NewSnap(dir, key, sizeTrack, false, false, deleter) +} + +func (s *Snap) initForBuilding() error { + if s.Exists() { + return nil + } + file, err := os.OpenFile(s.MetaFile.TmpPath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + s.MetaFile.File = file + s.holdTmpFiles = true + for _, cfFile := range s.CFFiles { + file, err = os.OpenFile(cfFile.TmpPath, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + cfFile.SstWriter = table.NewExternalTableBuilder(file, nil, badger.DefaultOptions.TableBuilderOptions) + } + return nil +} + +func (s *Snap) readSnapshotMeta() (*rspb.SnapshotMeta, error) { + fi, err := os.Stat(s.MetaFile.Path) + if err != nil { + return nil, errors.WithStack(err) + } + file, err := os.Open(s.MetaFile.Path) + if err != nil { + return nil, errors.WithStack(err) + } + size := fi.Size() + buf := make([]byte, size) + _, err = io.ReadFull(file, buf) + if err != nil { + return nil, errors.WithStack(err) + } + snapshotMeta := new(rspb.SnapshotMeta) + err = snapshotMeta.Unmarshal(buf) + if err != nil { + return nil, errors.WithStack(err) + } + return snapshotMeta, nil +} + +func (s *Snap) setSnapshotMeta(snapshotMeta *rspb.SnapshotMeta) error { + if len(snapshotMeta.CfFiles) != len(s.CFFiles) { + return errors.Errorf("invalid CF number of snapshot meta, expect %d, got %d", + len(s.CFFiles), len(snapshotMeta.CfFiles)) + } + for 
i, cfFile := range s.CFFiles { + meta := snapshotMeta.CfFiles[i] + if meta.Cf != cfFile.CF { + return errors.Errorf("invalid %d CF in snapshot meta, expect %s, got %s", i, cfFile.CF, meta.Cf) + } + if util.FileExists(cfFile.Path) { + // Check only the file size for `exists()` to work correctly. + err := checkFileSize(cfFile.Path, meta.GetSize_()) + if err != nil { + return err + } + } + cfFile.Size = uint64(meta.GetSize_()) + cfFile.Checksum = meta.GetChecksum() + } + s.MetaFile.Meta = snapshotMeta + return nil +} + +func (s *Snap) loadSnapMeta() error { + snapshotMeta, err := s.readSnapshotMeta() + if err != nil { + return err + } + err = s.setSnapshotMeta(snapshotMeta) + if err != nil { + return err + } + // check if there is a data corruption when the meta file exists + // but cf files are deleted. + if !s.Exists() { + return errors.Errorf("snapshot %s is corrupted, some cf file is missing", s.Path()) + } + return nil +} + +func getDisplayPath(dir string, prefix string) string { + cfNames := "(" + strings.Join(engine_util.CFs[:], "|") + ")" + return fmt.Sprintf("%s/%s_%s%s", dir, prefix, cfNames, sstFileSuffix) +} + +func (s *Snap) validate() error { + for _, cfFile := range s.CFFiles { + if cfFile.Size == 0 { + // Skip empty file. The checksum of this cf file should be 0 and + // this is checked when loading the snapshot meta. + continue + } + // TODO: prepare and validate for ingestion + } + return nil +} + +func (s *Snap) saveCFFiles() error { + for _, cfFile := range s.CFFiles { + if cfFile.KVCount > 0 { + err := cfFile.SstWriter.Finish() + if err != nil { + return err + } + } + cfFile.SstWriter.Close() + size, err := util.GetFileSize(cfFile.TmpPath) + if err != nil { + return err + } + if size > 0 { + err = os.Rename(cfFile.TmpPath, cfFile.Path) + if err != nil { + return errors.WithStack(err) + } + cfFile.Size = size + // add size + atomic.AddInt64(s.SizeTrack, int64(size)) + cfFile.Checksum, err = util.CalcCRC32(cfFile.Path) + if err != nil { + return err + } + } else { + // Clean up the `tmp_path` if this cf file is empty. 
+ _, err = util.DeleteFileIfExists(cfFile.TmpPath) + if err != nil { + return err + } + } + } + return nil +} + +func (s *Snap) saveMetaFile() error { + bin, err := s.MetaFile.Meta.Marshal() + if err != nil { + return errors.WithStack(err) + } + _, err = s.MetaFile.File.Write(bin) + if err != nil { + return errors.WithStack(err) + } + err = os.Rename(s.MetaFile.TmpPath, s.MetaFile.Path) + if err != nil { + return errors.WithStack(err) + } + s.holdTmpFiles = false + return nil +} + +func (s *Snap) Build(dbSnap *badger.Txn, region *metapb.Region, snapData *rspb.RaftSnapshotData, stat *SnapStatistics, deleter SnapshotDeleter) error { + if s.Exists() { + err := s.validate() + if err == nil { + // set snapshot meta data + snapData.FileSize = s.TotalSize() + snapData.Meta = s.MetaFile.Meta + return nil + } + log.Errorf("[region %d] file %s is corrupted, will rebuild: %v", region.Id, s.Path(), err) + if !retryDeleteSnapshot(deleter, s.key, s) { + log.Errorf("[region %d] failed to delete corrupted snapshot %s because it's already registered elsewhere", + region.Id, s.Path()) + return err + } + err = s.initForBuilding() + if err != nil { + return err + } + } + + builder := newSnapBuilder(s.CFFiles, dbSnap, region) + err := builder.build() + if err != nil { + return err + } + log.Infof("region %d scan snapshot %s, key count %d, size %d", region.Id, s.Path(), builder.kvCount, builder.size) + err = s.saveCFFiles() + if err != nil { + return err + } + stat.KVCount = builder.kvCount + snapshotMeta, err := genSnapshotMeta(s.CFFiles) + if err != nil { + return err + } + s.MetaFile.Meta = snapshotMeta + err = s.saveMetaFile() + if err != nil { + return err + } + totalSize := s.TotalSize() + stat.Size = totalSize + // set snapshot meta data + snapData.FileSize = totalSize + snapData.Meta = s.MetaFile.Meta + return nil +} + +func (s *Snap) Path() string { + return s.displayPath +} + +func (s *Snap) Exists() bool { + for _, cfFile := range s.CFFiles { + if cfFile.Size > 0 && !util.FileExists(cfFile.Path) { + return false + } + } + return util.FileExists(s.MetaFile.Path) +} + +func (s *Snap) Delete() { + log.Debugf("deleting %s", s.Path()) + for _, cfFile := range s.CFFiles { + if s.holdTmpFiles { + _, err := util.DeleteFileIfExists(cfFile.TmpPath) + if err != nil { + panic(err) + } + } + deleted, err := util.DeleteFileIfExists(cfFile.Path) + if err != nil { + panic(err) + } + if deleted { + atomic.AddInt64(s.SizeTrack, -int64(cfFile.Size)) + } + } + _, err := util.DeleteFileIfExists(s.MetaFile.Path) + if err != nil { + panic(err) + } + if s.holdTmpFiles { + _, err := util.DeleteFileIfExists(s.MetaFile.TmpPath) + if err != nil { + panic(err) + } + } +} + +func (s *Snap) Meta() (os.FileInfo, error) { + fi, err := os.Stat(s.MetaFile.Path) + if err != nil { + return nil, errors.WithStack(err) + } + return fi, nil +} + +func (s *Snap) TotalSize() (total uint64) { + for _, cf := range s.CFFiles { + total += cf.Size + } + return +} + +func (s *Snap) Save() error { + log.Debugf("saving to %s", s.MetaFile.Path) + for _, cfFile := range s.CFFiles { + if cfFile.Size == 0 { + // skip empty cf file. + continue + } + // Check each cf file has been fully written, and the checksum matches. 
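+ // WrittenSize and WriteDigest are filled incrementally by Write below; Save
+ // only renames the tmp files into place once both the size and the CRC32
+ // checksum agree with the snapshot meta.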
+ if cfFile.WrittenSize != cfFile.Size { + return errors.Errorf("snapshot file %s for CF %s size mismatch, real size %d, expected %d", + cfFile.Path, cfFile.CF, cfFile.WrittenSize, cfFile.Size) + } + checksum := cfFile.WriteDigest.Sum32() + if cfFile.Checksum != checksum { + return errors.Errorf("snapshot file %s for CF %s checksum mismatch, real checksum %d, expected %d", + cfFile.Path, cfFile.CF, checksum, cfFile.Checksum) + } + err := os.Rename(cfFile.TmpPath, cfFile.Path) + if err != nil { + return errors.WithStack(err) + } + atomic.AddInt64(s.SizeTrack, int64(cfFile.Size)) + } + // write meta file + bin, err := s.MetaFile.Meta.Marshal() + if err != nil { + return errors.WithStack(err) + } + _, err = s.MetaFile.File.Write(bin) + if err != nil { + return errors.WithStack(err) + } + err = s.MetaFile.File.Sync() + if err != nil { + return errors.WithStack(err) + } + err = os.Rename(s.MetaFile.TmpPath, s.MetaFile.Path) + if err != nil { + return errors.WithStack(err) + } + s.holdTmpFiles = false + return nil +} + +func (s *Snap) Apply(opts ApplyOptions) error { + err := s.validate() + if err != nil { + return err + } + + externalFiles := make([]*os.File, 0, len(s.CFFiles)) + for _, cfFile := range s.CFFiles { + if cfFile.Size == 0 { + // Skip empty cf file + continue + } + file, err := os.Open(cfFile.Path) + if err != nil { + log.Errorf("open ingest file %s failed: %s", cfFile.Path, err) + return err + } + externalFiles = append(externalFiles, file) + } + n, err := opts.DB.IngestExternalFiles(externalFiles) + if err != nil { + log.Errorf("ingest sst failed (first %d files succeeded): %s", n, err) + return err + } + log.Infof("apply snapshot ingested %d tables", n) + return nil +} + +func (s *Snap) Read(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + for s.cfIndex < len(s.CFFiles) { + cfFile := s.CFFiles[s.cfIndex] + if cfFile.Size == 0 { + s.cfIndex++ + continue + } + n, err := cfFile.File.Read(b) + if n > 0 { + return n, nil + } + if err != nil { + if err == io.EOF { + s.cfIndex++ + continue + } + return 0, errors.WithStack(err) + } + } + return 0, io.EOF +} + +func (s *Snap) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + nextBuf := b + for s.cfIndex < len(s.CFFiles) { + cfFile := s.CFFiles[s.cfIndex] + if cfFile.Size == 0 { + s.cfIndex++ + continue + } + left := cfFile.Size - cfFile.WrittenSize + if left == 0 { + s.cfIndex++ + continue + } + file := cfFile.File + digest := cfFile.WriteDigest + if len(nextBuf) > int(left) { + _, err := file.Write(nextBuf[:left]) + if err != nil { + return 0, errors.WithStack(err) + } + digest.Write(nextBuf[:left]) + cfFile.WrittenSize += left + s.cfIndex++ + nextBuf = nextBuf[left:] + } else { + _, err := file.Write(nextBuf) + if err != nil { + return 0, errors.WithStack(err) + } + digest.Write(nextBuf) + cfFile.WrittenSize += uint64(len(nextBuf)) + return len(b), nil + } + } + return len(b) - len(nextBuf), nil +} + +func (s *Snap) Drop() { + var cfTmpFileExists bool + for _, cfFile := range s.CFFiles { + // cleanup if some of the cf files and meta file is partly written + if util.FileExists(cfFile.TmpPath) { + cfTmpFileExists = true + break + } + } + if cfTmpFileExists || util.FileExists(s.MetaFile.TmpPath) { + s.Delete() + return + } + // cleanup if data corruption happens and any file goes missing + if !s.Exists() { + s.Delete() + } +} diff --git a/kv/raftstore/snap/snap_builder.go b/kv/raftstore/snap/snap_builder.go new file mode 100644 index 00000000..fc65610b --- /dev/null +++ 
b/kv/raftstore/snap/snap_builder.go @@ -0,0 +1,60 @@ +package snap + +import ( + "github.com/Connor1996/badger" + "github.com/Connor1996/badger/y" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// snapBuilder builds snapshot files. +type snapBuilder struct { + region *metapb.Region + txn *badger.Txn + cfFiles []*CFFile + kvCount int + size int +} + +func newSnapBuilder(cfFiles []*CFFile, dbSnap *badger.Txn, region *metapb.Region) *snapBuilder { + return &snapBuilder{ + region: region, + cfFiles: cfFiles, + txn: dbSnap, + } +} + +func (b *snapBuilder) build() error { + defer b.txn.Discard() + startKey, endKey := b.region.StartKey, b.region.EndKey + + for _, file := range b.cfFiles { + cf := file.CF + sstWriter := file.SstWriter + + it := engine_util.NewCFIterator(cf, b.txn) + for it.Seek(startKey); it.Valid(); it.Next() { + item := it.Item() + key := item.Key() + if engine_util.ExceedEndKey(key, endKey) { + break + } + value, err := item.Value() + if err != nil { + return err + } + cfKey := engine_util.KeyWithCF(cf, key) + if err := sstWriter.Add(cfKey, y.ValueStruct{ + Value: value, + }); err != nil { + return err + } + file.KVCount++ + file.Size += uint64(len(cfKey) + len(value)) + } + it.Close() + b.kvCount += file.KVCount + b.size += int(file.Size) + } + return nil +} diff --git a/kv/raftstore/snap/snap_manager.go b/kv/raftstore/snap/snap_manager.go new file mode 100644 index 00000000..f011c1b4 --- /dev/null +++ b/kv/raftstore/snap/snap_manager.go @@ -0,0 +1,345 @@ +package snap + +import ( + "io/ioutil" + "math" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/log" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap/errors" +) + +type SnapEntry int + +const ( + SnapEntryGenerating SnapEntry = 1 + SnapEntrySending SnapEntry = 2 + SnapEntryReceiving SnapEntry = 3 + SnapEntryApplying SnapEntry = 4 +) + +func (e SnapEntry) String() string { + switch e { + case SnapEntryGenerating: + return "generating" + case SnapEntrySending: + return "sending" + case SnapEntryReceiving: + return "receiving" + case SnapEntryApplying: + return "applying" + } + return "unknown" +} + +type SnapStats struct { + ReceivingCount int + SendingCount int +} + +type SnapManager struct { + base string + snapSize *int64 + registryLock sync.RWMutex + registry map[SnapKey][]SnapEntry + MaxTotalSize uint64 +} + +func NewSnapManager(path string) *SnapManager { + return new(SnapManagerBuilder).Build(path) +} + +func (sm *SnapManager) Init() error { + fi, err := os.Stat(sm.base) + if os.IsNotExist(err) { + err = os.MkdirAll(sm.base, 0600) + if err != nil { + return errors.WithStack(err) + } + return nil + } else if err != nil { + return errors.WithStack(err) + } + if !fi.IsDir() { + return errors.Errorf("%s should be a directory", sm.base) + } + fis, err := ioutil.ReadDir(sm.base) + if err != nil { + return errors.WithStack(err) + } + for _, fi := range fis { + if !fi.IsDir() { + name := fi.Name() + if strings.HasSuffix(name, tmpFileSuffix) { + err = os.Remove(filepath.Join(sm.base, name)) + if err != nil { + return errors.WithStack(err) + } + } else if strings.HasSuffix(name, sstFileSuffix) { + atomic.AddInt64(sm.snapSize, fi.Size()) + } + } + } + return nil +} + +func (sm *SnapManager) ListIdleSnap() ([]SnapKeyWithSending, error) { + fis, err := ioutil.ReadDir(sm.base) + if err != nil { + return nil, errors.WithStack(err) + } 
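+ // Snapshot meta files are named "<gen|rev>_<regionID>_<term>_<index>.meta", so
+ // the loop below reconstructs each SnapKey from the file name, skips keys that
+ // are currently registered, and sorts the rest by region id, term and index.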
+ results := make([]SnapKeyWithSending, 0, len(fis)) + for _, fi := range fis { + if fi.IsDir() { + continue + } + name := fi.Name() + if !strings.HasSuffix(name, metaFileSuffix) { + continue + } + name = name[:len(name)-len(metaFileSuffix)] + var key SnapKeyWithSending + if strings.HasPrefix(name, snapGenPrefix) { + key.IsSending = true + } + numberStrs := strings.Split(name, "_") + if len(numberStrs) != 4 { + return nil, errors.Errorf("failed to parse file %s", name) + } + key.SnapKey.RegionID, err = strconv.ParseUint(numberStrs[1], 10, 64) + if err != nil { + return nil, errors.WithStack(err) + } + key.SnapKey.Term, err = strconv.ParseUint(numberStrs[2], 10, 64) + if err != nil { + return nil, errors.WithStack(err) + } + key.SnapKey.Index, err = strconv.ParseUint(numberStrs[3], 10, 64) + if err != nil { + return nil, errors.WithStack(err) + } + sm.registryLock.RLock() + _, ok := sm.registry[key.SnapKey] + sm.registryLock.RUnlock() + if ok { + // Skip those registered snapshot. + continue + } + results = append(results, key) + } + sort.Slice(results, func(i, j int) bool { + keyI := &results[i].SnapKey + keyJ := &results[j].SnapKey + if keyI.RegionID == keyJ.RegionID { + if keyI.Term == keyJ.Term { + if keyI.Index == keyJ.Index { + return !results[i].IsSending + } + return keyI.Index < keyJ.Index + } + return keyI.Term < keyJ.Term + } + return keyI.RegionID < keyJ.RegionID + }) + return results, nil +} + +func (sm *SnapManager) HasRegistered(key SnapKey) bool { + sm.registryLock.RLock() + _, ok := sm.registry[key] + sm.registryLock.RUnlock() + return ok +} + +func (sm *SnapManager) GetTotalSnapSize() uint64 { + return uint64(atomic.LoadInt64(sm.snapSize)) +} + +func (sm *SnapManager) GetSnapshotForBuilding(key SnapKey) (Snapshot, error) { + if sm.GetTotalSnapSize() > sm.MaxTotalSize { + err := sm.deleteOldIdleSnaps() + if err != nil { + return nil, err + } + } + return NewSnapForBuilding(sm.base, key, sm.snapSize, sm) +} + +func (sm *SnapManager) deleteOldIdleSnaps() error { + idleSnaps, err := sm.ListIdleSnap() + if err != nil { + return err + } + type snapWithModTime struct { + key SnapKey + snap Snapshot + modTime time.Time + } + snaps := make([]snapWithModTime, 0, len(idleSnaps)) + for _, idleSnap := range idleSnaps { + if !idleSnap.IsSending { + continue + } + snap, err := sm.GetSnapshotForSending(idleSnap.SnapKey) + if err != nil { + continue + } + fi, err := snap.Meta() + if err != nil { + return err + } + snaps = append(snaps, snapWithModTime{key: idleSnap.SnapKey, snap: snap, modTime: fi.ModTime()}) + } + sort.Slice(snaps, func(i, j int) bool { + return snaps[i].modTime.Before(snaps[j].modTime) + }) + for sm.GetTotalSnapSize() > sm.MaxTotalSize { + if len(snaps) == 0 { + return errors.New("too many snapshots") + } + oldest := snaps[0] + snaps = snaps[1:] + sm.DeleteSnapshot(oldest.key, oldest.snap, false) + } + return nil +} + +func (sm *SnapManager) GetSnapshotForSending(snapKey SnapKey) (Snapshot, error) { + return NewSnapForSending(sm.base, snapKey, sm.snapSize, sm) +} + +func (sm *SnapManager) GetSnapshotForReceiving(snapKey SnapKey, data []byte) (Snapshot, error) { + snapshotData := new(rspb.RaftSnapshotData) + err := snapshotData.Unmarshal(data) + if err != nil { + return nil, errors.WithStack(err) + } + return NewSnapForReceiving(sm.base, snapKey, snapshotData.Meta, sm.snapSize, sm) +} + +func (sm *SnapManager) GetSnapshotForApplying(snapKey SnapKey) (Snapshot, error) { + snap, err := NewSnapForApplying(sm.base, snapKey, sm.snapSize, sm) + if err != nil { + return nil, err 
+ } + if !snap.Exists() { + return nil, errors.Errorf("snapshot of %s not exists", snapKey) + } + return snap, nil +} + +func (sm *SnapManager) Register(key SnapKey, entry SnapEntry) { + log.Debugf("register key:%s, entry:%d", key, entry) + sm.registryLock.Lock() + defer sm.registryLock.Unlock() + entries, ok := sm.registry[key] + if ok { + for _, e := range entries { + if e == entry { + log.Warnf("%s is registered more than 1 time", key) + return + } + } + } + entries = append(entries, entry) + sm.registry[key] = entries +} + +func (sm *SnapManager) Deregister(key SnapKey, entry SnapEntry) { + log.Debugf("deregister key:%s, entry:%s", key, entry) + sm.registryLock.Lock() + defer sm.registryLock.Unlock() + var handled bool + entries, ok := sm.registry[key] + if ok { + for i, e := range entries { + if e == entry { + entries = append(entries[:i], entries[i+1:]...) + handled = true + break + } + } + if handled { + if len(entries) > 0 { + sm.registry[key] = entries + } else { + delete(sm.registry, key) + } + return + } + } + log.Warnf("stale deregister key:%s, entry:%s", key, entry) +} + +func (sm *SnapManager) Stats() SnapStats { + sm.registryLock.RLock() + defer sm.registryLock.RUnlock() + var sendingCount, receivingCount int + for _, entries := range sm.registry { + var isSending, isReceiving bool + for _, entry := range entries { + switch entry { + case SnapEntryGenerating, SnapEntrySending: + isSending = true + case SnapEntryReceiving, SnapEntryApplying: + isReceiving = true + } + } + if isSending { + sendingCount++ + } + if isReceiving { + receivingCount++ + } + } + return SnapStats{SendingCount: sendingCount, ReceivingCount: receivingCount} +} + +func (sm *SnapManager) DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool { + sm.registryLock.Lock() + defer sm.registryLock.Unlock() + if checkEntry { + if e, ok := sm.registry[key]; ok { + if len(e) > 0 { + log.Infof("skip to delete %s since it's registered more than 1, registered entries %v", + snapshot.Path(), e) + return false + } + } + } else if _, ok := sm.registry[key]; ok { + log.Infof("skip to delete %s since it's registered.", snapshot.Path()) + return false + } + snapshot.Delete() + return true +} + +type SnapManagerBuilder struct { + maxTotalSize uint64 +} + +func (smb *SnapManagerBuilder) MaxTotalSize(v uint64) *SnapManagerBuilder { + smb.maxTotalSize = v + return smb +} + +func (smb *SnapManagerBuilder) Build(path string) *SnapManager { + var maxTotalSize uint64 = math.MaxUint64 + if smb.maxTotalSize > 0 { + maxTotalSize = smb.maxTotalSize + } + return &SnapManager{ + base: path, + snapSize: new(int64), + registry: map[SnapKey][]SnapEntry{}, + MaxTotalSize: maxTotalSize, + } +} diff --git a/kv/raftstore/snap/snap_test.go b/kv/raftstore/snap/snap_test.go new file mode 100644 index 00000000..00c88466 --- /dev/null +++ b/kv/raftstore/snap/snap_test.go @@ -0,0 +1,241 @@ +package snap + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "sync/atomic" + "testing" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + snapTestKey = []byte("tkey") + regionTestBegin = []byte("ta") + regionTestBeginOld = []byte("ua") + regionTestEnd = []byte("tz") + regionTestEndOld = []byte("uz") +) + +const ( + testWriteBatchSize = 10 * 1024 * 
1024 +) + +type dummyDeleter struct{} + +func (d *dummyDeleter) DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool { + snapshot.Delete() + return true +} + +func openDB(t *testing.T, dir string) *badger.DB { + opts := badger.DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + db, err := badger.Open(opts) + require.Nil(t, err) + return db +} + +func fillDBData(t *testing.T, db *badger.DB) { + // write some data for multiple cfs. + wb := new(engine_util.WriteBatch) + value := make([]byte, 32) + wb.SetCF(engine_util.CfDefault, snapTestKey, value) + wb.SetCF(engine_util.CfWrite, snapTestKey, value) + wb.SetCF(engine_util.CfLock, snapTestKey, value) + err := wb.WriteToDB(db) + require.Nil(t, err) +} + +func getKVCount(t *testing.T, db *badger.DB) int { + count := 0 + err := db.View(func(txn *badger.Txn) error { + for _, cf := range engine_util.CFs { + it := engine_util.NewCFIterator(cf, txn) + defer it.Close() + for it.Seek(regionTestBegin); it.Valid(); it.Next() { + if bytes.Compare(it.Item().Key(), regionTestEnd) >= 0 { + break + } + count++ + } + } + return nil + }) + + assert.Nil(t, err) + return count +} + +func genTestRegion(regionID, storeID, peerID uint64) *metapb.Region { + return &metapb.Region{ + Id: regionID, + StartKey: regionTestBegin, + EndKey: regionTestEnd, + RegionEpoch: &metapb.RegionEpoch{ + Version: 1, + ConfVer: 1, + }, + Peers: []*metapb.Peer{ + {StoreId: storeID, Id: peerID}, + }, + } +} + +func assertEqDB(t *testing.T, expected, actual *badger.DB) { + for _, cf := range engine_util.CFs { + expectedVal := getDBValue(t, expected, cf, snapTestKey) + actualVal := getDBValue(t, actual, cf, snapTestKey) + assert.Equal(t, expectedVal, actualVal) + } +} + +func getDBValue(t *testing.T, db *badger.DB, cf string, key []byte) (val []byte) { + val, err := engine_util.GetCF(db, cf, key) + require.Nil(t, err, string(key)) + return val +} + +func TestSnapGenMeta(t *testing.T) { + cfFiles := make([]*CFFile, 0, len(engine_util.CFs)) + for i, cf := range engine_util.CFs { + f := &CFFile{ + CF: cf, + Size: 100 * uint64(i+1), + Checksum: 1000 * uint32(i+1), + } + cfFiles = append(cfFiles, f) + } + meta, err := genSnapshotMeta(cfFiles) + require.Nil(t, err) + for i, cfFileMeta := range meta.CfFiles { + assert.Equal(t, cfFileMeta.Cf, cfFiles[i].CF) + assert.Equal(t, cfFileMeta.Size_, cfFiles[i].Size) + assert.Equal(t, cfFileMeta.Checksum, cfFiles[i].Checksum) + } +} + +func TestSnapDisplayPath(t *testing.T) { + dir, err := ioutil.TempDir("", "snapshot") + require.Nil(t, err) + defer os.RemoveAll(dir) + key := &SnapKey{1, 1, 2} + prefix := fmt.Sprintf("%s_%s", snapGenPrefix, key) + displayPath := getDisplayPath(dir, prefix) + assert.NotEqual(t, displayPath, "") +} + +func TestSnapFile(t *testing.T) { + doTestSnapFile(t, true) + doTestSnapFile(t, false) +} + +func doTestSnapFile(t *testing.T, dbHasData bool) { + regionID := uint64(1) + region := genTestRegion(regionID, 1, 1) + dir, err := ioutil.TempDir("", "snapshot") + require.Nil(t, err) + defer os.RemoveAll(dir) + db := openDB(t, dir) + if dbHasData { + fillDBData(t, db) + } + + snapDir, err := ioutil.TempDir("", "snapshot") + require.Nil(t, err) + defer os.RemoveAll(snapDir) + key := SnapKey{RegionID: regionID, Term: 1, Index: 1} + sizeTrack := new(int64) + deleter := &dummyDeleter{} + s1, err := NewSnapForBuilding(snapDir, key, sizeTrack, deleter) + require.Nil(t, err) + // Ensure that this snapshot file doesn't exist before being built. 
+ assert.False(t, s1.Exists()) + assert.Equal(t, int64(0), atomic.LoadInt64(sizeTrack)) + + snapData := new(rspb.RaftSnapshotData) + snapData.Region = region + stat := new(SnapStatistics) + assert.Nil(t, s1.Build(db.NewTransaction(false), region, snapData, stat, deleter)) + + // Ensure that this snapshot file does exist after being built. + assert.True(t, s1.Exists()) + totalSize := s1.TotalSize() + // Ensure the `size_track` is modified correctly. + size := atomic.LoadInt64(sizeTrack) + assert.Equal(t, int64(totalSize), size) + assert.Equal(t, int64(stat.Size), size) + if dbHasData { + assert.Equal(t, 3, getKVCount(t, db)) + // stat.KVCount is 5 because there are two extra default cf value. + assert.Equal(t, 3, stat.KVCount) + } + + // Ensure this snapshot could be read for sending. + s2, err := NewSnapForSending(snapDir, key, sizeTrack, deleter) + require.Nil(t, err, errors.ErrorStack(err)) + assert.True(t, s2.Exists()) + + dstDir, err := ioutil.TempDir("", "snapshot") + require.Nil(t, err) + defer os.RemoveAll(dstDir) + + s3, err := NewSnapForReceiving(dstDir, key, snapData.Meta, sizeTrack, deleter) + require.Nil(t, err) + assert.False(t, s3.Exists()) + + // Ensure snapshot data could be read out of `s2`, and write into `s3`. + copySize, err := io.Copy(s3, s2) + require.Nil(t, err) + assert.Equal(t, copySize, size) + assert.False(t, s3.Exists()) + assert.Nil(t, s3.Save()) + assert.True(t, s3.Exists()) + + // Ensure the tracked size is handled correctly after receiving a snapshot. + assert.Equal(t, atomic.LoadInt64(sizeTrack), size*2) + + // Ensure `delete()` works to delete the source snapshot. + s2.Delete() + assert.False(t, s2.Exists()) + assert.False(t, s1.Exists()) + assert.Equal(t, atomic.LoadInt64(sizeTrack), size) + + // Ensure a snapshot could be applied to DB. + s4, err := NewSnapForApplying(dstDir, key, sizeTrack, deleter) + require.Nil(t, err) + assert.True(t, s4.Exists()) + + dstDBDir, err := ioutil.TempDir("", "snapshot") + require.Nil(t, err) + defer os.RemoveAll(dstDBDir) + + dstDB := openDB(t, dstDBDir) + opts := ApplyOptions{ + DB: dstDB, + Region: region, + } + err = s4.Apply(opts) + require.Nil(t, err, errors.ErrorStack(err)) + + // Ensure `delete()` works to delete the dest snapshot. + s4.Delete() + assert.False(t, s4.Exists()) + assert.False(t, s3.Exists()) + assert.Equal(t, atomic.LoadInt64(sizeTrack), int64(0)) + + // Verify the data is correct after applying snapshot. 
+ if dbHasData { + assertEqDB(t, db, dstDB) + } +} diff --git a/kv/raftstore/store_worker.go b/kv/raftstore/store_worker.go new file mode 100644 index 00000000..fe4479a2 --- /dev/null +++ b/kv/raftstore/store_worker.go @@ -0,0 +1,309 @@ +package raftstore + +import ( + "sync" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/raftstore/runner" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap/errors" +) + +type StoreTick int + +const ( + StoreTickSchedulerStoreHeartbeat StoreTick = 1 + StoreTickSnapGC StoreTick = 2 +) + +type storeState struct { + id uint64 + receiver <-chan message.Msg + ticker *ticker +} + +func newStoreState(cfg *config.Config) (chan<- message.Msg, *storeState) { + ch := make(chan message.Msg, 40960) + state := &storeState{ + receiver: (<-chan message.Msg)(ch), + ticker: newStoreTicker(cfg), + } + return (chan<- message.Msg)(ch), state +} + +// storeWorker runs store commands. +type storeWorker struct { + *storeState + ctx *GlobalContext +} + +func newStoreWorker(ctx *GlobalContext, state *storeState) *storeWorker { + return &storeWorker{ + storeState: state, + ctx: ctx, + } +} + +func (sw *storeWorker) run(closeCh <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + for { + var msg message.Msg + select { + case <-closeCh: + return + case msg = <-sw.receiver: + } + sw.handleMsg(msg) + } +} + +func (d *storeWorker) onTick(tick StoreTick) { + switch tick { + case StoreTickSchedulerStoreHeartbeat: + d.onSchedulerStoreHearbeatTick() + case StoreTickSnapGC: + d.onSnapMgrGC() + } +} + +func (d *storeWorker) handleMsg(msg message.Msg) { + switch msg.Type { + case message.MsgTypeStoreRaftMessage: + if err := d.onRaftMessage(msg.Data.(*rspb.RaftMessage)); err != nil { + log.Errorf("handle raft message failed storeID %d, %v", d.id, err) + } + case message.MsgTypeStoreTick: + d.onTick(msg.Data.(StoreTick)) + case message.MsgTypeStoreStart: + d.start(msg.Data.(*metapb.Store)) + } +} + +func (d *storeWorker) start(store *metapb.Store) { + d.id = store.Id + d.ticker.scheduleStore(StoreTickSchedulerStoreHeartbeat) + d.ticker.scheduleStore(StoreTickSnapGC) +} + +/// Checks if the message is targeting a stale peer. +/// +/// Returns true means the message can be dropped silently. +func (d *storeWorker) checkMsg(msg *rspb.RaftMessage) (bool, error) { + regionID := msg.GetRegionId() + fromEpoch := msg.GetRegionEpoch() + msgType := msg.Message.MsgType + isVoteMsg := util.IsVoteMessage(msg.Message) + fromStoreID := msg.FromPeer.StoreId + + // Check if the target is tombstone, + stateKey := meta.RegionStateKey(regionID) + localState := new(rspb.RegionLocalState) + err := engine_util.GetMeta(d.ctx.engine.Kv, stateKey, localState) + if err != nil { + if err == badger.ErrKeyNotFound { + return false, nil + } + return false, err + } + if localState.State != rspb.PeerState_Tombstone { + // Maybe split, but not registered yet. 
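+ // The target peer may be created shortly by a pending region split, so a
+ // first vote message is buffered in pendingVotes instead of being dropped.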
+ if util.IsFirstVoteMessage(msg.Message) { + meta := d.ctx.storeMeta + // Last check on whether target peer is created, otherwise, the + // vote message will never be comsumed. + if _, ok := meta.regions[regionID]; ok { + return false, nil + } + meta.pendingVotes = append(meta.pendingVotes, msg) + log.Infof("region %d doesn't exist yet, wait for it to be split.", regionID) + return true, nil + } + return false, errors.Errorf("region %d not exists but not tombstone: %s", regionID, localState) + } + log.Debugf("region %d in tombstone state: %s", regionID, localState) + region := localState.Region + regionEpoch := region.RegionEpoch + // The region in this peer is already destroyed + if util.IsEpochStale(fromEpoch, regionEpoch) { + log.Infof("tombstone peer receives a stale message. region_id:%d, from_region_epoch:%s, current_region_epoch:%s, msg_type:%s", + regionID, fromEpoch, regionEpoch, msgType) + notExist := util.FindPeer(region, fromStoreID) == nil + handleStaleMsg(d.ctx.trans, msg, regionEpoch, isVoteMsg && notExist) + return true, nil + } + if fromEpoch.ConfVer == regionEpoch.ConfVer { + return false, errors.Errorf("tombstone peer [epoch: %s] received an invalid message %s, ignore it", + regionEpoch, msgType) + } + return false, nil +} + +func (d *storeWorker) onRaftMessage(msg *rspb.RaftMessage) error { + regionID := msg.RegionId + if err := d.ctx.router.send(regionID, message.Msg{Type: message.MsgTypeRaftMessage, Data: msg}); err == nil { + return nil + } + log.Debugf("handle raft message. from_peer:%d, to_peer:%d, store:%d, region:%d, msg:%+v", + msg.FromPeer.Id, msg.ToPeer.Id, d.storeState.id, regionID, msg.Message) + if msg.ToPeer.StoreId != d.ctx.store.Id { + log.Warnf("store not match, ignore it. store_id:%d, to_store_id:%d, region_id:%d", + d.ctx.store.Id, msg.ToPeer.StoreId, regionID) + return nil + } + + if msg.RegionEpoch == nil { + log.Errorf("missing region epoch in raft message, ignore it. region_id:%d", regionID) + return nil + } + if msg.IsTombstone { + // Target tombstone peer doesn't exist, so ignore it. + return nil + } + ok, err := d.checkMsg(msg) + if err != nil { + return err + } + if ok { + return nil + } + created, err := d.maybeCreatePeer(regionID, msg) + if err != nil { + return err + } + if !created { + return nil + } + _ = d.ctx.router.send(regionID, message.Msg{Type: message.MsgTypeRaftMessage, Data: msg}) + return nil +} + +/// If target peer doesn't exist, create it. +/// +/// return false to indicate that target peer is in invalid state or +/// doesn't exist and can't be created. 
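+///
+/// Note that it also returns true when the target peer already exists, so in
+/// either case the caller can simply forward the message to that peer.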
+func (d *storeWorker) maybeCreatePeer(regionID uint64, msg *rspb.RaftMessage) (bool, error) { + // we may encounter a message with larger peer id, which means + // current peer is stale, then we should remove current peer + meta := d.ctx.storeMeta + if _, ok := meta.regions[regionID]; ok { + return true, nil + } + if !util.IsInitialMsg(msg.Message) { + log.Debugf("target peer %s doesn't exist", msg.ToPeer) + return false, nil + } + + for _, region := range meta.getOverlapRegions(&metapb.Region{ + StartKey: msg.StartKey, + EndKey: msg.EndKey, + }) { + log.Debugf("msg %s is overlapped with exist region %s", msg, region) + if util.IsFirstVoteMessage(msg.Message) { + meta.pendingVotes = append(meta.pendingVotes, msg) + } + return false, nil + } + + peer, err := replicatePeer( + d.ctx.store.Id, d.ctx.cfg, d.ctx.regionTaskSender, d.ctx.engine, regionID, msg.ToPeer) + if err != nil { + return false, err + } + // following snapshot may overlap, should insert into region_ranges after + // snapshot is applied. + meta.regions[regionID] = peer.Region() + d.ctx.router.register(peer) + _ = d.ctx.router.send(regionID, message.Msg{Type: message.MsgTypeStart}) + return true, nil +} + +func (d *storeWorker) storeHeartbeatScheduler() { + stats := new(schedulerpb.StoreStats) + stats.StoreId = d.ctx.store.Id + stats.RegionCount = uint32(len(d.ctx.storeMeta.regions)) + d.ctx.schedulerTaskSender <- &runner.SchedulerStoreHeartbeatTask{ + Stats: stats, + Engine: d.ctx.engine.Kv, + Path: d.ctx.engine.KvPath, + } +} + +func (d *storeWorker) onSchedulerStoreHearbeatTick() { + d.storeHeartbeatScheduler() + d.ticker.scheduleStore(StoreTickSchedulerStoreHeartbeat) +} + +func (d *storeWorker) handleSnapMgrGC() error { + mgr := d.ctx.snapMgr + snapKeys, err := mgr.ListIdleSnap() + if err != nil { + return err + } + if len(snapKeys) == 0 { + return nil + } + var lastRegionID uint64 + var keys []snap.SnapKeyWithSending + for _, pair := range snapKeys { + key := pair.SnapKey + if lastRegionID == key.RegionID { + keys = append(keys, pair) + continue + } + if len(keys) > 0 { + err = d.scheduleGCSnap(lastRegionID, keys) + if err != nil { + return err + } + keys = nil + } + lastRegionID = key.RegionID + keys = append(keys, pair) + } + if len(keys) > 0 { + return d.scheduleGCSnap(lastRegionID, keys) + } + return nil +} + +func (d *storeWorker) scheduleGCSnap(regionID uint64, keys []snap.SnapKeyWithSending) error { + gcSnap := message.Msg{Type: message.MsgTypeGcSnap, Data: &message.MsgGCSnap{Snaps: keys}} + if d.ctx.router.send(regionID, gcSnap) != nil { + // The snapshot exists because MsgAppend has been rejected. So the + // peer must have been exist. But now it's disconnected, so the peer + // has to be destroyed instead of being created. 
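+ // Both locally generated (sending) snapshots and received (applying)
+ // snapshots are looked up below and deleted directly.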
+ log.Infof("region %d is disconnected, remove snaps %v", regionID, keys) + for _, pair := range keys { + key := pair.SnapKey + isSending := pair.IsSending + var snapshot snap.Snapshot + var err error + if isSending { + snapshot, err = d.ctx.snapMgr.GetSnapshotForSending(key) + } else { + snapshot, err = d.ctx.snapMgr.GetSnapshotForApplying(key) + } + if err != nil { + return err + } + d.ctx.snapMgr.DeleteSnapshot(key, snapshot, false) + } + } + return nil +} + +func (d *storeWorker) onSnapMgrGC() { + if err := d.handleSnapMgrGC(); err != nil { + log.Errorf("handle snap GC failed store_id %d, err %s", d.storeState.id, err) + } + d.ticker.scheduleStore(StoreTickSnapGC) +} diff --git a/kv/raftstore/ticker.go b/kv/raftstore/ticker.go new file mode 100644 index 00000000..2496ee3b --- /dev/null +++ b/kv/raftstore/ticker.go @@ -0,0 +1,130 @@ +package raftstore + +import ( + "time" + + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" +) + +type ticker struct { + regionID uint64 + tick int64 + schedules []tickSchedule +} + +type tickSchedule struct { + runAt int64 + interval int64 +} + +func newTicker(regionID uint64, cfg *config.Config) *ticker { + baseInterval := cfg.RaftBaseTickInterval + t := &ticker{ + regionID: regionID, + schedules: make([]tickSchedule, 6), + } + t.schedules[int(PeerTickRaft)].interval = 1 + t.schedules[int(PeerTickRaftLogGC)].interval = int64(cfg.RaftLogGCTickInterval / baseInterval) + t.schedules[int(PeerTickSplitRegionCheck)].interval = int64(cfg.SplitRegionCheckTickInterval / baseInterval) + t.schedules[int(PeerTickSchedulerHeartbeat)].interval = int64(cfg.SchedulerHeartbeatTickInterval / baseInterval) + return t +} + +const SnapMgrGcTickInterval = 1 * time.Minute + +func newStoreTicker(cfg *config.Config) *ticker { + baseInterval := cfg.RaftBaseTickInterval + t := &ticker{ + schedules: make([]tickSchedule, 4), + } + t.schedules[int(StoreTickSchedulerStoreHeartbeat)].interval = int64(cfg.SchedulerStoreHeartbeatTickInterval / baseInterval) + t.schedules[int(StoreTickSnapGC)].interval = int64(SnapMgrGcTickInterval / baseInterval) + return t +} + +// tickClock should be called when peerMsgHandler received tick message. +func (t *ticker) tickClock() { + t.tick++ +} + +// schedule arrange the next run for the PeerTick. +func (t *ticker) schedule(tp PeerTick) { + sched := &t.schedules[int(tp)] + if sched.interval <= 0 { + sched.runAt = -1 + return + } + sched.runAt = t.tick + sched.interval +} + +// isOnTick checks if the PeerTick should run. 
+func (t *ticker) isOnTick(tp PeerTick) bool { + sched := &t.schedules[int(tp)] + return sched.runAt == t.tick +} + +func (t *ticker) isOnStoreTick(tp StoreTick) bool { + sched := &t.schedules[int(tp)] + return sched.runAt == t.tick +} + +func (t *ticker) scheduleStore(tp StoreTick) { + sched := &t.schedules[int(tp)] + if sched.interval <= 0 { + sched.runAt = -1 + return + } + sched.runAt = t.tick + sched.interval +} + +type tickDriver struct { + baseTickInterval time.Duration + newRegionCh chan uint64 + regions map[uint64]struct{} + router *router + storeTicker *ticker +} + +func newTickDriver(baseTickInterval time.Duration, router *router, storeTicker *ticker) *tickDriver { + return &tickDriver{ + baseTickInterval: baseTickInterval, + newRegionCh: make(chan uint64), + regions: make(map[uint64]struct{}), + router: router, + storeTicker: storeTicker, + } +} + +func (r *tickDriver) run() { + timer := time.Tick(r.baseTickInterval) + for { + select { + case <-timer: + for regionID := range r.regions { + if r.router.send(regionID, message.NewPeerMsg(message.MsgTypeTick, regionID, nil)) != nil { + delete(r.regions, regionID) + } + } + r.tickStore() + case regionID, ok := <-r.newRegionCh: + if !ok { + return + } + r.regions[regionID] = struct{}{} + } + } +} + +func (r *tickDriver) stop() { + close(r.newRegionCh) +} + +func (r *tickDriver) tickStore() { + r.storeTicker.tickClock() + for i := range r.storeTicker.schedules { + if r.storeTicker.isOnStoreTick(StoreTick(i)) { + r.router.sendStore(message.NewMsg(message.MsgTypeStoreTick, StoreTick(i))) + } + } +} diff --git a/kv/raftstore/util/error.go b/kv/raftstore/util/error.go new file mode 100644 index 00000000..0f54d63d --- /dev/null +++ b/kv/raftstore/util/error.go @@ -0,0 +1,81 @@ +package util + +import ( + "fmt" + + "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap/errors" +) + +type ErrNotLeader struct { + RegionId uint64 + Leader *metapb.Peer +} + +func (e *ErrNotLeader) Error() string { + return fmt.Sprintf("region %v is not leader", e.RegionId) +} + +type ErrRegionNotFound struct { + RegionId uint64 +} + +func (e *ErrRegionNotFound) Error() string { + return fmt.Sprintf("region %v is not found", e.RegionId) +} + +type ErrKeyNotInRegion struct { + Key []byte + Region *metapb.Region +} + +func (e *ErrKeyNotInRegion) Error() string { + return fmt.Sprintf("key %v is not in region %v", e.Key, e.Region) +} + +type ErrEpochNotMatch struct { + Message string + Regions []*metapb.Region +} + +func (e *ErrEpochNotMatch) Error() string { + return fmt.Sprintf("epoch not match, error msg %v, regions %v", e.Message, e.Regions) +} + +type ErrStaleCommand struct{} + +func (e *ErrStaleCommand) Error() string { + return fmt.Sprintf("stale command") +} + +type ErrStoreNotMatch struct { + RequestStoreId uint64 + ActualStoreId uint64 +} + +func (e *ErrStoreNotMatch) Error() string { + return fmt.Sprintf("store not match, request store id is %v, but actual store id is %v", e.RequestStoreId, e.ActualStoreId) +} + +func RaftstoreErrToPbError(e error) *errorpb.Error { + ret := new(errorpb.Error) + switch err := errors.Cause(e).(type) { + case *ErrNotLeader: + ret.NotLeader = &errorpb.NotLeader{RegionId: err.RegionId, Leader: err.Leader} + case *ErrRegionNotFound: + ret.RegionNotFound = &errorpb.RegionNotFound{RegionId: err.RegionId} + case *ErrKeyNotInRegion: + ret.KeyNotInRegion = &errorpb.KeyNotInRegion{Key: err.Key, RegionId: err.Region.Id, + StartKey: err.Region.StartKey, 
EndKey: err.Region.EndKey} + case *ErrEpochNotMatch: + ret.EpochNotMatch = &errorpb.EpochNotMatch{CurrentRegions: err.Regions} + case *ErrStaleCommand: + ret.StaleCommand = &errorpb.StaleCommand{} + case *ErrStoreNotMatch: + ret.StoreNotMatch = &errorpb.StoreNotMatch{RequestStoreId: err.RequestStoreId, ActualStoreId: err.ActualStoreId} + default: + ret.Message = e.Error() + } + return ret +} diff --git a/kv/raftstore/util/error_test.go b/kv/raftstore/util/error_test.go new file mode 100644 index 00000000..517d68b0 --- /dev/null +++ b/kv/raftstore/util/error_test.go @@ -0,0 +1,47 @@ +package util + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRaftstoreErrToPbError(t *testing.T) { + regionId := uint64(1) + notLeader := &ErrNotLeader{RegionId: regionId, Leader: nil} + pbErr := RaftstoreErrToPbError(notLeader) + require.NotNil(t, pbErr.NotLeader) + assert.Equal(t, pbErr.NotLeader.RegionId, regionId) + + regionNotFound := &ErrRegionNotFound{RegionId: regionId} + pbErr = RaftstoreErrToPbError(regionNotFound) + require.NotNil(t, pbErr.RegionNotFound) + assert.Equal(t, pbErr.RegionNotFound.RegionId, regionId) + + region := &metapb.Region{Id: regionId, StartKey: []byte{0}, EndKey: []byte{1}} + + keyNotInRegion := &ErrKeyNotInRegion{Key: []byte{2}, Region: region} + pbErr = RaftstoreErrToPbError(keyNotInRegion) + require.NotNil(t, pbErr.KeyNotInRegion) + assert.Equal(t, pbErr.KeyNotInRegion.StartKey, []byte{0}) + assert.Equal(t, pbErr.KeyNotInRegion.EndKey, []byte{1}) + assert.Equal(t, pbErr.KeyNotInRegion.Key, []byte{2}) + + epochNotMatch := &ErrEpochNotMatch{Regions: []*metapb.Region{region}} + pbErr = RaftstoreErrToPbError(epochNotMatch) + require.NotNil(t, pbErr.EpochNotMatch) + assert.Equal(t, pbErr.EpochNotMatch.CurrentRegions, []*metapb.Region{region}) + + staleCommand := &ErrStaleCommand{} + pbErr = RaftstoreErrToPbError(staleCommand) + require.NotNil(t, pbErr.StaleCommand) + + requestStoreId, actualStoreId := uint64(1), uint64(2) + storeNotMatch := &ErrStoreNotMatch{RequestStoreId: requestStoreId, ActualStoreId: actualStoreId} + pbErr = RaftstoreErrToPbError(storeNotMatch) + require.NotNil(t, pbErr.StoreNotMatch) + assert.Equal(t, pbErr.StoreNotMatch.RequestStoreId, requestStoreId) + assert.Equal(t, pbErr.StoreNotMatch.ActualStoreId, actualStoreId) +} diff --git a/kv/raftstore/util/test_util.go b/kv/raftstore/util/test_util.go new file mode 100644 index 00000000..db3d472e --- /dev/null +++ b/kv/raftstore/util/test_util.go @@ -0,0 +1,38 @@ +package util + +import ( + "io/ioutil" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" +) + +func NewTestEngines() *engine_util.Engines { + engines := new(engine_util.Engines) + var err error + engines.KvPath, err = ioutil.TempDir("", "tinykv_kv") + if err != nil { + panic("create kv dir failed") + } + kvOpts := badger.DefaultOptions + kvOpts.Dir = engines.KvPath + kvOpts.ValueDir = engines.KvPath + kvOpts.ValueThreshold = 256 + engines.Kv, err = badger.Open(kvOpts) + if err != nil { + panic("open kv db failed") + } + engines.RaftPath, err = ioutil.TempDir("", "tinykv_raft") + if err != nil { + panic("create raft dir failed") + } + raftOpts := badger.DefaultOptions + raftOpts.Dir = engines.RaftPath + raftOpts.ValueDir = engines.RaftPath + raftOpts.ValueThreshold = 256 + engines.Raft, err = badger.Open(raftOpts) + if err != nil { + panic("open raft db failed") + } + return engines 
+} diff --git a/kv/raftstore/util/util.go b/kv/raftstore/util/util.go new file mode 100644 index 00000000..95b99117 --- /dev/null +++ b/kv/raftstore/util/util.go @@ -0,0 +1,207 @@ +package util + +import ( + "bytes" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/pingcap/errors" +) + +const RaftInvalidIndex uint64 = 0 +const InvalidID uint64 = 0 + +/// `is_initial_msg` checks whether the `msg` can be used to initialize a new peer or not. +// There could be two cases: +// 1. Target peer already exists but has not established communication with leader yet +// 2. Target peer is added newly due to member change or region split, but it's not +// created yet +// For both cases the region start key and end key are attached in RequestVote and +// Heartbeat message for the store of that peer to check whether to create a new peer +// when receiving these messages, or just to wait for a pending region split to perform +// later. +func IsInitialMsg(msg *eraftpb.Message) bool { + return msg.MsgType == eraftpb.MessageType_MsgRequestVote || + // the peer has not been known to this leader, it may exist or not. + (msg.MsgType == eraftpb.MessageType_MsgHeartbeat && msg.Commit == RaftInvalidIndex) +} + +/// Check if key in region range [`start_key`, `end_key`). +func CheckKeyInRegion(key []byte, region *metapb.Region) error { + if bytes.Compare(key, region.StartKey) >= 0 && (len(region.EndKey) == 0 || bytes.Compare(key, region.EndKey) < 0) { + return nil + } else { + return &ErrKeyNotInRegion{Key: key, Region: region} + } +} + +/// Check if key in region range (`start_key`, `end_key`). +func CheckKeyInRegionExclusive(key []byte, region *metapb.Region) error { + if bytes.Compare(region.StartKey, key) < 0 && (len(region.EndKey) == 0 || bytes.Compare(key, region.EndKey) < 0) { + return nil + } else { + return &ErrKeyNotInRegion{Key: key, Region: region} + } +} + +/// Check if key in region range [`start_key`, `end_key`]. +func CheckKeyInRegionInclusive(key []byte, region *metapb.Region) error { + if bytes.Compare(key, region.StartKey) >= 0 && (len(region.EndKey) == 0 || bytes.Compare(key, region.EndKey) <= 0) { + return nil + } else { + return &ErrKeyNotInRegion{Key: key, Region: region} + } +} + +/// check whether epoch is staler than check_epoch. +func IsEpochStale(epoch *metapb.RegionEpoch, checkEpoch *metapb.RegionEpoch) bool { + return epoch.Version < checkEpoch.Version || epoch.ConfVer < checkEpoch.ConfVer +} + +func IsVoteMessage(msg *eraftpb.Message) bool { + tp := msg.GetMsgType() + return tp == eraftpb.MessageType_MsgRequestVote +} + +/// `is_first_vote_msg` checks `msg` is the first vote message or not. It's used for +/// when the message is received but there is no such region in `Store::region_peers` and the +/// region overlaps with others. In this case we should put `msg` into `pending_votes` instead of +/// create the peer. 
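+///
+/// A peer is initialized with meta.RaftInitLogTerm as its last log term, so the
+/// first election it can take part in uses term RaftInitLogTerm+1, which is the
+/// term this helper matches against.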
+func IsFirstVoteMessage(msg *eraftpb.Message) bool { + return IsVoteMessage(msg) && msg.Term == meta.RaftInitLogTerm+1 +} + +func CheckRegionEpoch(req *raft_cmdpb.RaftCmdRequest, region *metapb.Region, includeRegion bool) error { + checkVer, checkConfVer := false, false + if req.AdminRequest == nil { + checkVer = true + } else { + switch req.AdminRequest.CmdType { + case raft_cmdpb.AdminCmdType_CompactLog, raft_cmdpb.AdminCmdType_InvalidAdmin: + case raft_cmdpb.AdminCmdType_ChangePeer: + checkConfVer = true + case raft_cmdpb.AdminCmdType_Split, raft_cmdpb.AdminCmdType_TransferLeader: + checkVer = true + checkConfVer = true + } + } + + if !checkVer && !checkConfVer { + return nil + } + + if req.Header == nil { + return fmt.Errorf("missing header!") + } + + if req.Header.RegionEpoch == nil { + return fmt.Errorf("missing epoch!") + } + + fromEpoch := req.Header.RegionEpoch + currentEpoch := region.RegionEpoch + + // We must check epochs strictly to avoid key not in region error. + // + // A 3 nodes TiKV cluster with merge enabled, after commit merge, TiKV A + // tells TiDB with a epoch not match error contains the latest target Region + // info, TiDB updates its region cache and sends requests to TiKV B, + // and TiKV B has not applied commit merge yet, since the region epoch in + // request is higher than TiKV B, the request must be denied due to epoch + // not match, so it does not read on a stale snapshot, thus avoid the + // KeyNotInRegion error. + if (checkConfVer && fromEpoch.ConfVer != currentEpoch.ConfVer) || + (checkVer && fromEpoch.Version != currentEpoch.Version) { + log.Debugf("epoch not match, region id %v, from epoch %v, current epoch %v", + region.Id, fromEpoch, currentEpoch) + + regions := []*metapb.Region{} + if includeRegion { + regions = []*metapb.Region{region} + } + return &ErrEpochNotMatch{Message: fmt.Sprintf("current epoch of region %v is %v, but you sent %v", + region.Id, currentEpoch, fromEpoch), Regions: regions} + } + + return nil +} + +func FindPeer(region *metapb.Region, storeID uint64) *metapb.Peer { + for _, peer := range region.Peers { + if peer.StoreId == storeID { + return peer + } + } + return nil +} + +func RemovePeer(region *metapb.Region, storeID uint64) *metapb.Peer { + for i, peer := range region.Peers { + if peer.StoreId == storeID { + region.Peers = append(region.Peers[:i], region.Peers[i+1:]...) + return peer + } + } + return nil +} + +func ConfStateFromRegion(region *metapb.Region) (confState eraftpb.ConfState) { + for _, p := range region.Peers { + confState.Nodes = append(confState.Nodes, p.GetId()) + } + return +} + +func CheckStoreID(req *raft_cmdpb.RaftCmdRequest, storeID uint64) error { + peer := req.Header.Peer + if peer.StoreId == storeID { + return nil + } + return errors.Errorf("store not match %d %d", peer.StoreId, storeID) +} + +func CheckTerm(req *raft_cmdpb.RaftCmdRequest, term uint64) error { + header := req.Header + if header.Term == 0 || term <= header.Term+1 { + return nil + } + // If header's term is 2 verions behind current term, + // leadership may have been changed away. 
+ return &ErrStaleCommand{} +} + +func CheckPeerID(req *raft_cmdpb.RaftCmdRequest, peerID uint64) error { + peer := req.Header.Peer + if peer.Id == peerID { + return nil + } + return errors.Errorf("mismatch peer id %d != %d", peer.Id, peerID) +} + +func CloneMsg(origin, cloned proto.Message) error { + data, err := proto.Marshal(origin) + if err != nil { + return err + } + return proto.Unmarshal(data, cloned) +} + +func SafeCopy(b []byte) []byte { + return append([]byte{}, b...) +} + +func PeerEqual(l, r *metapb.Peer) bool { + return l.Id == r.Id && l.StoreId == r.StoreId +} + +func RegionEqual(l, r *metapb.Region) bool { + if l == nil || r == nil { + return false + } + return l.Id == r.Id && l.RegionEpoch.Version == r.RegionEpoch.Version && l.RegionEpoch.ConfVer == r.RegionEpoch.ConfVer +} diff --git a/kv/raftstore/util/util_test.go b/kv/raftstore/util/util_test.go new file mode 100644 index 00000000..1cbee8e7 --- /dev/null +++ b/kv/raftstore/util/util_test.go @@ -0,0 +1,193 @@ +package util + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/stretchr/testify/assert" +) + +func TestCheckKeyInRegion(t *testing.T) { + type Case struct { + Key []byte + StartKey []byte + EndKey []byte + IsInRegion bool + Inclusive bool + Exclusive bool + } + test_cases := []Case{ + {Key: []byte{}, StartKey: []byte{}, EndKey: []byte{}, IsInRegion: true, Inclusive: true, Exclusive: false}, + {Key: []byte{}, StartKey: []byte{}, EndKey: []byte{6}, IsInRegion: true, Inclusive: true, Exclusive: false}, + {Key: []byte{}, StartKey: []byte{3}, EndKey: []byte{6}, IsInRegion: false, Inclusive: false, Exclusive: false}, + {Key: []byte{4}, StartKey: []byte{3}, EndKey: []byte{6}, IsInRegion: true, Inclusive: true, Exclusive: true}, + {Key: []byte{4}, StartKey: []byte{3}, EndKey: []byte{}, IsInRegion: true, Inclusive: true, Exclusive: true}, + {Key: []byte{3}, StartKey: []byte{3}, EndKey: []byte{}, IsInRegion: true, Inclusive: true, Exclusive: false}, + {Key: []byte{2}, StartKey: []byte{3}, EndKey: []byte{6}, IsInRegion: false, Inclusive: false, Exclusive: false}, + {Key: []byte{}, StartKey: []byte{3}, EndKey: []byte{6}, IsInRegion: false, Inclusive: false, Exclusive: false}, + {Key: []byte{}, StartKey: []byte{3}, EndKey: []byte{}, IsInRegion: false, Inclusive: false, Exclusive: false}, + {Key: []byte{6}, StartKey: []byte{3}, EndKey: []byte{6}, IsInRegion: false, Inclusive: true, Exclusive: false}, + } + for _, c := range test_cases { + region := new(metapb.Region) + region.StartKey = c.StartKey + region.EndKey = c.EndKey + result := CheckKeyInRegion(c.Key, region) + assert.Equal(t, result == nil, c.IsInRegion) + result = CheckKeyInRegionInclusive(c.Key, region) + assert.Equal(t, result == nil, c.Inclusive) + result = CheckKeyInRegionExclusive(c.Key, region) + assert.Equal(t, result == nil, c.Exclusive) + } +} + +func TestIsInitialMsg(t *testing.T) { + type MsgInfo struct { + MessageType eraftpb.MessageType + Commit uint64 + IsInitialMsg bool + } + tbl := []MsgInfo{ + {MessageType: eraftpb.MessageType_MsgRequestVote, Commit: RaftInvalidIndex, IsInitialMsg: true}, + {MessageType: eraftpb.MessageType_MsgHeartbeat, Commit: RaftInvalidIndex, IsInitialMsg: true}, + {MessageType: eraftpb.MessageType_MsgHeartbeat, Commit: 100, IsInitialMsg: false}, + {MessageType: eraftpb.MessageType_MsgAppend, Commit: 100, IsInitialMsg: false}, + } + for _, m := range tbl { + msg := 
new(eraftpb.Message) + msg.MsgType = m.MessageType + msg.Commit = m.Commit + assert.Equal(t, IsInitialMsg(msg), m.IsInitialMsg) + } +} + +func TestEpochStale(t *testing.T) { + epoch := new(metapb.RegionEpoch) + epoch.Version = 10 + epoch.ConfVer = 10 + + type Ep struct { + Version uint64 + ConfVer uint64 + IsStale bool + } + tbl := []Ep{ + {Version: 11, ConfVer: 10, IsStale: true}, + {Version: 10, ConfVer: 11, IsStale: true}, + {Version: 10, ConfVer: 10, IsStale: false}, + {Version: 10, ConfVer: 9, IsStale: false}, + } + for _, e := range tbl { + checkEpoch := new(metapb.RegionEpoch) + checkEpoch.Version = e.Version + checkEpoch.ConfVer = e.ConfVer + assert.Equal(t, IsEpochStale(epoch, checkEpoch), e.IsStale) + } +} + +func TestCheckRegionEpoch(t *testing.T) { + epoch := new(metapb.RegionEpoch) + epoch.ConfVer = 2 + epoch.Version = 2 + region := new(metapb.Region) + region.RegionEpoch = epoch + + // Epoch is required for most requests even if it's empty. + emptyReq := new(raft_cmdpb.RaftCmdRequest) + assert.NotNil(t, CheckRegionEpoch(emptyReq, region, false)) + + // These admin commands do not require epoch. + tys := []raft_cmdpb.AdminCmdType{ + raft_cmdpb.AdminCmdType_CompactLog, + raft_cmdpb.AdminCmdType_InvalidAdmin, + } + for _, ty := range tys { + admin := new(raft_cmdpb.AdminRequest) + admin.CmdType = ty + req := new(raft_cmdpb.RaftCmdRequest) + req.AdminRequest = admin + + // It is Okay if req does not have region epoch. + assert.Nil(t, CheckRegionEpoch(req, region, false)) + + req.Header = new(raft_cmdpb.RaftRequestHeader) + req.Header.RegionEpoch = epoch + assert.Nil(t, CheckRegionEpoch(req, region, true)) + assert.Nil(t, CheckRegionEpoch(req, region, false)) + } + + // These admin commands requires epoch.version. + tys = []raft_cmdpb.AdminCmdType{ + raft_cmdpb.AdminCmdType_Split, + raft_cmdpb.AdminCmdType_TransferLeader, + } + for _, ty := range tys { + admin := new(raft_cmdpb.AdminRequest) + admin.CmdType = ty + req := new(raft_cmdpb.RaftCmdRequest) + req.AdminRequest = admin + + // Error if req does not have region epoch. + assert.NotNil(t, CheckRegionEpoch(req, region, false)) + + staleVersionEpoch := *epoch + staleVersionEpoch.Version = 1 + staleRegion := new(metapb.Region) + staleVersionEpochCloned := staleVersionEpoch + staleRegion.RegionEpoch = &staleVersionEpochCloned + staleVersionEpochCloned2 := staleVersionEpoch + req.Header = new(raft_cmdpb.RaftRequestHeader) + req.Header.RegionEpoch = &staleVersionEpochCloned2 + assert.Nil(t, CheckRegionEpoch(req, staleRegion, false)) + + latestVersionEpoch := *epoch + latestVersionEpoch.Version = 3 + + for _, e := range []metapb.RegionEpoch{staleVersionEpoch, latestVersionEpoch} { + eCloned := e + req.Header.RegionEpoch = &eCloned + assert.NotNil(t, CheckRegionEpoch(req, region, false)) + assert.NotNil(t, CheckRegionEpoch(req, region, true)) + } + } + + // These admin commands requires epoch.conf_version. + for _, ty := range []raft_cmdpb.AdminCmdType{ + raft_cmdpb.AdminCmdType_Split, + raft_cmdpb.AdminCmdType_ChangePeer, + raft_cmdpb.AdminCmdType_TransferLeader, + } { + admin := new(raft_cmdpb.AdminRequest) + admin.CmdType = ty + req := new(raft_cmdpb.RaftCmdRequest) + req.AdminRequest = admin + req.Header = new(raft_cmdpb.RaftRequestHeader) + + // Error if req does not have region epoch. 
+ assert.NotNil(t, CheckRegionEpoch(req, region, false)) + + staleConfEpoch := cloneEpoch(epoch) + staleConfEpoch.ConfVer = 1 + staleRegion := new(metapb.Region) + staleRegion.RegionEpoch = cloneEpoch(staleConfEpoch) + req.Header.RegionEpoch = cloneEpoch(staleConfEpoch) + assert.Nil(t, CheckRegionEpoch(req, staleRegion, false)) + + latestConfEpoch := cloneEpoch(epoch) + latestConfEpoch.ConfVer = 3 + for _, e := range []*metapb.RegionEpoch{staleConfEpoch, latestConfEpoch} { + req.Header.RegionEpoch = cloneEpoch(e) + assert.NotNil(t, CheckRegionEpoch(req, region, false)) + assert.NotNil(t, CheckRegionEpoch(req, region, true)) + } + } +} + +func cloneEpoch(epoch *metapb.RegionEpoch) *metapb.RegionEpoch { + return &metapb.RegionEpoch{ + ConfVer: epoch.ConfVer, + Version: epoch.Version, + } +} diff --git a/kv/server/server.go b/kv/server/server.go new file mode 100644 index 00000000..2b5daf03 --- /dev/null +++ b/kv/server/server.go @@ -0,0 +1,110 @@ +package server + +import ( + "context" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tsoutil" + + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/kv/storage/raft_storage" + "github.com/pingcap-incubator/tinykv/kv/transaction/latches" + "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" +) + +var _ tinykvpb.TinyKvServer = new(Server) + +// Server is a TinyKV server, it 'faces outwards', sending and receiving messages from clients such as TinySQL. +type Server struct { + storage storage.Storage + // used in 4A/4B + Latches *latches.Latches +} + +func NewServer(storage storage.Storage) *Server { + return &Server{ + storage: storage, + Latches: latches.NewLatches(), + } +} + +// The below functions are Server's gRPC API (implements TinyKvServer). + +// Raw API. +func (server *Server) RawGet(_ context.Context, req *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) { + // Your code here (1). + return nil, nil +} + +func (server *Server) RawPut(_ context.Context, req *kvrpcpb.RawPutRequest) (*kvrpcpb.RawPutResponse, error) { + // Your code here (1). + return nil, nil +} + +func (server *Server) RawDelete(_ context.Context, req *kvrpcpb.RawDeleteRequest) (*kvrpcpb.RawDeleteResponse, error) { + // Your code here (1). + return nil, nil +} + +func (server *Server) RawScan(_ context.Context, req *kvrpcpb.RawScanRequest) (*kvrpcpb.RawScanResponse, error) { + // Your code here (1). + return nil, nil +} + +// Raft commands (tinykv <-> tinykv) +// Only used for RaftStorage, so trivially forward it. +func (server *Server) Raft(stream tinykvpb.TinyKv_RaftServer) error { + return server.storage.(*raft_storage.RaftStorage).Raft(stream) +} + +// Snapshot stream (tinykv <-> tinykv) +// Only used for RaftStorage, so trivially forward it. +func (server *Server) Snapshot(stream tinykvpb.TinyKv_SnapshotServer) error { + return server.storage.(*raft_storage.RaftStorage).Snapshot(stream) +} + +// Transactional API. +func (server *Server) KvGet(_ context.Context, req *kvrpcpb.GetRequest) (*kvrpcpb.GetResponse, error) { + // Your code here (4B). + return nil, nil +} + +func (server *Server) KvPrewrite(_ context.Context, req *kvrpcpb.PrewriteRequest) (*kvrpcpb.PrewriteResponse, error) { + // Your code here (4B). + return nil, nil +} + +func (server *Server) KvCommit(_ context.Context, req *kvrpcpb.CommitRequest) (*kvrpcpb.CommitResponse, error) { + // Your code here (4B). 
+ return nil, nil +} + +func (server *Server) KvScan(_ context.Context, req *kvrpcpb.ScanRequest) (*kvrpcpb.ScanResponse, error) { + // Your code here (4C). + return nil, nil +} + +func (server *Server) KvCheckTxnStatus(_ context.Context, req *kvrpcpb.CheckTxnStatusRequest) (*kvrpcpb.CheckTxnStatusResponse, error) { + // Your code here (4C). + return nil, nil +} + +func (server *Server) KvBatchRollback(_ context.Context, req *kvrpcpb.BatchRollbackRequest) (*kvrpcpb.BatchRollbackResponse, error) { + // Your code here (4C). + return nil, nil +} + +func (server *Server) KvResolveLock(_ context.Context, req *kvrpcpb.ResolveLockRequest) (*kvrpcpb.ResolveLockResponse, error) { + // Your code here (4C). + return nil, nil +} + +// SQL push down commands. +func (server *Server) Coprocessor(_ context.Context, req *coprocessor.Request) (*coprocessor.Response, error) { + return &coprocessor.Response{}, nil +} + +// PhysicalTime returns the physical time part of the timestamp. +func PhysicalTime(ts uint64) uint64 { + return ts >> tsoutil.PhysicalShiftBits +} diff --git a/kv/server/server_test.go b/kv/server/server_test.go new file mode 100644 index 00000000..ad308a53 --- /dev/null +++ b/kv/server/server_test.go @@ -0,0 +1,326 @@ +package server + +import ( + "os" + "testing" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/kv/storage/standalone_storage" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/stretchr/testify/assert" +) + +func Set(s *standalone_storage.StandAloneStorage, cf string, key []byte, value []byte) error { + return s.Write(nil, []storage.Modify{ + { + Data: storage.Put{ + Cf: cf, + Key: key, + Value: value, + }, + }, + }) +} + +func Get(s *standalone_storage.StandAloneStorage, cf string, key []byte) ([]byte, error) { + reader, err := s.Reader(nil) + if err != nil { + return nil, err + } + return reader.GetCF(cf, key) +} + +func Iter(s *standalone_storage.StandAloneStorage, cf string) (engine_util.DBIterator, error) { + reader, err := s.Reader(nil) + if err != nil { + return nil, err + } + return reader.IterCF(cf), nil +} + +func cleanUpTestData(conf *config.Config) error { + if conf != nil { + return os.RemoveAll(conf.DBPath) + } + return nil +} + +func TestRawGet1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + Set(s, cf, []byte{99}, []byte{42}) + + req := &kvrpcpb.RawGetRequest{ + Key: []byte{99}, + Cf: cf, + } + resp, err := server.RawGet(nil, req) + assert.Nil(t, err) + assert.Equal(t, []byte{42}, resp.Value) +} + +func TestRawGetNotFound1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + req := &kvrpcpb.RawGetRequest{ + Key: []byte{99}, + Cf: cf, + } + resp, err := server.RawGet(nil, req) + assert.Nil(t, err) + assert.True(t, resp.NotFound) +} + +func TestRawPut1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + req := &kvrpcpb.RawPutRequest{ + Key: []byte{99}, + Value: []byte{42}, + Cf: cf, + } + + _, err := server.RawPut(nil, req) + + got, err := Get(s, 
cf, []byte{99}) + assert.Nil(t, err) + assert.Equal(t, []byte{42}, got) +} + +func TestRawGetAfterRawPut1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + put1 := &kvrpcpb.RawPutRequest{ + Key: []byte{99}, + Value: []byte{42}, + Cf: engine_util.CfDefault, + } + _, err := server.RawPut(nil, put1) + assert.Nil(t, err) + + put2 := &kvrpcpb.RawPutRequest{ + Key: []byte{99}, + Value: []byte{44}, + Cf: engine_util.CfWrite, + } + _, err = server.RawPut(nil, put2) + assert.Nil(t, err) + + get1 := &kvrpcpb.RawGetRequest{ + Key: []byte{99}, + Cf: engine_util.CfDefault, + } + resp, err := server.RawGet(nil, get1) + assert.Nil(t, err) + assert.Equal(t, []byte{42}, resp.Value) + + get2 := &kvrpcpb.RawGetRequest{ + Key: []byte{99}, + Cf: engine_util.CfWrite, + } + resp, err = server.RawGet(nil, get2) + assert.Nil(t, err) + assert.Equal(t, []byte{44}, resp.Value) +} + +func TestRawGetAfterRawDelete1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + assert.Nil(t, Set(s, cf, []byte{99}, []byte{42})) + + delete := &kvrpcpb.RawDeleteRequest{ + Key: []byte{99}, + Cf: cf, + } + get := &kvrpcpb.RawGetRequest{ + Key: []byte{99}, + Cf: cf, + } + + _, err := server.RawDelete(nil, delete) + assert.Nil(t, err) + + resp, err := server.RawGet(nil, get) + assert.Nil(t, err) + assert.True(t, resp.NotFound) +} + +func TestRawDelete1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + + req := &kvrpcpb.RawDeleteRequest{ + Key: []byte{99}, + Cf: cf, + } + + _, err := server.RawDelete(nil, req) + assert.Nil(t, err) + + _, err = Get(s, cf, []byte{99}) + assert.Equal(t, err, badger.ErrKeyNotFound) +} + +func TestRawScan1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + + Set(s, cf, []byte{1}, []byte{233, 1}) + Set(s, cf, []byte{2}, []byte{233, 2}) + Set(s, cf, []byte{3}, []byte{233, 3}) + Set(s, cf, []byte{4}, []byte{233, 4}) + Set(s, cf, []byte{5}, []byte{233, 5}) + + req := &kvrpcpb.RawScanRequest{ + StartKey: []byte{1}, + Limit: 3, + Cf: cf, + } + + resp, err := server.RawScan(nil, req) + assert.Nil(t, err) + + assert.Equal(t, 3, len(resp.Kvs)) + expectedKeys := [][]byte{{1}, {2}, {3}} + for i, kv := range resp.Kvs { + assert.Equal(t, expectedKeys[i], kv.Key) + assert.Equal(t, append([]byte{233}, expectedKeys[i]...), kv.Value) + } +} + +func TestRawScanAfterRawPut1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + assert.Nil(t, Set(s, cf, []byte{1}, []byte{233, 1})) + assert.Nil(t, Set(s, cf, []byte{2}, []byte{233, 2})) + assert.Nil(t, Set(s, cf, []byte{3}, []byte{233, 3})) + assert.Nil(t, Set(s, cf, []byte{4}, []byte{233, 4})) + + put := &kvrpcpb.RawPutRequest{ + Key: []byte{5}, + Value: []byte{233, 5}, + Cf: cf, + } + + scan := &kvrpcpb.RawScanRequest{ + StartKey: []byte{1}, + Limit: 10, + Cf: cf, + } + + expectedKeys := [][]byte{{1}, {2}, {3}, {4}, {5}} + + _, err := server.RawPut(nil, put) + assert.Nil(t, err) + + resp, err := server.RawScan(nil, scan) + 
assert.Nil(t, err) + assert.Equal(t, len(resp.Kvs), len(expectedKeys)) + for i, kv := range resp.Kvs { + assert.Equal(t, expectedKeys[i], kv.Key) + assert.Equal(t, append([]byte{233}, expectedKeys[i]...), kv.Value) + } +} + +func TestRawScanAfterRawDelete1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + assert.Nil(t, Set(s, cf, []byte{1}, []byte{233, 1})) + assert.Nil(t, Set(s, cf, []byte{2}, []byte{233, 2})) + assert.Nil(t, Set(s, cf, []byte{3}, []byte{233, 3})) + assert.Nil(t, Set(s, cf, []byte{4}, []byte{233, 4})) + + delete := &kvrpcpb.RawDeleteRequest{ + Key: []byte{3}, + Cf: cf, + } + + scan := &kvrpcpb.RawScanRequest{ + StartKey: []byte{1}, + Limit: 10, + Cf: cf, + } + + expectedKeys := [][]byte{{1}, {2}, {4}} + + _, err := server.RawDelete(nil, delete) + assert.Nil(t, err) + + resp, err := server.RawScan(nil, scan) + assert.Nil(t, err) + assert.Equal(t, len(resp.Kvs), len(expectedKeys)) + for i, kv := range resp.Kvs { + assert.Equal(t, expectedKeys[i], kv.Key) + assert.Equal(t, append([]byte{233}, expectedKeys[i]...), kv.Value) + } +} + +func TestIterWithRawDelete1(t *testing.T) { + conf := config.NewTestConfig() + s := standalone_storage.NewStandAloneStorage(conf) + server := NewServer(s) + defer cleanUpTestData(conf) + + cf := engine_util.CfDefault + assert.Nil(t, Set(s, cf, []byte{1}, []byte{233, 1})) + assert.Nil(t, Set(s, cf, []byte{2}, []byte{233, 2})) + assert.Nil(t, Set(s, cf, []byte{3}, []byte{233, 3})) + assert.Nil(t, Set(s, cf, []byte{4}, []byte{233, 4})) + + it, err := Iter(s, cf) + assert.Nil(t, err) + + delete := &kvrpcpb.RawDeleteRequest{ + Key: []byte{3}, + Cf: cf, + } + _, err = server.RawDelete(nil, delete) + assert.Nil(t, err) + + expectedKeys := [][]byte{{1}, {2}, {3}, {4}} + i := 0 + for it.Seek([]byte{1}); it.Valid(); it.Next() { + item := it.Item() + key := item.Key() + assert.Equal(t, expectedKeys[i], key) + i++ + } +} diff --git a/kv/storage/mem_storage.go b/kv/storage/mem_storage.go new file mode 100644 index 00000000..c6e56b50 --- /dev/null +++ b/kv/storage/mem_storage.go @@ -0,0 +1,242 @@ +package storage + +import ( + "bytes" + "fmt" + + "github.com/Connor1996/badger/y" + "github.com/petar/GoLLRB/llrb" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" +) + +// MemStorage is an in-memory storage engine used for testing. Data is not written to disk, nor sent to other +// nodes. It is intended for testing only. 
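+//
+// Illustrative usage (editor's sketch, not part of this change):
+//
+//	s := NewMemStorage()
+//	_ = s.Write(nil, []Modify{
+//		{Data: Put{Cf: engine_util.CfDefault, Key: []byte("k"), Value: []byte("v")}},
+//	})
+//	r, _ := s.Reader(nil)
+//	val, _ := r.GetCF(engine_util.CfDefault, []byte("k")) // val == []byte("v")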
+type MemStorage struct { + CfDefault *llrb.LLRB + CfLock *llrb.LLRB + CfWrite *llrb.LLRB +} + +func NewMemStorage() *MemStorage { + return &MemStorage{ + CfDefault: llrb.New(), + CfLock: llrb.New(), + CfWrite: llrb.New(), + } +} + +func (s *MemStorage) Start() error { + return nil +} + +func (s *MemStorage) Stop() error { + return nil +} + +func (s *MemStorage) Reader(ctx *kvrpcpb.Context) (StorageReader, error) { + return &memReader{s}, nil +} + +func (s *MemStorage) Write(ctx *kvrpcpb.Context, batch []Modify) error { + for _, m := range batch { + switch data := m.Data.(type) { + case Put: + item := memItem{data.Key, data.Value, false} + switch data.Cf { + case engine_util.CfDefault: + s.CfDefault.ReplaceOrInsert(item) + case engine_util.CfLock: + s.CfLock.ReplaceOrInsert(item) + case engine_util.CfWrite: + s.CfWrite.ReplaceOrInsert(item) + } + case Delete: + item := memItem{key: data.Key} + switch data.Cf { + case engine_util.CfDefault: + s.CfDefault.Delete(item) + case engine_util.CfLock: + s.CfLock.Delete(item) + case engine_util.CfWrite: + s.CfWrite.Delete(item) + } + } + } + + return nil +} + +func (s *MemStorage) Get(cf string, key []byte) []byte { + item := memItem{key: key} + var result llrb.Item + switch cf { + case engine_util.CfDefault: + result = s.CfDefault.Get(item) + case engine_util.CfLock: + result = s.CfLock.Get(item) + case engine_util.CfWrite: + result = s.CfWrite.Get(item) + } + + if result == nil { + return nil + } + + return result.(memItem).value +} + +func (s *MemStorage) Set(cf string, key []byte, value []byte) { + item := memItem{key, value, true} + switch cf { + case engine_util.CfDefault: + s.CfDefault.ReplaceOrInsert(item) + case engine_util.CfLock: + s.CfLock.ReplaceOrInsert(item) + case engine_util.CfWrite: + s.CfWrite.ReplaceOrInsert(item) + } +} + +func (s *MemStorage) HasChanged(cf string, key []byte) bool { + item := memItem{key: key} + var result llrb.Item + switch cf { + case engine_util.CfDefault: + result = s.CfDefault.Get(item) + case engine_util.CfLock: + result = s.CfLock.Get(item) + case engine_util.CfWrite: + result = s.CfWrite.Get(item) + } + if result == nil { + return true + } + + return !result.(memItem).fresh +} + +func (s *MemStorage) Len(cf string) int { + switch cf { + case engine_util.CfDefault: + return s.CfDefault.Len() + case engine_util.CfLock: + return s.CfLock.Len() + case engine_util.CfWrite: + return s.CfWrite.Len() + } + + return -1 +} + +// memReader is a StorageReader which reads from a MemStorage. 
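Each column family is held in an ordered LLRB tree from petar/GoLLRB, compared byte-wise by key; that ordering is what gives the reader below its sorted Seek/Next iteration. A small, self-contained sketch of the underlying pattern (the kvItem type here is illustrative, not from this commit):

```
package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

// kvItem mirrors memItem: ordering by key makes AscendGreaterOrEqual
// behave like a sorted scan starting from an arbitrary seek key.
type kvItem struct{ key, value []byte }

func (a kvItem) Less(than llrb.Item) bool {
	return string(a.key) < string(than.(kvItem).key)
}

func main() {
	tree := llrb.New()
	for _, k := range []byte{3, 1, 2} {
		tree.ReplaceOrInsert(kvItem{key: []byte{k}, value: []byte{233, k}})
	}
	// Seek to key {2} and walk forward, the same way memIter.Seek/Next do.
	tree.AscendGreaterOrEqual(kvItem{key: []byte{2}}, func(i llrb.Item) bool {
		fmt.Println(i.(kvItem).key) // prints [2], then [3]
		return true                 // keep ascending
	})
}
```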
+type memReader struct { + inner *MemStorage +} + +func (mr *memReader) GetCF(cf string, key []byte) ([]byte, error) { + item := memItem{key: key} + var result llrb.Item + switch cf { + case engine_util.CfDefault: + result = mr.inner.CfDefault.Get(item) + case engine_util.CfLock: + result = mr.inner.CfLock.Get(item) + case engine_util.CfWrite: + result = mr.inner.CfWrite.Get(item) + default: + return nil, fmt.Errorf("mem-server: bad CF %s", cf) + } + + if result == nil { + return nil, nil + } + + return result.(memItem).value, nil +} + +func (mr *memReader) IterCF(cf string) engine_util.DBIterator { + var data *llrb.LLRB + switch cf { + case engine_util.CfDefault: + data = mr.inner.CfDefault + case engine_util.CfLock: + data = mr.inner.CfLock + case engine_util.CfWrite: + data = mr.inner.CfWrite + default: + return nil + } + + min := data.Min() + if min == nil { + return &memIter{data, memItem{}} + } + return &memIter{data, min.(memItem)} +} + +func (r *memReader) Close() {} + +type memIter struct { + data *llrb.LLRB + item memItem +} + +func (it *memIter) Item() engine_util.DBItem { + return it.item +} +func (it *memIter) Valid() bool { + return it.item.key != nil +} +func (it *memIter) Next() { + first := true + oldItem := it.item + it.item = memItem{} + it.data.AscendGreaterOrEqual(oldItem, func(item llrb.Item) bool { + // Skip the first item, which will be it.item + if first { + first = false + return true + } + + it.item = item.(memItem) + return false + }) +} +func (it *memIter) Seek(key []byte) { + it.item = memItem{} + it.data.AscendGreaterOrEqual(memItem{key: key}, func(item llrb.Item) bool { + it.item = item.(memItem) + + return false + }) +} + +func (it *memIter) Close() {} + +type memItem struct { + key []byte + value []byte + fresh bool +} + +func (it memItem) Key() []byte { + return it.key +} +func (it memItem) KeyCopy(dst []byte) []byte { + return y.SafeCopy(dst, it.key) +} +func (it memItem) Value() ([]byte, error) { + return it.value, nil +} +func (it memItem) ValueSize() int { + return len(it.value) +} +func (it memItem) ValueCopy(dst []byte) ([]byte, error) { + return y.SafeCopy(dst, it.value), nil +} + +func (it memItem) Less(than llrb.Item) bool { + other := than.(memItem) + return bytes.Compare(it.key, other.key) < 0 +} diff --git a/kv/storage/modify.go b/kv/storage/modify.go new file mode 100644 index 00000000..ba8ed201 --- /dev/null +++ b/kv/storage/modify.go @@ -0,0 +1,37 @@ +package storage + +// Modify is a single modification to TinyKV's underlying storage. 
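The usual producer of these modifications is the raw API in kv/server, exercised by the tests earlier in this commit. A hedged sketch of what such a handler could look like; the Server stand-in, its storage field, the Context field on the request, and the RawPutResponse message are assumptions for illustration, not code from this commit:

```
package server

import (
	"context"

	"github.com/pingcap-incubator/tinykv/kv/storage"
	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
)

// Server here is a stand-in for the real kv/server type; the storage
// field name is an assumption for illustration.
type Server struct {
	storage storage.Storage
}

// RawPut wraps the request in a single Put modification and hands it to
// the underlying Storage, which decides how (and where) to persist it.
func (server *Server) RawPut(_ context.Context, req *kvrpcpb.RawPutRequest) (*kvrpcpb.RawPutResponse, error) {
	batch := []storage.Modify{
		{Data: storage.Put{Cf: req.Cf, Key: req.Key, Value: req.Value}},
	}
	if err := server.storage.Write(req.Context, batch); err != nil {
		return nil, err
	}
	return &kvrpcpb.RawPutResponse{}, nil
}
```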
+type Modify struct { + Data interface{} +} + +type Put struct { + Key []byte + Value []byte + Cf string +} + +type Delete struct { + Key []byte + Cf string +} + +func (m *Modify) Key() []byte { + switch m.Data.(type) { + case Put: + return m.Data.(Put).Key + case Delete: + return m.Data.(Delete).Key + } + return nil +} + +func (m *Modify) Cf() string { + switch m.Data.(type) { + case Put: + return m.Data.(Put).Cf + case Delete: + return m.Data.(Delete).Cf + } + return "" +} diff --git a/kv/storage/raft_storage/raft_client.go b/kv/storage/raft_storage/raft_client.go new file mode 100644 index 00000000..424e49dc --- /dev/null +++ b/kv/storage/raft_storage/raft_client.go @@ -0,0 +1,133 @@ +package raft_storage + +import ( + "context" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +type raftConn struct { + streamMu sync.Mutex + stream tinykvpb.TinyKv_RaftClient + ctx context.Context + cancel context.CancelFunc +} + +func newRaftConn(addr string, cfg *config.Config) (*raftConn, error) { + cc, err := grpc.Dial(addr, grpc.WithInsecure(), + grpc.WithInitialWindowSize(2*1024*1024), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 3 * time.Second, + Timeout: 60 * time.Second, + PermitWithoutStream: true, + })) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tinykvpb.NewTinyKvClient(cc).Raft(ctx) + if err != nil { + cancel() + return nil, err + } + return &raftConn{ + stream: stream, + ctx: ctx, + cancel: cancel, + }, nil +} + +func (c *raftConn) Stop() { + c.cancel() +} + +func (c *raftConn) Send(msg *raft_serverpb.RaftMessage) error { + c.streamMu.Lock() + defer c.streamMu.Unlock() + return c.stream.Send(msg) +} + +type connKey struct { + addr string + index int +} + +type RaftClient struct { + config *config.Config + sync.RWMutex + conn *raftConn + addrs map[uint64]string +} + +func newRaftClient(config *config.Config) *RaftClient { + return &RaftClient{ + config: config, + addrs: make(map[uint64]string), + } +} + +func (c *RaftClient) getConn(addr string, regionID uint64) (*raftConn, error) { + c.RLock() + if c.conn != nil { + c.RUnlock() + return c.conn, nil + } + c.RUnlock() + newConn, err := newRaftConn(addr, c.config) + if err != nil { + return nil, err + } + c.Lock() + defer c.Unlock() + if c.conn != nil { + newConn.Stop() + return c.conn, nil + } + c.conn = newConn + return newConn, nil +} + +func (c *RaftClient) Send(storeID uint64, addr string, msg *raft_serverpb.RaftMessage) error { + conn, err := c.getConn(addr, msg.GetRegionId()) + if err != nil { + return err + } + err = conn.Send(msg) + if err == nil { + return nil + } + + log.Error("raft client failed to send") + c.Lock() + defer c.Unlock() + conn.Stop() + c.conn = nil + if oldAddr, ok := c.addrs[storeID]; ok && oldAddr == addr { + delete(c.addrs, storeID) + } + return err +} + +func (c *RaftClient) GetAddr(storeID uint64) string { + c.RLock() + defer c.RUnlock() + v, _ := c.addrs[storeID] + return v +} + +func (c *RaftClient) InsertAddr(storeID uint64, addr string) { + c.Lock() + defer c.Unlock() + c.addrs[storeID] = addr +} + +func (c *RaftClient) Flush() { + // Not support BufferHint +} diff --git a/kv/storage/raft_storage/raft_server.go b/kv/storage/raft_storage/raft_server.go new file mode 
100644 index 00000000..c6ab547e --- /dev/null +++ b/kv/storage/raft_storage/raft_server.go @@ -0,0 +1,222 @@ +package raft_storage + +import ( + "context" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/scheduler_client" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" + "github.com/pingcap/errors" +) + +// RaftStorage is an implementation of `Storage` (see tikv/server.go) backed by a Raft node. It is part of a Raft network. +// By using Raft, reads and writes are consistent with other nodes in the TinyKV instance. +type RaftStorage struct { + engines *engine_util.Engines + config *config.Config + + node *raftstore.Node + snapManager *snap.SnapManager + raftRouter *raftstore.RaftstoreRouter + batchSystem *raftstore.RaftBatchSystem + resolveWorker *worker.Worker + snapWorker *worker.Worker + + wg sync.WaitGroup +} + +type RegionError struct { + RequestErr *errorpb.Error +} + +func (re *RegionError) Error() string { + return re.RequestErr.String() +} + +func (rs *RaftStorage) checkResponse(resp *raft_cmdpb.RaftCmdResponse, reqCount int) error { + if resp.Header.Error != nil { + return &RegionError{RequestErr: resp.Header.Error} + } + if len(resp.Responses) != reqCount { + return errors.Errorf("responses count %d is not equal to requests count %d", + len(resp.Responses), reqCount) + } + return nil +} + +// NewRaftStorage creates a new storage engine backed by a raftstore. 
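The intended lifecycle mirrors the generic Storage interface used elsewhere in this commit: construct, Start (connect to the scheduler and launch the raftstore workers), serve reads and writes, Stop. A rough, hedged sketch; it borrows the test config constructor used by this commit's tests, the paths and addresses are placeholders, and it will not make progress without a running scheduler and peers:

```
package main

import (
	"github.com/pingcap-incubator/tinykv/kv/config"
	"github.com/pingcap-incubator/tinykv/kv/storage"
	"github.com/pingcap-incubator/tinykv/kv/storage/raft_storage"
	"github.com/pingcap-incubator/tinykv/kv/util/engine_util"
	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
)

func main() {
	conf := config.NewTestConfig()
	conf.DBPath = "/tmp/tinykv-data"      // placeholder path
	conf.SchedulerAddr = "127.0.0.1:2379" // placeholder scheduler endpoint

	rs := raft_storage.NewRaftStorage(conf)
	if err := rs.Start(); err != nil { // connects to the scheduler, starts raftstore workers
		panic(err)
	}
	defer rs.Stop()

	// Unlike the standalone engine, every request carries routing information
	// so the raftstore can build a command header for the target region.
	ctx := &kvrpcpb.Context{
		RegionId:    1,
		Peer:        &metapb.Peer{Id: 1, StoreId: 1},
		RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1},
	}
	_ = rs.Write(ctx, []storage.Modify{
		{Data: storage.Put{Cf: engine_util.CfDefault, Key: []byte("k"), Value: []byte("v")}},
	})
}
```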
+func NewRaftStorage(conf *config.Config) *RaftStorage { + dbPath := conf.DBPath + kvPath := filepath.Join(dbPath, "kv") + raftPath := filepath.Join(dbPath, "raft") + snapPath := filepath.Join(dbPath, "snap") + + os.MkdirAll(kvPath, os.ModePerm) + os.MkdirAll(raftPath, os.ModePerm) + os.Mkdir(snapPath, os.ModePerm) + + raftDB := engine_util.CreateDB("raft", conf) + kvDB := engine_util.CreateDB("kv", conf) + engines := engine_util.NewEngines(kvDB, raftDB, kvPath, raftPath) + + return &RaftStorage{engines: engines, config: conf} +} + +func (rs *RaftStorage) Write(ctx *kvrpcpb.Context, batch []storage.Modify) error { + var reqs []*raft_cmdpb.Request + for _, m := range batch { + switch m.Data.(type) { + case storage.Put: + put := m.Data.(storage.Put) + reqs = append(reqs, &raft_cmdpb.Request{ + CmdType: raft_cmdpb.CmdType_Put, + Put: &raft_cmdpb.PutRequest{ + Cf: put.Cf, + Key: put.Key, + Value: put.Value, + }}) + case storage.Delete: + delete := m.Data.(storage.Delete) + reqs = append(reqs, &raft_cmdpb.Request{ + CmdType: raft_cmdpb.CmdType_Delete, + Delete: &raft_cmdpb.DeleteRequest{ + Cf: delete.Cf, + Key: delete.Key, + }}) + } + } + + header := &raft_cmdpb.RaftRequestHeader{ + RegionId: ctx.RegionId, + Peer: ctx.Peer, + RegionEpoch: ctx.RegionEpoch, + Term: ctx.Term, + } + request := &raft_cmdpb.RaftCmdRequest{ + Header: header, + Requests: reqs, + } + cb := message.NewCallback() + if err := rs.raftRouter.SendRaftCommand(request, cb); err != nil { + return err + } + + return rs.checkResponse(cb.WaitResp(), len(reqs)) +} + +func (rs *RaftStorage) Reader(ctx *kvrpcpb.Context) (storage.StorageReader, error) { + header := &raft_cmdpb.RaftRequestHeader{ + RegionId: ctx.RegionId, + Peer: ctx.Peer, + RegionEpoch: ctx.RegionEpoch, + Term: ctx.Term, + } + request := &raft_cmdpb.RaftCmdRequest{ + Header: header, + Requests: []*raft_cmdpb.Request{{ + CmdType: raft_cmdpb.CmdType_Snap, + Snap: &raft_cmdpb.SnapRequest{}, + }}, + } + cb := message.NewCallback() + if err := rs.raftRouter.SendRaftCommand(request, cb); err != nil { + return nil, err + } + + resp := cb.WaitResp() + if err := rs.checkResponse(resp, 1); err != nil { + if cb.Txn != nil { + cb.Txn.Discard() + } + return nil, err + } + if cb.Txn == nil { + panic("can not found region snap") + } + if len(resp.Responses) != 1 { + panic("wrong response count for snap cmd") + } + return NewRegionReader(cb.Txn, *resp.Responses[0].GetSnap().Region), nil +} + +func (rs *RaftStorage) Raft(stream tinykvpb.TinyKv_RaftServer) error { + for { + msg, err := stream.Recv() + if err != nil { + return err + } + rs.raftRouter.SendRaftMessage(msg) + } +} + +func (rs *RaftStorage) Snapshot(stream tinykvpb.TinyKv_SnapshotServer) error { + var err error + done := make(chan struct{}) + rs.snapWorker.Sender() <- &recvSnapTask{ + stream: stream, + callback: func(e error) { + err = e + close(done) + }, + } + <-done + return err +} + +func (rs *RaftStorage) Start() error { + cfg := rs.config + schedulerClient, err := scheduler_client.NewClient(strings.Split(cfg.SchedulerAddr, ","), "") + if err != nil { + return err + } + rs.raftRouter, rs.batchSystem = raftstore.CreateRaftBatchSystem(cfg) + + rs.resolveWorker = worker.NewWorker("resolver", &rs.wg) + resolveSender := rs.resolveWorker.Sender() + resolveRunner := newResolverRunner(schedulerClient) + rs.resolveWorker.Start(resolveRunner) + + rs.snapManager = snap.NewSnapManager(cfg.DBPath + "snap") + rs.snapWorker = worker.NewWorker("snap-worker", &rs.wg) + snapSender := rs.snapWorker.Sender() + snapRunner := 
newSnapRunner(rs.snapManager, rs.config, rs.raftRouter) + rs.snapWorker.Start(snapRunner) + + raftClient := newRaftClient(cfg) + trans := NewServerTransport(raftClient, snapSender, rs.raftRouter, resolveSender) + + rs.node = raftstore.NewNode(rs.batchSystem, rs.config, schedulerClient) + err = rs.node.Start(context.TODO(), rs.engines, trans, rs.snapManager) + if err != nil { + return err + } + + return nil +} + +func (rs *RaftStorage) Stop() error { + rs.snapWorker.Stop() + rs.node.Stop() + rs.resolveWorker.Stop() + rs.wg.Wait() + if err := rs.engines.Raft.Close(); err != nil { + return err + } + if err := rs.engines.Kv.Close(); err != nil { + return err + } + return nil +} diff --git a/kv/storage/raft_storage/region_reader.go b/kv/storage/raft_storage/region_reader.go new file mode 100644 index 00000000..f55bbe6e --- /dev/null +++ b/kv/storage/raft_storage/region_reader.go @@ -0,0 +1,86 @@ +package raft_storage + +import ( + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +type RegionReader struct { + txn *badger.Txn + region *metapb.Region +} + +func NewRegionReader(txn *badger.Txn, region metapb.Region) *RegionReader { + return &RegionReader{ + txn: txn, + region: ®ion, + } +} + +func (r *RegionReader) GetCF(cf string, key []byte) ([]byte, error) { + if err := util.CheckKeyInRegion(key, r.region); err != nil { + return nil, err + } + return engine_util.GetCFFromTxn(r.txn, cf, key) +} + +func (r *RegionReader) IterCF(cf string) engine_util.DBIterator { + return NewRegionIterator(engine_util.NewCFIterator(cf, r.txn), r.region) +} + +func (r *RegionReader) Close() { + r.txn.Discard() +} + +// RegionIterator wraps a db iterator and only allow it to iterate in the region. It behaves as if underlying +// db only contains one region. +type RegionIterator struct { + iter *engine_util.BadgerIterator + region *metapb.Region +} + +func NewRegionIterator(iter *engine_util.BadgerIterator, region *metapb.Region) *RegionIterator { + return &RegionIterator{ + iter: iter, + region: region, + } +} + +func (it *RegionIterator) Item() engine_util.DBItem { + return it.iter.Item() +} + +func (it *RegionIterator) Valid() bool { + if !it.iter.Valid() || engine_util.ExceedEndKey(it.iter.Item().Key(), it.region.EndKey) { + return false + } + return true +} + +func (it *RegionIterator) ValidForPrefix(prefix []byte) bool { + if !it.iter.ValidForPrefix(prefix) || engine_util.ExceedEndKey(it.iter.Item().Key(), it.region.EndKey) { + return false + } + return true +} + +func (it *RegionIterator) Close() { + it.iter.Close() +} + +func (it *RegionIterator) Next() { + it.iter.Next() +} + +func (it *RegionIterator) Seek(key []byte) { + if err := util.CheckKeyInRegion(key, it.region); err != nil { + panic(err) + } + it.iter.Seek(key) +} + +func (it *RegionIterator) Rewind() { + it.iter.Rewind() +} diff --git a/kv/storage/raft_storage/resolver.go b/kv/storage/raft_storage/resolver.go new file mode 100644 index 00000000..c451239d --- /dev/null +++ b/kv/storage/raft_storage/resolver.go @@ -0,0 +1,67 @@ +package raft_storage + +import ( + "context" + "time" + + "github.com/pingcap-incubator/tinykv/kv/util/worker" + + "github.com/pingcap-incubator/tinykv/kv/raftstore/scheduler_client" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap/errors" +) + +// Handle will resolve t's storeID into the address of the TinyKV node which should handle t. 
t's callback is then +// called with that address. +func (r *resolverRunner) Handle(t worker.Task) { + data := t.(*resolveAddrTask) + data.callback(r.getAddr(data.storeID)) +} + +const storeAddressRefreshSeconds = 60 + +type storeAddr struct { + addr string + lastUpdate time.Time +} + +type resolverRunner struct { + schedulerClient scheduler_client.Client + storeAddrs map[uint64]storeAddr +} + +type resolveAddrTask struct { + storeID uint64 + callback func(addr string, err error) +} + +func newResolverRunner(schedulerClient scheduler_client.Client) *resolverRunner { + return &resolverRunner{ + schedulerClient: schedulerClient, + storeAddrs: make(map[uint64]storeAddr), + } +} + +func (r *resolverRunner) getAddr(id uint64) (string, error) { + if sa, ok := r.storeAddrs[id]; ok { + if time.Since(sa.lastUpdate).Seconds() < storeAddressRefreshSeconds { + return sa.addr, nil + } + } + store, err := r.schedulerClient.GetStore(context.TODO(), id) + if err != nil { + return "", err + } + if store.GetState() == metapb.StoreState_Tombstone { + return "", errors.Errorf("store %d has been removed", id) + } + addr := store.GetAddress() + if addr == "" { + return "", errors.Errorf("invalid empty address for store %d", id) + } + r.storeAddrs[id] = storeAddr{ + addr: addr, + lastUpdate: time.Now(), + } + return addr, nil +} diff --git a/kv/storage/raft_storage/snap_runner.go b/kv/storage/raft_storage/snap_runner.go new file mode 100644 index 00000000..9ac14374 --- /dev/null +++ b/kv/storage/raft_storage/snap_runner.go @@ -0,0 +1,182 @@ +package raft_storage + +import ( + "bytes" + "context" + "io" + "time" + + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/tinykvpb" + "github.com/pingcap/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +type sendSnapTask struct { + addr string + msg *raft_serverpb.RaftMessage + callback func(error) +} + +type recvSnapTask struct { + stream tinykvpb.TinyKv_SnapshotServer + callback func(error) +} + +type snapRunner struct { + config *config.Config + snapManager *snap.SnapManager + router message.RaftRouter +} + +func newSnapRunner(snapManager *snap.SnapManager, config *config.Config, router message.RaftRouter) *snapRunner { + return &snapRunner{ + config: config, + snapManager: snapManager, + router: router, + } +} + +func (r *snapRunner) Handle(t worker.Task) { + switch t.(type) { + case *sendSnapTask: + r.send(t.(*sendSnapTask)) + case *recvSnapTask: + r.recv(t.(*recvSnapTask)) + } +} + +func (r *snapRunner) send(t *sendSnapTask) { + t.callback(r.sendSnap(t.addr, t.msg)) +} + +const snapChunkLen = 1024 * 1024 + +func (r *snapRunner) sendSnap(addr string, msg *raft_serverpb.RaftMessage) error { + start := time.Now() + msgSnap := msg.GetMessage().GetSnapshot() + snapKey, err := snap.SnapKeyFromSnap(msgSnap) + if err != nil { + return err + } + + r.snapManager.Register(snapKey, snap.SnapEntrySending) + defer r.snapManager.Deregister(snapKey, snap.SnapEntrySending) + + snap, err := r.snapManager.GetSnapshotForSending(snapKey) + if err != nil { + return err + } + if !snap.Exists() { + return errors.Errorf("missing snap file: %v", snap.Path()) + } + + cc, err := grpc.Dial(addr, grpc.WithInsecure(), + 
grpc.WithInitialWindowSize(2*1024*1024), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 3 * time.Second, + Timeout: 60 * time.Second, + })) + if err != nil { + return err + } + client := tinykvpb.NewTinyKvClient(cc) + stream, err := client.Snapshot(context.TODO()) + if err != nil { + return err + } + err = stream.Send(&raft_serverpb.SnapshotChunk{Message: msg}) + if err != nil { + return err + } + + buf := make([]byte, snapChunkLen) + for remain := snap.TotalSize(); remain > 0; remain -= uint64(len(buf)) { + if remain < uint64(len(buf)) { + buf = buf[:remain] + } + _, err := io.ReadFull(snap, buf) + if err != nil { + return errors.Errorf("failed to read snapshot chunk: %v", err) + } + err = stream.Send(&raft_serverpb.SnapshotChunk{Data: buf}) + if err != nil { + return err + } + } + _, err = stream.CloseAndRecv() + if err != nil { + return err + } + + log.Infof("sent snapshot. regionID: %v, snapKey: %v, size: %v, duration: %s", snapKey.RegionID, snapKey, snap.TotalSize(), time.Since(start)) + return nil +} + +func (r *snapRunner) recv(t *recvSnapTask) { + msg, err := r.recvSnap(t.stream) + if err == nil { + r.router.SendRaftMessage(msg) + } + t.callback(err) +} + +func (r *snapRunner) recvSnap(stream tinykvpb.TinyKv_SnapshotServer) (*raft_serverpb.RaftMessage, error) { + head, err := stream.Recv() + if err != nil { + return nil, err + } + if head.GetMessage() == nil { + return nil, errors.New("no raft message in the first chunk") + } + message := head.GetMessage().GetMessage() + snapKey, err := snap.SnapKeyFromSnap(message.GetSnapshot()) + if err != nil { + return nil, errors.Errorf("failed to create snap key: %v", err) + } + + data := message.GetSnapshot().GetData() + snapshot, err := r.snapManager.GetSnapshotForReceiving(snapKey, data) + if err != nil { + return nil, errors.Errorf("%v failed to create snapshot file: %v", snapKey, err) + } + if snapshot.Exists() { + log.Infof("snapshot file already exists, skip receiving. 
snapKey: %v, file: %v", snapKey, snapshot.Path()) + stream.SendAndClose(&raft_serverpb.Done{}) + return head.GetMessage(), nil + } + r.snapManager.Register(snapKey, snap.SnapEntryReceiving) + defer r.snapManager.Deregister(snapKey, snap.SnapEntryReceiving) + + for { + chunk, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + data := chunk.GetData() + if len(data) == 0 { + return nil, errors.Errorf("%v receive chunk with empty data", snapKey) + } + _, err = bytes.NewReader(data).WriteTo(snapshot) + if err != nil { + return nil, errors.Errorf("%v failed to write snapshot file %v: %v", snapKey, snapshot.Path(), err) + } + } + + err = snapshot.Save() + if err != nil { + return nil, err + } + + stream.SendAndClose(&raft_serverpb.Done{}) + return head.GetMessage(), nil +} diff --git a/kv/storage/raft_storage/transport.go b/kv/storage/raft_storage/transport.go new file mode 100644 index 00000000..3a1c1e53 --- /dev/null +++ b/kv/storage/raft_storage/transport.go @@ -0,0 +1,95 @@ +package raft_storage + +import ( + "sync" + + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/util/worker" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" +) + +type ServerTransport struct { + raftClient *RaftClient + raftRouter message.RaftRouter + resolverScheduler chan<- worker.Task + snapScheduler chan<- worker.Task + resolving sync.Map +} + +func NewServerTransport(raftClient *RaftClient, snapScheduler chan<- worker.Task, raftRouter message.RaftRouter, resolverScheduler chan<- worker.Task) *ServerTransport { + return &ServerTransport{ + raftClient: raftClient, + raftRouter: raftRouter, + resolverScheduler: resolverScheduler, + snapScheduler: snapScheduler, + } +} + +func (t *ServerTransport) Send(msg *raft_serverpb.RaftMessage) error { + storeID := msg.GetToPeer().GetStoreId() + t.SendStore(storeID, msg) + return nil +} + +func (t *ServerTransport) SendStore(storeID uint64, msg *raft_serverpb.RaftMessage) { + addr := t.raftClient.GetAddr(storeID) + if addr != "" { + t.WriteData(storeID, addr, msg) + return + } + if _, ok := t.resolving.Load(storeID); ok { + log.Debugf("store address is being resolved, msg dropped. storeID: %v, msg: %s", storeID, msg) + return + } + log.Debug("begin to resolve store address. storeID: %v", storeID) + t.resolving.Store(storeID, struct{}{}) + t.Resolve(storeID, msg) +} + +func (t *ServerTransport) Resolve(storeID uint64, msg *raft_serverpb.RaftMessage) { + callback := func(addr string, err error) { + // clear resolving + t.resolving.Delete(storeID) + if err != nil { + log.Errorf("resolve store address failed. storeID: %v, err: %v", storeID, err) + return + } + t.raftClient.InsertAddr(storeID, addr) + t.WriteData(storeID, addr, msg) + t.raftClient.Flush() + } + t.resolverScheduler <- &resolveAddrTask{ + storeID: storeID, + callback: callback, + } +} + +func (t *ServerTransport) WriteData(storeID uint64, addr string, msg *raft_serverpb.RaftMessage) { + if msg.GetMessage().GetSnapshot() != nil { + t.SendSnapshotSock(addr, msg) + return + } + if err := t.raftClient.Send(storeID, addr, msg); err != nil { + log.Errorf("send raft msg err. err: %v", err) + } +} + +func (t *ServerTransport) SendSnapshotSock(addr string, msg *raft_serverpb.RaftMessage) { + callback := func(err error) { + regionID := msg.GetRegionId() + toPeerID := msg.GetToPeer().GetId() + toStoreID := msg.GetToPeer().GetStoreId() + log.Debugf("send snapshot. 
toPeerID: %v, toStoreID: %v, regionID: %v, status: %v", toPeerID, toStoreID, regionID, err) + } + + t.snapScheduler <- &sendSnapTask{ + addr: addr, + msg: msg, + callback: callback, + } +} + +func (t *ServerTransport) Flush() { + t.raftClient.Flush() +} diff --git a/kv/storage/standalone_storage/standalone_storage.go b/kv/storage/standalone_storage/standalone_storage.go new file mode 100644 index 00000000..32a77336 --- /dev/null +++ b/kv/storage/standalone_storage/standalone_storage.go @@ -0,0 +1,38 @@ +package standalone_storage + +import ( + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" +) + +// StandAloneStorage is an implementation of `Storage` for a single-node TinyKV instance. It does not +// communicate with other nodes and all data is stored locally. +type StandAloneStorage struct { + // Your Data Here (1). +} + +func NewStandAloneStorage(conf *config.Config) *StandAloneStorage { + // Your Code Here (1). + return nil +} + +func (s *StandAloneStorage) Start() error { + // Your Code Here (1). + return nil +} + +func (s *StandAloneStorage) Stop() error { + // Your Code Here (1). + return nil +} + +func (s *StandAloneStorage) Reader(ctx *kvrpcpb.Context) (storage.StorageReader, error) { + // Your Code Here (1). + return nil, nil +} + +func (s *StandAloneStorage) Write(ctx *kvrpcpb.Context, batch []storage.Modify) error { + // Your Code Here (1). + return nil +} diff --git a/kv/storage/storage.go b/kv/storage/storage.go new file mode 100644 index 00000000..75720172 --- /dev/null +++ b/kv/storage/storage.go @@ -0,0 +1,21 @@ +package storage + +import ( + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" +) + +// Storage represents the internal-facing server part of TinyKV, it handles sending and receiving from other +// TinyKV nodes. As part of that responsibility, it also reads and writes data to disk (or semi-permanent memory). 
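To make the contract concrete, the read side of the raw API might consume this interface as follows, continuing the hedged Server sketch from the RawPut example earlier (the request's Context field, the RawGetResponse message, and the badger not-found mapping are assumptions, not code from this commit):

```
// Reads go through a StorageReader obtained from the Storage interface.
// Whether a missing key surfaces as (nil, nil) or as badger.ErrKeyNotFound
// (from github.com/Connor1996/badger) depends on the concrete reader, so
// both are mapped to NotFound here, matching TestRawGetAfterRawDelete1.
func (server *Server) RawGet(_ context.Context, req *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) {
	reader, err := server.storage.Reader(req.Context)
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	val, err := reader.GetCF(req.Cf, req.Key)
	if err == badger.ErrKeyNotFound {
		val, err = nil, nil
	}
	if err != nil {
		return nil, err
	}
	resp := &kvrpcpb.RawGetResponse{Value: val}
	if val == nil {
		resp.NotFound = true
	}
	return resp, nil
}
```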
+type Storage interface { + Start() error + Stop() error + Write(ctx *kvrpcpb.Context, batch []Modify) error + Reader(ctx *kvrpcpb.Context) (StorageReader, error) +} + +type StorageReader interface { + GetCF(cf string, key []byte) ([]byte, error) + IterCF(cf string) engine_util.DBIterator + Close() +} diff --git a/kv/test_raftstore/cluster.go b/kv/test_raftstore/cluster.go new file mode 100644 index 00000000..10bb7d36 --- /dev/null +++ b/kv/test_raftstore/cluster.go @@ -0,0 +1,466 @@ +package test_raftstore + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore" + "github.com/pingcap-incubator/tinykv/kv/storage/raft_storage" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" +) + +type Simulator interface { + RunStore(raftConf *config.Config, engine *engine_util.Engines, ctx context.Context) error + StopStore(storeID uint64) + AddFilter(filter Filter) + ClearFilters() + GetStoreIds() []uint64 + CallCommandOnStore(storeID uint64, request *raft_cmdpb.RaftCmdRequest, timeout time.Duration) (*raft_cmdpb.RaftCmdResponse, *badger.Txn) +} + +type Cluster struct { + schedulerClient *MockSchedulerClient + count int + engines map[uint64]*engine_util.Engines + snapPaths map[uint64]string + dirs []string + simulator Simulator + cfg *config.Config +} + +func NewCluster(count int, schedulerClient *MockSchedulerClient, simulator Simulator, cfg *config.Config) *Cluster { + return &Cluster{ + count: count, + schedulerClient: schedulerClient, + engines: make(map[uint64]*engine_util.Engines), + snapPaths: make(map[uint64]string), + simulator: simulator, + cfg: cfg, + } +} + +func (c *Cluster) Start() { + ctx := context.TODO() + clusterID := c.schedulerClient.GetClusterID(ctx) + + for storeID := uint64(1); storeID <= uint64(c.count); storeID++ { + dbPath, err := ioutil.TempDir("", "test-raftstore") + if err != nil { + panic(err) + } + c.cfg.DBPath = dbPath + kvPath := filepath.Join(dbPath, "kv") + raftPath := filepath.Join(dbPath, "raft") + snapPath := filepath.Join(dbPath, "snap") + c.snapPaths[storeID] = snapPath + c.dirs = append(c.dirs, []string{kvPath, raftPath, snapPath}...) 
+ + err = os.MkdirAll(kvPath, os.ModePerm) + if err != nil { + panic(err) + } + err = os.MkdirAll(raftPath, os.ModePerm) + if err != nil { + panic(err) + } + err = os.MkdirAll(snapPath, os.ModePerm) + if err != nil { + panic(err) + } + + raftDB := engine_util.CreateDB("raft", c.cfg) + kvDB := engine_util.CreateDB("kv", c.cfg) + engine := engine_util.NewEngines(kvDB, raftDB, kvPath, raftPath) + c.engines[storeID] = engine + } + + regionEpoch := &metapb.RegionEpoch{ + Version: raftstore.InitEpochVer, + ConfVer: raftstore.InitEpochConfVer, + } + firstRegion := &metapb.Region{ + Id: 1, + StartKey: []byte{}, + EndKey: []byte{}, + RegionEpoch: regionEpoch, + } + + for storeID, engine := range c.engines { + peer := NewPeer(storeID, storeID) + firstRegion.Peers = append(firstRegion.Peers, peer) + err := raftstore.BootstrapStore(engine, clusterID, storeID) + if err != nil { + panic(err) + } + } + + for _, engine := range c.engines { + raftstore.PrepareBootstrapCluster(engine, firstRegion) + } + + store := &metapb.Store{ + Id: 1, + Address: "", + } + resp, err := c.schedulerClient.Bootstrap(context.TODO(), store) + if err != nil { + panic(err) + } + if resp.Header != nil && resp.Header.Error != nil { + panic(resp.Header.Error) + } + + for storeID, engine := range c.engines { + store := &metapb.Store{ + Id: storeID, + Address: "", + } + err := c.schedulerClient.PutStore(context.TODO(), store) + if err != nil { + panic(err) + } + raftstore.ClearPrepareBootstrapState(engine) + } + + for storeID := range c.engines { + c.StartServer(storeID) + } +} + +func (c *Cluster) Shutdown() { + for _, storeID := range c.simulator.GetStoreIds() { + c.simulator.StopStore(storeID) + } + for _, engine := range c.engines { + engine.Close() + } + for _, dir := range c.dirs { + os.RemoveAll(dir) + } +} + +func (c *Cluster) AddFilter(filter Filter) { + c.simulator.AddFilter(filter) +} + +func (c *Cluster) ClearFilters() { + c.simulator.ClearFilters() +} + +func (c *Cluster) StopServer(storeID uint64) { + c.simulator.StopStore(storeID) +} + +func (c *Cluster) StartServer(storeID uint64) { + engine := c.engines[storeID] + err := c.simulator.RunStore(c.cfg, engine, context.TODO()) + if err != nil { + panic(err) + } +} + +func (c *Cluster) AllocPeer(storeID uint64) *metapb.Peer { + id, err := c.schedulerClient.AllocID(context.TODO()) + if err != nil { + panic(err) + } + return NewPeer(storeID, id) +} + +func (c *Cluster) Request(key []byte, reqs []*raft_cmdpb.Request, timeout time.Duration) (*raft_cmdpb.RaftCmdResponse, *badger.Txn) { + startTime := time.Now() + for i := 0; i < 10 || time.Now().Sub(startTime) < timeout; i++ { + region := c.GetRegion(key) + regionID := region.GetId() + req := NewRequest(regionID, region.RegionEpoch, reqs) + resp, txn := c.CallCommandOnLeader(&req, timeout) + if resp == nil { + // it should be timeouted innerly + SleepMS(100) + continue + } + if resp.Header.Error != nil { + SleepMS(100) + continue + } + return resp, txn + } + panic("request timeout") +} + +func (c *Cluster) CallCommand(request *raft_cmdpb.RaftCmdRequest, timeout time.Duration) (*raft_cmdpb.RaftCmdResponse, *badger.Txn) { + storeID := request.Header.Peer.StoreId + return c.simulator.CallCommandOnStore(storeID, request, timeout) +} + +func (c *Cluster) CallCommandOnLeader(request *raft_cmdpb.RaftCmdRequest, timeout time.Duration) (*raft_cmdpb.RaftCmdResponse, *badger.Txn) { + startTime := time.Now() + regionID := request.Header.RegionId + leader := c.LeaderOfRegion(regionID) + for { + if time.Now().Sub(startTime) > timeout { + 
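// No leader answered within the deadline; callers such as Cluster.Request
// treat a nil response as a timeout and retry (or eventually panic).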
return nil, nil + } + if leader == nil { + panic(fmt.Sprintf("can't get leader of region %d", regionID)) + } + request.Header.Peer = leader + resp, txn := c.CallCommand(request, 1*time.Second) + if resp == nil { + log.Warnf("can't call command %s on leader %d of region %d", request.String(), leader.GetId(), regionID) + newLeader := c.LeaderOfRegion(regionID) + if leader == newLeader { + region, _, err := c.schedulerClient.GetRegionByID(context.TODO(), regionID) + if err != nil { + return nil, nil + } + peers := region.GetPeers() + leader = peers[rand.Int()%len(peers)] + log.Debugf("leader info maybe wrong, use random leader %d of region %d", leader.GetId(), regionID) + } else { + leader = newLeader + log.Debugf("use new leader %d of region %d", leader.GetId(), regionID) + } + continue + } + if resp.Header.Error != nil { + err := resp.Header.Error + if err.GetStaleCommand() != nil || err.GetEpochNotMatch() != nil || err.GetNotLeader() != nil { + log.Debugf("encouter retryable err %+v", resp) + if err.GetNotLeader() != nil && err.GetNotLeader().Leader != nil { + leader = err.GetNotLeader().Leader + } else { + leader = c.LeaderOfRegion(regionID) + } + continue + } + } + return resp, txn + } +} + +func (c *Cluster) LeaderOfRegion(regionID uint64) *metapb.Peer { + for i := 0; i < 500; i++ { + _, leader, err := c.schedulerClient.GetRegionByID(context.TODO(), regionID) + if err == nil && leader != nil { + return leader + } + SleepMS(10) + } + return nil +} + +func (c *Cluster) GetRegion(key []byte) *metapb.Region { + for i := 0; i < 100; i++ { + region, _, _ := c.schedulerClient.GetRegion(context.TODO(), key) + if region != nil { + return region + } + // We may meet range gap after split, so here we will + // retry to get the region again. + SleepMS(20) + } + panic(fmt.Sprintf("find no region for %s", hex.EncodeToString(key))) +} + +func (c *Cluster) GetRandomRegion() *metapb.Region { + return c.schedulerClient.getRandomRegion() +} + +func (c *Cluster) GetStoreIdsOfRegion(regionID uint64) []uint64 { + region, _, err := c.schedulerClient.GetRegionByID(context.TODO(), regionID) + if err != nil { + panic(err) + } + peers := region.GetPeers() + storeIds := make([]uint64, len(peers)) + for i, peer := range peers { + storeIds[i] = peer.GetStoreId() + } + return storeIds +} + +func (c *Cluster) MustPut(key, value []byte) { + c.MustPutCF(engine_util.CfDefault, key, value) +} + +func (c *Cluster) MustPutCF(cf string, key, value []byte) { + req := NewPutCfCmd(cf, key, value) + resp, _ := c.Request(key, []*raft_cmdpb.Request{req}, 5*time.Second) + if resp.Header.Error != nil { + panic(resp.Header.Error) + } + if len(resp.Responses) != 1 { + panic("len(resp.Responses) != 1") + } + if resp.Responses[0].CmdType != raft_cmdpb.CmdType_Put { + panic("resp.Responses[0].CmdType != raft_cmdpb.CmdType_Put") + } +} + +func (c *Cluster) MustGet(key []byte, value []byte) { + v := c.Get(key) + if !bytes.Equal(v, value) { + panic(fmt.Sprintf("expected value %s, but got %s", value, v)) + } +} + +func (c *Cluster) Get(key []byte) []byte { + return c.GetCF(engine_util.CfDefault, key) +} + +func (c *Cluster) GetCF(cf string, key []byte) []byte { + req := NewGetCfCmd(cf, key) + resp, _ := c.Request(key, []*raft_cmdpb.Request{req}, 5*time.Second) + if resp.Header.Error != nil { + panic(resp.Header.Error) + } + if len(resp.Responses) != 1 { + panic("len(resp.Responses) != 1") + } + if resp.Responses[0].CmdType != raft_cmdpb.CmdType_Get { + panic("resp.Responses[0].CmdType != raft_cmdpb.CmdType_Get") + } + return 
resp.Responses[0].Get.Value +} + +func (c *Cluster) MustDelete(key []byte) { + c.MustDeleteCF(engine_util.CfDefault, key) +} + +func (c *Cluster) MustDeleteCF(cf string, key []byte) { + req := NewDeleteCfCmd(cf, key) + resp, _ := c.Request(key, []*raft_cmdpb.Request{req}, 5*time.Second) + if resp.Header.Error != nil { + panic(resp.Header.Error) + } + if len(resp.Responses) != 1 { + panic("len(resp.Responses) != 1") + } + if resp.Responses[0].CmdType != raft_cmdpb.CmdType_Delete { + panic("resp.Responses[0].CmdType != raft_cmdpb.CmdType_Delete") + } +} + +func (c *Cluster) Scan(start, end []byte) [][]byte { + req := NewSnapCmd() + values := make([][]byte, 0) + key := start + for (len(end) != 0 && bytes.Compare(key, end) < 0) || (len(key) == 0 && len(end) == 0) { + resp, txn := c.Request(key, []*raft_cmdpb.Request{req}, 5*time.Second) + if resp.Header.Error != nil { + panic(resp.Header.Error) + } + if len(resp.Responses) != 1 { + panic("len(resp.Responses) != 1") + } + if resp.Responses[0].CmdType != raft_cmdpb.CmdType_Snap { + panic("resp.Responses[0].CmdType != raft_cmdpb.CmdType_Snap") + } + region := resp.Responses[0].GetSnap().Region + iter := raft_storage.NewRegionReader(txn, *region).IterCF(engine_util.CfDefault) + for iter.Seek(key); iter.Valid(); iter.Next() { + if engine_util.ExceedEndKey(iter.Item().Key(), end) { + break + } + value, err := iter.Item().ValueCopy(nil) + if err != nil { + panic(err) + } + values = append(values, value) + } + + key = region.EndKey + if len(key) == 0 { + break + } + } + + return values +} + +func (c *Cluster) TransferLeader(regionID uint64, leader *metapb.Peer) { + region, _, err := c.schedulerClient.GetRegionByID(context.TODO(), regionID) + if err != nil { + panic(err) + } + epoch := region.RegionEpoch + transferLeader := NewAdminRequest(regionID, epoch, NewTransferLeaderCmd(leader)) + resp, _ := c.CallCommandOnLeader(transferLeader, 5*time.Second) + if resp.AdminResponse.CmdType != raft_cmdpb.AdminCmdType_TransferLeader { + panic("resp.AdminResponse.CmdType != raft_cmdpb.AdminCmdType_TransferLeader") + } +} + +func (c *Cluster) MustTransferLeader(regionID uint64, leader *metapb.Peer) { + timer := time.Now() + for { + currentLeader := c.LeaderOfRegion(regionID) + if currentLeader.Id == leader.Id && + currentLeader.StoreId == leader.StoreId { + return + } + if time.Now().Sub(timer) > 5*time.Second { + panic(fmt.Sprintf("failed to transfer leader to [%d] %s", regionID, leader.String())) + } + c.TransferLeader(regionID, leader) + } +} + +func (c *Cluster) MustAddPeer(regionID uint64, peer *metapb.Peer) { + c.schedulerClient.AddPeer(regionID, peer) + c.MustHavePeer(regionID, peer) +} + +func (c *Cluster) MustRemovePeer(regionID uint64, peer *metapb.Peer) { + c.schedulerClient.RemovePeer(regionID, peer) + c.MustNonePeer(regionID, peer) +} + +func (c *Cluster) MustHavePeer(regionID uint64, peer *metapb.Peer) { + for i := 0; i < 500; i++ { + region, _, err := c.schedulerClient.GetRegionByID(context.TODO(), regionID) + if err != nil { + panic(err) + } + if region != nil { + if p := FindPeer(region, peer.GetStoreId()); p != nil { + if p.GetId() == peer.GetId() { + return + } + } + } + SleepMS(10) + } +} + +func (c *Cluster) MustNonePeer(regionID uint64, peer *metapb.Peer) { + for i := 0; i < 500; i++ { + region, _, err := c.schedulerClient.GetRegionByID(context.TODO(), regionID) + if err != nil { + panic(err) + } + if region != nil { + if p := FindPeer(region, peer.GetStoreId()); p != nil { + if p.GetId() != peer.GetId() { + return + } + } else { + return + } 
+ } + SleepMS(10) + } +} diff --git a/kv/test_raftstore/filter.go b/kv/test_raftstore/filter.go new file mode 100644 index 00000000..3dc9f17e --- /dev/null +++ b/kv/test_raftstore/filter.go @@ -0,0 +1,45 @@ +package test_raftstore + +import ( + "math/rand" + + rspb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" +) + +type Filter interface { + Before(msgs *rspb.RaftMessage) bool + After() +} + +type PartitionFilter struct { + s1 []uint64 + s2 []uint64 +} + +func (f *PartitionFilter) Before(msg *rspb.RaftMessage) bool { + inS1 := false + inS2 := false + for _, storeID := range f.s1 { + if msg.FromPeer.StoreId == storeID || msg.ToPeer.StoreId == storeID { + inS1 = true + break + } + } + for _, storeID := range f.s2 { + if msg.FromPeer.StoreId == storeID || msg.ToPeer.StoreId == storeID { + inS2 = true + break + } + } + return !(inS1 && inS2) +} + +func (f *PartitionFilter) After() {} + +type DropFilter struct{} + +func (f *DropFilter) Before(msg *rspb.RaftMessage) bool { + return (rand.Int() % 1000) > 100 +} + +func (f *DropFilter) After() {} diff --git a/kv/test_raftstore/node.go b/kv/test_raftstore/node.go new file mode 100644 index 00000000..742feaed --- /dev/null +++ b/kv/test_raftstore/node.go @@ -0,0 +1,218 @@ +package test_raftstore + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore" + "github.com/pingcap-incubator/tinykv/kv/raftstore/message" + "github.com/pingcap-incubator/tinykv/kv/raftstore/scheduler_client" + "github.com/pingcap-incubator/tinykv/kv/raftstore/snap" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" +) + +type MockTransport struct { + sync.RWMutex + + filters []Filter + routers map[uint64]message.RaftRouter + snapMgrs map[uint64]*snap.SnapManager +} + +func NewMockTransport() *MockTransport { + return &MockTransport{ + routers: make(map[uint64]message.RaftRouter), + snapMgrs: make(map[uint64]*snap.SnapManager), + } +} + +func (t *MockTransport) AddStore(storeID uint64, raftRouter message.RaftRouter, snapMgr *snap.SnapManager) { + t.Lock() + defer t.Unlock() + + t.routers[storeID] = raftRouter + t.snapMgrs[storeID] = snapMgr +} + +func (t *MockTransport) RemoveStore(storeID uint64) { + t.Lock() + defer t.Unlock() + + delete(t.routers, storeID) + delete(t.snapMgrs, storeID) +} + +func (t *MockTransport) AddFilter(filter Filter) { + t.Lock() + defer t.Unlock() + + t.filters = append(t.filters, filter) +} + +func (t *MockTransport) ClearFilters() { + t.Lock() + defer t.Unlock() + + t.filters = nil +} + +func (t *MockTransport) Send(msg *raft_serverpb.RaftMessage) error { + t.RLock() + defer t.RUnlock() + + for _, filter := range t.filters { + if !filter.Before(msg) { + return errors.New(fmt.Sprintf("message %+v is dropped", msg)) + } + } + + fromStore := msg.GetFromPeer().GetStoreId() + toStore := msg.GetToPeer().GetStoreId() + + isSnapshot := msg.GetMessage().GetMsgType() == eraftpb.MessageType_MsgSnapshot + if isSnapshot { + snapshot := msg.Message.Snapshot + key, err := snap.SnapKeyFromSnap(snapshot) + if err != nil { + return err + } + + fromSnapMgr, found := t.snapMgrs[fromStore] + if !found { + return errors.New(fmt.Sprintf("store %d is closed", fromStore)) + } + 
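// In this mock transport the snapshot never crosses the network: the sender's
// snapshot file is registered, copied straight into the receiving store's
// SnapManager, and saved before the raft message itself is routed.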
fromSnapMgr.Register(key, snap.SnapEntrySending) + fromSnap, err := fromSnapMgr.GetSnapshotForSending(key) + if err != nil { + return err + } + + toSnapMgr, found := t.snapMgrs[toStore] + if !found { + return errors.New(fmt.Sprintf("store %d is closed", toStore)) + } + toSnapMgr.Register(key, snap.SnapEntryReceiving) + toSnap, err := toSnapMgr.GetSnapshotForReceiving(key, snapshot.GetData()) + if err != nil { + return err + } + + io.Copy(toSnap, fromSnap) + toSnap.Save() + + toSnapMgr.Deregister(key, snap.SnapEntryReceiving) + fromSnapMgr.Deregister(key, snap.SnapEntrySending) + } + + router, found := t.routers[toStore] + if !found { + return errors.New(fmt.Sprintf("store %d is closed", toStore)) + } + router.SendRaftMessage(msg) + + for _, filter := range t.filters { + filter.After() + } + + return nil +} + +type NodeSimulator struct { + sync.RWMutex + + trans *MockTransport + schedulerClient scheduler_client.Client + nodes map[uint64]*raftstore.Node +} + +func NewNodeSimulator(schedulerClient scheduler_client.Client) *NodeSimulator { + trans := NewMockTransport() + return &NodeSimulator{ + trans: trans, + schedulerClient: schedulerClient, + nodes: make(map[uint64]*raftstore.Node), + } +} + +func (c *NodeSimulator) RunStore(cfg *config.Config, engine *engine_util.Engines, ctx context.Context) error { + c.Lock() + defer c.Unlock() + + raftRouter, batchSystem := raftstore.CreateRaftBatchSystem(cfg) + snapManager := snap.NewSnapManager(cfg.DBPath + "/snap") + node := raftstore.NewNode(batchSystem, cfg, c.schedulerClient) + + err := node.Start(ctx, engine, c.trans, snapManager) + if err != nil { + return err + } + + storeID := node.GetStoreID() + c.nodes[storeID] = node + c.trans.AddStore(storeID, raftRouter, snapManager) + + return nil +} + +func (c *NodeSimulator) StopStore(storeID uint64) { + c.Lock() + defer c.Unlock() + + node := c.nodes[storeID] + if node == nil { + panic(fmt.Sprintf("Can not find store %d", storeID)) + } + node.Stop() + delete(c.nodes, storeID) + c.trans.RemoveStore(storeID) +} + +func (c *NodeSimulator) AddFilter(filter Filter) { + c.Lock() + defer c.Unlock() + c.trans.AddFilter(filter) +} + +func (c *NodeSimulator) ClearFilters() { + c.Lock() + defer c.Unlock() + c.trans.ClearFilters() +} + +func (c *NodeSimulator) GetStoreIds() []uint64 { + c.RLock() + defer c.RUnlock() + storeIDs := make([]uint64, 0, len(c.nodes)) + for storeID := range c.nodes { + storeIDs = append(storeIDs, storeID) + } + return storeIDs +} + +func (c *NodeSimulator) CallCommandOnStore(storeID uint64, request *raft_cmdpb.RaftCmdRequest, timeout time.Duration) (*raft_cmdpb.RaftCmdResponse, *badger.Txn) { + c.RLock() + router := c.trans.routers[storeID] + if router == nil { + log.Fatalf("Can not find node %d", storeID) + } + c.RUnlock() + + cb := message.NewCallback() + err := router.SendRaftCommand(request, cb) + if err != nil { + return nil, nil + } + + resp := cb.WaitRespWithTimeout(timeout) + return resp, cb.Txn +} diff --git a/kv/test_raftstore/scheduler.go b/kv/test_raftstore/scheduler.go new file mode 100644 index 00000000..cf9c0f86 --- /dev/null +++ b/kv/test_raftstore/scheduler.go @@ -0,0 +1,565 @@ +package test_raftstore + +import ( + "bytes" + "context" + "fmt" + "sync" + + "github.com/google/btree" + "github.com/pingcap-incubator/tinykv/kv/raftstore/util" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + 
"github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap/errors" +) + +var _ btree.Item = ®ionItem{} + +type regionItem struct { + region metapb.Region +} + +// Less returns true if the region start key is less than the other. +func (r *regionItem) Less(other btree.Item) bool { + left := r.region.GetStartKey() + right := other.(*regionItem).region.GetStartKey() + return bytes.Compare(left, right) < 0 +} + +func (r *regionItem) Contains(key []byte) bool { + start, end := r.region.GetStartKey(), r.region.GetEndKey() + return bytes.Compare(key, start) >= 0 && (len(end) == 0 || bytes.Compare(key, end) < 0) +} + +type OperatorType int64 + +const ( + OperatorTypeAddPeer = 1 + OperatorTypeRemovePeer = 2 + OperatorTypeTransferLeader = 3 +) + +type Operator struct { + Type OperatorType + Data interface{} +} + +type OpAddPeer struct { + peer *metapb.Peer + pending bool +} + +type OpRemovePeer struct { + peer *metapb.Peer +} + +type OpTransferLeader struct { + peer *metapb.Peer +} + +type Store struct { + store metapb.Store + heartbeatResponseHandler func(*schedulerpb.RegionHeartbeatResponse) +} + +func NewStore(store *metapb.Store) *Store { + return &Store{ + store: *store, + heartbeatResponseHandler: nil, + } +} + +type MockSchedulerClient struct { + sync.RWMutex + + clusterID uint64 + + meta metapb.Cluster + stores map[uint64]*Store + regionsRange *btree.BTree // key -> region + regionsKey map[uint64][]byte // regionID -> startKey + + baseID uint64 + + operators map[uint64]*Operator + leaders map[uint64]*metapb.Peer // regionID -> peer + pendingPeers map[uint64]*metapb.Peer // peerID -> peer + + bootstrapped bool +} + +func NewMockSchedulerClient(clusterID uint64, baseID uint64) *MockSchedulerClient { + return &MockSchedulerClient{ + clusterID: clusterID, + meta: metapb.Cluster{ + Id: clusterID, + }, + stores: make(map[uint64]*Store), + regionsRange: btree.New(2), + regionsKey: make(map[uint64][]byte), + baseID: baseID, + operators: make(map[uint64]*Operator), + leaders: make(map[uint64]*metapb.Peer), + pendingPeers: make(map[uint64]*metapb.Peer), + } +} + +// Implement SchedulerClient interface +func (m *MockSchedulerClient) GetClusterID(ctx context.Context) uint64 { + m.RLock() + defer m.RUnlock() + return m.clusterID +} + +func (m *MockSchedulerClient) AllocID(ctx context.Context) (uint64, error) { + m.Lock() + defer m.Unlock() + ret := m.baseID + m.baseID++ + return ret, nil +} + +func (m *MockSchedulerClient) Bootstrap(ctx context.Context, store *metapb.Store) (*schedulerpb.BootstrapResponse, error) { + m.Lock() + defer m.Unlock() + + resp := &schedulerpb.BootstrapResponse{ + Header: &schedulerpb.ResponseHeader{ClusterId: m.clusterID}, + } + + if m.bootstrapped == true || len(m.regionsKey) != 0 { + m.bootstrapped = true + resp.Header.Error = &schedulerpb.Error{ + Type: schedulerpb.ErrorType_ALREADY_BOOTSTRAPPED, + Message: "cluster is already bootstrapped", + } + return resp, nil + } + + m.stores[store.GetId()] = NewStore(store) + m.bootstrapped = true + return resp, nil +} + +func (m *MockSchedulerClient) IsBootstrapped(ctx context.Context) (bool, error) { + m.RLock() + defer m.RUnlock() + return m.bootstrapped, nil +} + +func (m *MockSchedulerClient) checkBootstrap() error { + if bootstrapped, _ := m.IsBootstrapped(context.TODO()); !bootstrapped { + return errors.New("not bootstrapped") + } + return nil +} + +func (m *MockSchedulerClient) PutStore(ctx context.Context, store *metapb.Store) error { + if err := m.checkBootstrap(); err != nil { + return err + } + 
m.Lock() + defer m.Unlock() + + s := NewStore(store) + m.stores[store.GetId()] = s + return nil +} + +func (m *MockSchedulerClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + if err := m.checkBootstrap(); err != nil { + return nil, err + } + m.RLock() + defer m.RUnlock() + + s, ok := m.stores[storeID] + if !ok { + return nil, errors.Errorf("store %d not found", storeID) + } + return &s.store, nil +} + +func (m *MockSchedulerClient) GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + if err := m.checkBootstrap(); err != nil { + return nil, nil, err + } + m.RLock() + defer m.RUnlock() + region, leader := m.getRegionLocked(key) + return region, leader, nil +} + +func (m *MockSchedulerClient) getRegionLocked(key []byte) (*metapb.Region, *metapb.Peer) { + result := m.findRegion(key) + if result == nil { + return nil, nil + } + + leader := m.leaders[result.region.GetId()] + return &result.region, leader +} + +func (m *MockSchedulerClient) GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) { + if err := m.checkBootstrap(); err != nil { + return nil, nil, err + } + m.RLock() + defer m.RUnlock() + return m.getRegionByIDLocked(regionID) +} + +func (m *MockSchedulerClient) getRegionByIDLocked(regionID uint64) (*metapb.Region, *metapb.Peer, error) { + startKey := m.regionsKey[regionID] + region, leader := m.getRegionLocked(startKey) + return region, leader, nil +} + +func (m *MockSchedulerClient) AskSplit(ctx context.Context, region *metapb.Region) (*schedulerpb.AskSplitResponse, error) { + resp := new(schedulerpb.AskSplitResponse) + resp.Header = &schedulerpb.ResponseHeader{ClusterId: m.clusterID} + curRegion, _, err := m.GetRegionByID(ctx, region.GetId()) + if err != nil { + return resp, err + } + if util.IsEpochStale(region.RegionEpoch, curRegion.RegionEpoch) { + return resp, errors.New("epoch is stale") + } + + id, _ := m.AllocID(ctx) + resp.NewRegionId = id + + for range region.GetPeers() { + id, _ := m.AllocID(ctx) + resp.NewPeerIds = append(resp.NewPeerIds, id) + } + + return resp, nil +} + +func (m *MockSchedulerClient) StoreHeartbeat(ctx context.Context, stats *schedulerpb.StoreStats) error { + if err := m.checkBootstrap(); err != nil { + return err + } + // nothing need to do + return nil +} + +func (m *MockSchedulerClient) RegionHeartbeat(req *schedulerpb.RegionHeartbeatRequest) error { + if err := m.checkBootstrap(); err != nil { + return err + } + + m.Lock() + defer m.Unlock() + + regionID := req.Region.GetId() + for _, p := range req.Region.GetPeers() { + delete(m.pendingPeers, p.GetId()) + } + for _, p := range req.GetPendingPeers() { + m.pendingPeers[p.GetId()] = p + } + m.leaders[regionID] = req.Leader + + if err := m.handleHeartbeatVersion(req.Region); err != nil { + return err + } + if err := m.handleHeartbeatConfVersion(req.Region); err != nil { + return err + } + + resp := &schedulerpb.RegionHeartbeatResponse{ + Header: &schedulerpb.ResponseHeader{ClusterId: m.clusterID}, + RegionId: regionID, + RegionEpoch: req.Region.GetRegionEpoch(), + TargetPeer: req.Leader, + } + if op := m.operators[regionID]; op != nil { + if m.tryFinished(op, req.Region, req.Leader) { + delete(m.operators, regionID) + } else { + m.makeRegionHeartbeatResponse(op, resp) + } + log.Debugf("[region %d] schedule %v", regionID, op) + } + + store := m.stores[req.Leader.GetStoreId()] + store.heartbeatResponseHandler(resp) + return nil +} + +func (m *MockSchedulerClient) handleHeartbeatVersion(region *metapb.Region) 
error { + if engine_util.ExceedEndKey(region.GetStartKey(), region.GetEndKey()) { + panic("start key > end key") + } + + for { + searchRegion, _ := m.getRegionLocked(region.GetStartKey()) + if searchRegion == nil { + m.addRegionLocked(region) + return nil + } else { + if bytes.Equal(searchRegion.GetStartKey(), region.GetStartKey()) && + bytes.Equal(searchRegion.GetEndKey(), region.GetEndKey()) { + // the two regions' range are same, must check epoch + if util.IsEpochStale(region.RegionEpoch, searchRegion.RegionEpoch) { + return errors.New("epoch is stale") + } + if searchRegion.RegionEpoch.Version < region.RegionEpoch.Version { + m.removeRegionLocked(searchRegion) + m.addRegionLocked(region) + } + return nil + } + + if engine_util.ExceedEndKey(searchRegion.GetStartKey(), region.GetEndKey()) { + // No range covers [start, end) now, insert directly. + m.addRegionLocked(region) + return nil + } else { + // overlap, remove old, insert new. + // E.g, 1 [a, c) -> 1 [a, b) + 2 [b, c), either new 1 or 2 reports, the region + // is overlapped with origin [a, c). + if region.GetRegionEpoch().GetVersion() <= searchRegion.GetRegionEpoch().GetVersion() { + return errors.New("epoch is stale") + } + m.removeRegionLocked(searchRegion) + } + } + } +} + +func (m *MockSchedulerClient) handleHeartbeatConfVersion(region *metapb.Region) error { + searchRegion, _ := m.getRegionLocked(region.GetStartKey()) + if util.IsEpochStale(region.RegionEpoch, searchRegion.RegionEpoch) { + return errors.New("epoch is stale") + } + + regionPeerLen := len(region.GetPeers()) + searchRegionPeerLen := len(searchRegion.GetPeers()) + + if region.RegionEpoch.ConfVer > searchRegion.RegionEpoch.ConfVer { + // If ConfVer changed, TinyKV has added/removed one peer already. + // So scheduler and TinyKV can't have same peer count and can only have + // only one different peer. + if searchRegionPeerLen > regionPeerLen { + if searchRegionPeerLen-regionPeerLen != 1 { + panic("should only one conf change") + } + fmt.Println(searchRegion, region) + if len(GetDiffPeers(searchRegion, region)) != 1 { + panic("should only one different peer") + } + if len(GetDiffPeers(region, searchRegion)) != 0 { + panic("should include all peers") + } + } else if searchRegionPeerLen < regionPeerLen { + if regionPeerLen-searchRegionPeerLen != 1 { + panic("should only one conf change") + } + if len(GetDiffPeers(region, searchRegion)) != 1 { + panic("should only one different peer") + } + if len(GetDiffPeers(searchRegion, region)) != 0 { + panic("should include all peers") + } + } else { + MustSamePeers(searchRegion, region) + if searchRegion.RegionEpoch.ConfVer+1 != region.RegionEpoch.ConfVer { + panic("unmatched conf version") + } + if searchRegion.RegionEpoch.Version+1 != region.RegionEpoch.Version { + panic("unmatched version") + } + } + + // update the region. + if m.regionsRange.ReplaceOrInsert(®ionItem{region: *region}) == nil { + panic("update inexistent region ") + } + } else { + MustSamePeers(searchRegion, region) + } + return nil +} + +func (m *MockSchedulerClient) tryFinished(op *Operator, region *metapb.Region, leader *metapb.Peer) bool { + switch op.Type { + case OperatorTypeAddPeer: + add := op.Data.(OpAddPeer) + if !add.pending { + for _, p := range region.GetPeers() { + if add.peer.GetId() == p.GetId() { + add.pending = true + } else { + // TinyKV rejects AddNode. 
+ return false + } + } + } else { + _, found := m.pendingPeers[add.peer.GetId()] + return !found + } + case OperatorTypeRemovePeer: + remove := op.Data.(OpRemovePeer) + for _, p := range region.GetPeers() { + if remove.peer.GetId() == p.GetId() { + return false + } + } + return true + case OperatorTypeTransferLeader: + transfer := op.Data.(OpTransferLeader) + return leader.GetId() == transfer.peer.GetId() + } + panic("unreachable") +} + +func (m *MockSchedulerClient) makeRegionHeartbeatResponse(op *Operator, resp *schedulerpb.RegionHeartbeatResponse) { + switch op.Type { + case OperatorTypeAddPeer: + add := op.Data.(OpAddPeer) + if !add.pending { + resp.ChangePeer = &schedulerpb.ChangePeer{ + ChangeType: eraftpb.ConfChangeType_AddNode, + Peer: add.peer, + } + } + case OperatorTypeRemovePeer: + remove := op.Data.(OpRemovePeer) + resp.ChangePeer = &schedulerpb.ChangePeer{ + ChangeType: eraftpb.ConfChangeType_RemoveNode, + Peer: remove.peer, + } + case OperatorTypeTransferLeader: + transfer := op.Data.(OpTransferLeader) + resp.TransferLeader = &schedulerpb.TransferLeader{ + Peer: transfer.peer, + } + } +} + +func (m *MockSchedulerClient) SetRegionHeartbeatResponseHandler(storeID uint64, h func(*schedulerpb.RegionHeartbeatResponse)) { + if h == nil { + h = func(*schedulerpb.RegionHeartbeatResponse) {} + } + m.Lock() + defer m.Unlock() + store := m.stores[storeID] + store.heartbeatResponseHandler = h +} + +func (m *MockSchedulerClient) Close() { + // do nothing +} + +func (m *MockSchedulerClient) findRegion(key []byte) *regionItem { + item := ®ionItem{region: metapb.Region{StartKey: key}} + + var result *regionItem + m.regionsRange.DescendLessOrEqual(item, func(i btree.Item) bool { + result = i.(*regionItem) + return false + }) + + if result == nil || !result.Contains(key) { + return nil + } + + return result +} + +func (m *MockSchedulerClient) addRegionLocked(region *metapb.Region) { + m.regionsKey[region.GetId()] = region.GetStartKey() + m.regionsRange.ReplaceOrInsert(®ionItem{region: *region}) +} + +func (m *MockSchedulerClient) removeRegionLocked(region *metapb.Region) { + delete(m.regionsKey, region.GetId()) + result := m.findRegion(region.GetStartKey()) + if result == nil || result.region.GetId() != region.GetId() { + return + } + m.regionsRange.Delete(result) +} + +// Extra API for tests +func (m *MockSchedulerClient) AddPeer(regionID uint64, peer *metapb.Peer) { + m.scheduleOperator(regionID, &Operator{ + Type: OperatorTypeAddPeer, + Data: OpAddPeer{ + peer: peer, + pending: false, + }, + }) +} + +func (m *MockSchedulerClient) RemovePeer(regionID uint64, peer *metapb.Peer) { + m.scheduleOperator(regionID, &Operator{ + Type: OperatorTypeRemovePeer, + Data: OpRemovePeer{ + peer: peer, + }, + }) +} + +func (m *MockSchedulerClient) TransferLeader(regionID uint64, peer *metapb.Peer) { + m.scheduleOperator(regionID, &Operator{ + Type: OperatorTypeTransferLeader, + Data: OpTransferLeader{ + peer: peer, + }, + }) +} + +func (m *MockSchedulerClient) getRandomRegion() *metapb.Region { + m.RLock() + defer m.RUnlock() + + for regionID := range m.leaders { + region, _, _ := m.getRegionByIDLocked(regionID) + return region + } + return nil +} + +func (m *MockSchedulerClient) scheduleOperator(regionID uint64, op *Operator) { + m.Lock() + defer m.Unlock() + m.operators[regionID] = op +} + +// Utilities +func MustSamePeers(left *metapb.Region, right *metapb.Region) { + if len(left.GetPeers()) != len(right.GetPeers()) { + panic("unmatched peers length") + } + for _, p := range left.GetPeers() { + if 
FindPeer(right, p.GetStoreId()) == nil { + panic("not found the peer") + } + } +} + +func GetDiffPeers(left *metapb.Region, right *metapb.Region) []*metapb.Peer { + peers := make([]*metapb.Peer, 0, 1) + for _, p := range left.GetPeers() { + if FindPeer(right, p.GetStoreId()) == nil { + peers = append(peers, p) + } + } + return peers +} + +func FindPeer(region *metapb.Region, storeID uint64) *metapb.Peer { + for _, p := range region.GetPeers() { + if p.GetStoreId() == storeID { + return p + } + } + return nil +} diff --git a/kv/test_raftstore/test_test.go b/kv/test_raftstore/test_test.go new file mode 100644 index 00000000..7dc07aa7 --- /dev/null +++ b/kv/test_raftstore/test_test.go @@ -0,0 +1,710 @@ +package test_raftstore + +import ( + "bytes" + "fmt" + "math/rand" + _ "net/http/pprof" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/raftstore/meta" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" + "github.com/stretchr/testify/assert" +) + +// a client runs the function f and then signals it is done +func runClient(t *testing.T, me int, ca chan bool, fn func(me int, t *testing.T)) { + ok := false + defer func() { ca <- ok }() + fn(me, t) + ok = true +} + +// spawn ncli clients and wait until they are all done +func SpawnClientsAndWait(t *testing.T, ch chan bool, ncli int, fn func(me int, t *testing.T)) { + defer func() { ch <- true }() + ca := make([]chan bool, ncli) + for cli := 0; cli < ncli; cli++ { + ca[cli] = make(chan bool) + go runClient(t, cli, ca[cli], fn) + } + // log.Printf("SpawnClientsAndWait: waiting for clients") + for cli := 0; cli < ncli; cli++ { + ok := <-ca[cli] + // log.Infof("SpawnClientsAndWait: client %d is done\n", cli) + if ok == false { + t.Fatalf("failure") + } + } + +} + +// predict effect of Append(k, val) if old value is prev. +func NextValue(prev string, val string) string { + return prev + val +} + +// check that for a specific client all known appends are present in a value, +// and in order +func checkClntAppends(t *testing.T, clnt int, v string, count int) { + lastoff := -1 + for j := 0; j < count; j++ { + wanted := "x " + strconv.Itoa(clnt) + " " + strconv.Itoa(j) + " y" + off := strings.Index(v, wanted) + if off < 0 { + t.Fatalf("%v missing element %v in Append result %v", clnt, wanted, v) + } + off1 := strings.LastIndex(v, wanted) + if off1 != off { + t.Fatalf("duplicate element %v in Append result", wanted) + } + if off <= lastoff { + t.Fatalf("wrong order for element %v in Append result", wanted) + } + lastoff = off + } +} + +// check that all known appends are present in a value, +// and are in order for each concurrent client. 
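+// For example, with two clients doing two appends each, a checked value could be
+// "x 0 0 yx 1 0 yx 0 1 yx 1 1 y": entries from different clients may interleave,
+// but each client's own entries must appear exactly once and in increasing order.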
+func checkConcurrentAppends(t *testing.T, v string, counts []int) { + nclients := len(counts) + for i := 0; i < nclients; i++ { + lastoff := -1 + for j := 0; j < counts[i]; j++ { + wanted := "x " + strconv.Itoa(i) + " " + strconv.Itoa(j) + " y" + off := strings.Index(v, wanted) + if off < 0 { + t.Fatalf("%v missing element %v in Append result %v", i, wanted, v) + } + off1 := strings.LastIndex(v, wanted) + if off1 != off { + t.Fatalf("duplicate element %v in Append result", wanted) + } + if off <= lastoff { + t.Fatalf("wrong order for element %v in Append result", wanted) + } + lastoff = off + } + } +} + +// repartition the servers periodically +func partitioner(t *testing.T, cluster *Cluster, ch chan bool, done *int32, unreliable bool, electionTimeout time.Duration) { + defer func() { ch <- true }() + for atomic.LoadInt32(done) == 0 { + a := make([]int, cluster.count) + for i := 0; i < cluster.count; i++ { + a[i] = (rand.Int() % 2) + } + pa := make([][]uint64, 2) + for i := 0; i < 2; i++ { + pa[i] = make([]uint64, 0) + for j := 1; j <= cluster.count; j++ { + if a[j-1] == i { + pa[i] = append(pa[i], uint64(j)) + } + } + } + cluster.ClearFilters() + log.Infof("partition: %v, %v", pa[0], pa[1]) + cluster.AddFilter(&PartitionFilter{ + s1: pa[0], + s2: pa[1], + }) + if unreliable { + cluster.AddFilter(&DropFilter{}) + } + time.Sleep(electionTimeout + time.Duration(rand.Int63()%200)*time.Millisecond) + } +} + +func confchanger(t *testing.T, cluster *Cluster, ch chan bool, done *int32) { + defer func() { ch <- true }() + count := uint64(cluster.count) + for atomic.LoadInt32(done) == 0 { + region := cluster.GetRandomRegion() + store := rand.Uint64()%count + 1 + if p := FindPeer(region, store); p != nil { + if len(region.GetPeers()) > 1 { + cluster.MustRemovePeer(region.GetId(), p) + } + } else { + cluster.MustAddPeer(region.GetId(), cluster.AllocPeer(store)) + } + time.Sleep(time.Duration(rand.Int63()%200) * time.Millisecond) + } +} + +// Basic test is as follows: one or more clients submitting Put/Scan +// operations to set of servers for some period of time. After the period is +// over, test checks that all sequential values are present and in order for a +// particular key and perform Delete to clean up. +// - If unreliable is set, RPCs may fail. +// - If crash is set, the servers restart after the period is over. +// - If partitions is set, the test repartitions the network concurrently between the servers. +// - If maxraftlog is a positive number, the count of the persistent log for Raft shouldn't exceed 2*maxraftlog. +// - If confchangee is set, the cluster will schedule random conf change concurrently. +// - If split is set, split region when size exceed 1024 bytes. +func GenericTest(t *testing.T, part string, nclients int, unreliable bool, crash bool, partitions bool, maxraftlog int, confchange bool, split bool) { + title := "Test: " + if unreliable { + // the network drops RPC requests and replies. + title = title + "unreliable net, " + } + if crash { + // peers re-start, and thus persistence must work. 
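+		// Concretely: after the client phase every server is stopped and restarted
+		// (see the crash branch further down), so committed data must survive a restart.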
+ title = title + "restarts, " + } + if partitions { + // the network may partition + title = title + "partitions, " + } + if maxraftlog != -1 { + title = title + "snapshots, " + } + if nclients > 1 { + title = title + "many clients" + } else { + title = title + "one client" + } + title = title + " (" + part + ")" // 3A or 3B + + nservers := 5 + cfg := config.NewTestConfig() + if maxraftlog != -1 { + cfg.RaftLogGcCountLimit = uint64(maxraftlog) + } + if split { + cfg.RegionMaxSize = 800 + cfg.RegionSplitSize = 500 + } + cluster := NewTestCluster(nservers, cfg) + cluster.Start() + defer cluster.Shutdown() + + electionTimeout := cfg.RaftBaseTickInterval * time.Duration(cfg.RaftElectionTimeoutTicks) + done_partitioner := int32(0) + done_confchanger := int32(0) + done_clients := int32(0) + ch_partitioner := make(chan bool) + ch_confchange := make(chan bool) + ch_clients := make(chan bool) + clnts := make([]chan int, nclients) + for i := 0; i < nclients; i++ { + clnts[i] = make(chan int, 1) + } + for i := 0; i < 3; i++ { + // log.Printf("Iteration %v\n", i) + atomic.StoreInt32(&done_clients, 0) + atomic.StoreInt32(&done_partitioner, 0) + go SpawnClientsAndWait(t, ch_clients, nclients, func(cli int, t *testing.T) { + j := 0 + defer func() { + clnts[cli] <- j + }() + last := "" + for atomic.LoadInt32(&done_clients) == 0 { + if (rand.Int() % 1000) < 500 { + key := strconv.Itoa(cli) + " " + fmt.Sprintf("%08d", j) + value := "x " + strconv.Itoa(cli) + " " + strconv.Itoa(j) + " y" + // log.Infof("%d: client new put %v,%v\n", cli, key, value) + cluster.MustPut([]byte(key), []byte(value)) + last = NextValue(last, value) + j++ + } else { + start := strconv.Itoa(cli) + " " + fmt.Sprintf("%08d", 0) + end := strconv.Itoa(cli) + " " + fmt.Sprintf("%08d", j) + // log.Infof("%d: client new scan %v-%v\n", cli, start, end) + values := cluster.Scan([]byte(start), []byte(end)) + v := string(bytes.Join(values, []byte(""))) + if v != last { + log.Fatalf("get wrong value, client %v\nwant:%v\ngot: %v\n", cli, last, v) + } + } + } + }) + + if partitions { + // Allow the clients to perform some operations without interruption + time.Sleep(300 * time.Millisecond) + go partitioner(t, cluster, ch_partitioner, &done_partitioner, unreliable, electionTimeout) + } + if confchange { + // Allow the clients to perfrom some operations without interruption + time.Sleep(100 * time.Millisecond) + go confchanger(t, cluster, ch_confchange, &done_confchanger) + } + time.Sleep(2 * time.Second) + atomic.StoreInt32(&done_clients, 1) // tell clients to quit + atomic.StoreInt32(&done_partitioner, 1) // tell partitioner to quit + atomic.StoreInt32(&done_confchanger, 1) // tell confchanger to quit + if partitions { + // log.Printf("wait for partitioner\n") + <-ch_partitioner + // reconnect network and submit a request. A client may + // have submitted a request in a minority. That request + // won't return until that server discovers a new term + // has started. 
+ cluster.ClearFilters() + // wait for a while so that we have a new term + time.Sleep(electionTimeout) + } + + // log.Printf("wait for clients\n") + <-ch_clients + + if crash { + log.Warnf("shutdown servers\n") + for i := 1; i <= nservers; i++ { + cluster.StopServer(uint64(i)) + } + // Wait for a while for servers to shutdown, since + // shutdown isn't a real crash and isn't instantaneous + time.Sleep(electionTimeout) + log.Warnf("restart servers\n") + // crash and re-start all + for i := 1; i <= nservers; i++ { + cluster.StartServer(uint64(i)) + } + } + + for cli := 0; cli < nclients; cli++ { + // log.Printf("read from clients %d\n", cli) + j := <-clnts[cli] + + // if j < 10 { + // log.Printf("Warning: client %d managed to perform only %d put operations in 1 sec?\n", i, j) + // } + start := strconv.Itoa(cli) + " " + fmt.Sprintf("%08d", 0) + end := strconv.Itoa(cli) + " " + fmt.Sprintf("%08d", j) + values := cluster.Scan([]byte(start), []byte(end)) + v := string(bytes.Join(values, []byte(""))) + checkClntAppends(t, cli, v, j) + + for k := 0; k < j; k++ { + key := strconv.Itoa(cli) + " " + fmt.Sprintf("%08d", k) + cluster.MustDelete([]byte(key)) + } + } + + if maxraftlog > 0 { + // Check maximum after the servers have processed all client + // requests and had time to checkpoint. + key := []byte("") + for { + region := cluster.GetRegion(key) + if region == nil { + panic("region is not found") + } + for _, engine := range cluster.engines { + state, err := meta.GetApplyState(engine.Kv, region.GetId()) + if err == badger.ErrKeyNotFound { + continue + } + if err != nil { + panic(err) + } + truncatedIdx := state.TruncatedState.Index + appliedIdx := state.AppliedIndex + if appliedIdx-truncatedIdx > 2*uint64(maxraftlog) { + t.Fatalf("logs were not trimmed (%v - %v > 2*%v)", appliedIdx, truncatedIdx, maxraftlog) + } + } + + key = region.EndKey + if len(key) == 0 { + break + } + } + } + + if split { + r := cluster.GetRegion([]byte("")) + if len(r.GetEndKey()) == 0 { + t.Fatalf("region is not split") + } + } + } +} + +func TestBasic2B(t *testing.T) { + // Test: one client (2B) ... + GenericTest(t, "2B", 1, false, false, false, -1, false, false) +} + +func TestConcurrent2B(t *testing.T) { + // Test: many clients (2B) ... + GenericTest(t, "2B", 5, false, false, false, -1, false, false) +} + +func TestUnreliable2B(t *testing.T) { + // Test: unreliable net, many clients (2B) ... + GenericTest(t, "2B", 5, true, false, false, -1, false, false) +} + +// Submit a request in the minority partition and check that the requests +// doesn't go through until the partition heals. The leader in the original +// network ends up in the minority partition. 
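+// Concretely, with five stores: s1 first holds the leader's store plus two
+// followers (a three-store majority) and s2 the remaining two stores, so writes
+// still commit while the s2 engines never see them. One store is then moved from
+// s1 to s2, leaving the old leader in a two-store minority while the three-store
+// side elects a new leader; once the filters are cleared the old leader catches up.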
+func TestOnePartition2B(t *testing.T) { + cfg := config.NewTestConfig() + cluster := NewTestCluster(5, cfg) + cluster.Start() + defer cluster.Shutdown() + + region := cluster.GetRegion([]byte("")) + leader := cluster.LeaderOfRegion(region.GetId()) + s1 := []uint64{leader.GetStoreId()} + s2 := []uint64{} + for _, p := range region.GetPeers() { + if p.GetId() == leader.GetId() { + continue + } + if len(s1) < 3 { + s1 = append(s1, p.GetStoreId()) + } else { + s2 = append(s2, p.GetStoreId()) + } + } + + // leader in majority, partition doesn't affect write/read + cluster.AddFilter(&PartitionFilter{ + s1: s1, + s2: s2, + }) + cluster.MustPut([]byte("k1"), []byte("v1")) + cluster.MustGet([]byte("k1"), []byte("v1")) + MustGetNone(cluster.engines[s2[0]], []byte("k1")) + MustGetNone(cluster.engines[s2[1]], []byte("k1")) + cluster.ClearFilters() + + // old leader in minority, new leader should be elected + s2 = append(s2, s1[2]) + s1 = s1[:2] + cluster.AddFilter(&PartitionFilter{ + s1: s1, + s2: s2, + }) + cluster.MustGet([]byte("k1"), []byte("v1")) + cluster.MustPut([]byte("k1"), []byte("changed")) + MustGetEqual(cluster.engines[s1[0]], []byte("k1"), []byte("v1")) + MustGetEqual(cluster.engines[s1[1]], []byte("k1"), []byte("v1")) + cluster.ClearFilters() + + // when partition heals, old leader should sync data + cluster.MustPut([]byte("k2"), []byte("v2")) + MustGetEqual(cluster.engines[s1[0]], []byte("k2"), []byte("v2")) + MustGetEqual(cluster.engines[s1[0]], []byte("k1"), []byte("changed")) +} + +func TestManyPartitionsOneClient2B(t *testing.T) { + // Test: partitions, one client (2B) ... + GenericTest(t, "2B", 1, false, false, true, -1, false, false) +} + +func TestManyPartitionsManyClients2B(t *testing.T) { + // Test: partitions, many clients (2B) ... + GenericTest(t, "2B", 5, false, false, true, -1, false, false) +} + +func TestPersistOneClient2B(t *testing.T) { + // Test: restarts, one client (2B) ... + GenericTest(t, "2B", 1, false, true, false, -1, false, false) +} + +func TestPersistConcurrent2B(t *testing.T) { + // Test: restarts, many clients (2B) ... + GenericTest(t, "2B", 5, false, true, false, -1, false, false) +} + +func TestPersistConcurrentUnreliable2B(t *testing.T) { + // Test: unreliable net, restarts, many clients (2B) ... + GenericTest(t, "2B", 5, true, true, false, -1, false, false) +} + +func TestPersistPartition2B(t *testing.T) { + // Test: restarts, partitions, many clients (2B) ... + GenericTest(t, "2B", 5, false, true, true, -1, false, false) +} + +func TestPersistPartitionUnreliable2B(t *testing.T) { + // Test: unreliable net, restarts, partitions, many clients (3A) ... 
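+	// Flags: 5 clients, unreliable net, restarts and partitions enabled; no raft
+	// log limit (-1), no conf change, no split.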
+ GenericTest(t, "2B", 5, true, true, true, -1, false, false) +} + +func TestOneSnapshot2C(t *testing.T) { + cfg := config.NewTestConfig() + cfg.RaftLogGcCountLimit = 10 + cluster := NewTestCluster(3, cfg) + cluster.Start() + defer cluster.Shutdown() + + cf := engine_util.CfLock + cluster.MustPutCF(cf, []byte("k1"), []byte("v1")) + cluster.MustPutCF(cf, []byte("k2"), []byte("v2")) + + MustGetCfEqual(cluster.engines[1], cf, []byte("k1"), []byte("v1")) + MustGetCfEqual(cluster.engines[1], cf, []byte("k2"), []byte("v2")) + + for _, engine := range cluster.engines { + state, err := meta.GetApplyState(engine.Kv, 1) + if err != nil { + t.Fatal(err) + } + if state.TruncatedState.Index != meta.RaftInitLogIndex || + state.TruncatedState.Term != meta.RaftInitLogTerm { + t.Fatalf("unexpected truncated state %v", state.TruncatedState) + } + } + + cluster.AddFilter( + &PartitionFilter{ + s1: []uint64{1}, + s2: []uint64{2, 3}, + }, + ) + + // write some data to trigger snapshot + for i := 100; i < 115; i++ { + cluster.MustPutCF(cf, []byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) + } + cluster.MustDeleteCF(cf, []byte("k2")) + time.Sleep(500 * time.Millisecond) + MustGetCfNone(cluster.engines[1], cf, []byte("k100")) + cluster.ClearFilters() + + // Now snapshot must applied on + MustGetCfEqual(cluster.engines[1], cf, []byte("k1"), []byte("v1")) + MustGetCfEqual(cluster.engines[1], cf, []byte("k100"), []byte("v100")) + MustGetCfNone(cluster.engines[1], cf, []byte("k2")) + + cluster.StopServer(1) + cluster.StartServer(1) + + MustGetCfEqual(cluster.engines[1], cf, []byte("k1"), []byte("v1")) + for _, engine := range cluster.engines { + state, err := meta.GetApplyState(engine.Kv, 1) + if err != nil { + t.Fatal(err) + } + truncatedIdx := state.TruncatedState.Index + appliedIdx := state.AppliedIndex + if appliedIdx-truncatedIdx > 2*uint64(cfg.RaftLogGcCountLimit) { + t.Fatalf("logs were not trimmed (%v - %v > 2*%v)", appliedIdx, truncatedIdx, cfg.RaftLogGcCountLimit) + } + } +} + +func TestSnapshotRecover2C(t *testing.T) { + // Test: restarts, snapshots, one client (2C) ... + GenericTest(t, "2C", 1, false, true, false, 100, false, false) +} + +func TestSnapshotRecoverManyClients2C(t *testing.T) { + // Test: restarts, snapshots, many clients (2C) ... + GenericTest(t, "2C", 20, false, true, false, 100, false, false) +} + +func TestSnapshotUnreliable2C(t *testing.T) { + // Test: unreliable net, snapshots, many clients (2C) ... + GenericTest(t, "2C", 5, true, false, false, 100, false, false) +} + +func TestSnapshotUnreliableRecover2C(t *testing.T) { + // Test: unreliable net, restarts, snapshots, many clients (2C) ... + GenericTest(t, "2C", 5, true, true, false, 100, false, false) +} + +func TestSnapshotUnreliableRecoverConcurrentPartition2C(t *testing.T) { + // Test: unreliable net, restarts, partitions, snapshots, many clients (2C) ... 
+ GenericTest(t, "2C", 5, true, true, true, 100, false, false) +} + +func TestTransferLeader3B(t *testing.T) { + cfg := config.NewTestConfig() + cluster := NewTestCluster(5, cfg) + cluster.Start() + defer cluster.Shutdown() + + regionID := cluster.GetRegion([]byte("")).GetId() + cluster.MustTransferLeader(regionID, NewPeer(1, 1)) + cluster.MustTransferLeader(regionID, NewPeer(2, 2)) + cluster.MustTransferLeader(regionID, NewPeer(3, 3)) + cluster.MustTransferLeader(regionID, NewPeer(4, 4)) + cluster.MustTransferLeader(regionID, NewPeer(5, 5)) +} + +func TestBasicConfChange3B(t *testing.T) { + cfg := config.NewTestConfig() + cluster := NewTestCluster(5, cfg) + cluster.Start() + defer cluster.Shutdown() + + cluster.MustRemovePeer(1, NewPeer(2, 2)) + cluster.MustRemovePeer(1, NewPeer(3, 3)) + cluster.MustRemovePeer(1, NewPeer(4, 4)) + cluster.MustRemovePeer(1, NewPeer(5, 5)) + + // now region 1 only has peer: (1, 1) + cluster.MustPut([]byte("k1"), []byte("v1")) + MustGetNone(cluster.engines[2], []byte("k1")) + + // add peer (2, 2) to region 1 + cluster.MustAddPeer(1, NewPeer(2, 2)) + cluster.MustPut([]byte("k2"), []byte("v2")) + cluster.MustGet([]byte("k2"), []byte("v2")) + MustGetEqual(cluster.engines[2], []byte("k1"), []byte("v1")) + MustGetEqual(cluster.engines[2], []byte("k2"), []byte("v2")) + + epoch := cluster.GetRegion([]byte("k1")).GetRegionEpoch() + assert.True(t, epoch.GetConfVer() > 1) + + // peer 5 must not exist + MustGetNone(cluster.engines[5], []byte("k1")) + + // add peer (3, 3) to region 1 + cluster.MustAddPeer(1, NewPeer(3, 3)) + cluster.MustRemovePeer(1, NewPeer(2, 2)) + + cluster.MustPut([]byte("k3"), []byte("v3")) + cluster.MustGet([]byte("k3"), []byte("v3")) + MustGetEqual(cluster.engines[3], []byte("k1"), []byte("v1")) + MustGetEqual(cluster.engines[3], []byte("k2"), []byte("v2")) + MustGetEqual(cluster.engines[3], []byte("k3"), []byte("v3")) + + // peer 2 has nothing + MustGetNone(cluster.engines[2], []byte("k1")) + MustGetNone(cluster.engines[2], []byte("k2")) + + cluster.MustAddPeer(1, NewPeer(2, 2)) + MustGetEqual(cluster.engines[2], []byte("k1"), []byte("v1")) + MustGetEqual(cluster.engines[2], []byte("k2"), []byte("v2")) + MustGetEqual(cluster.engines[2], []byte("k3"), []byte("v3")) + + // remove peer (2, 2) from region 1 + cluster.MustRemovePeer(1, NewPeer(2, 2)) + // add peer (2, 4) to region 1 + cluster.MustAddPeer(1, NewPeer(2, 4)) + // remove peer (3, 3) from region 1 + cluster.MustRemovePeer(1, NewPeer(3, 3)) + + cluster.MustPut([]byte("k4"), []byte("v4")) + MustGetEqual(cluster.engines[2], []byte("k1"), []byte("v1")) + MustGetEqual(cluster.engines[2], []byte("k4"), []byte("v4")) + MustGetNone(cluster.engines[3], []byte("k1")) + MustGetNone(cluster.engines[3], []byte("k4")) +} + +func TestConfChangeRecover3B(t *testing.T) { + // Test: restarts, snapshots, conf change, one client (3B) ... + GenericTest(t, "3B", 1, false, true, false, -1, true, false) +} + +func TestConfChangeRecoverManyClients3B(t *testing.T) { + // Test: restarts, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 20, false, true, false, -1, true, false) +} + +func TestConfChangeUnreliable3B(t *testing.T) { + // Test: unreliable net, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 5, true, false, false, -1, true, false) +} + +func TestConfChangeUnreliableRecover3B(t *testing.T) { + // Test: unreliable net, restarts, snapshots, conf change, many clients (3B) ... 
+ GenericTest(t, "3B", 5, true, true, false, -1, true, false) +} + +func TestConfChangeSnapshotUnreliableRecover3B(t *testing.T) { + // Test: unreliable net, restarts, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 5, true, true, false, 100, true, false) +} + +func TestConfChangeSnapshotUnreliableRecoverConcurrentPartition3B(t *testing.T) { + // Test: unreliable net, restarts, partitions, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 5, true, true, true, 100, true, false) +} + +func TestOneSplit3B(t *testing.T) { + cfg := config.NewTestConfig() + cfg.RegionMaxSize = 800 + cfg.RegionSplitSize = 500 + cluster := NewTestCluster(5, cfg) + cluster.Start() + defer cluster.Shutdown() + + cluster.MustPut([]byte("k1"), []byte("v1")) + cluster.MustPut([]byte("k2"), []byte("v2")) + + region := cluster.GetRegion([]byte("k1")) + region1 := cluster.GetRegion([]byte("k2")) + assert.Equal(t, region.GetId(), region1.GetId()) + + cluster.AddFilter( + &PartitionFilter{ + s1: []uint64{1, 2, 3, 4}, + s2: []uint64{5}, + }, + ) + + // write some data to trigger split + for i := 100; i < 200; i++ { + cluster.MustPut([]byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) + } + + time.Sleep(200 * time.Millisecond) + cluster.ClearFilters() + + left := cluster.GetRegion([]byte("k1")) + right := cluster.GetRegion([]byte("k2")) + + assert.NotEqual(t, left.GetId(), right.GetId()) + assert.True(t, bytes.Equal(region.GetStartKey(), left.GetStartKey())) + assert.True(t, bytes.Equal(left.GetEndKey(), right.GetStartKey())) + assert.True(t, bytes.Equal(right.GetEndKey(), region.GetEndKey())) + + req := NewRequest(left.GetId(), left.GetRegionEpoch(), []*raft_cmdpb.Request{NewGetCfCmd(engine_util.CfDefault, []byte("k2"))}) + resp, _ := cluster.CallCommandOnLeader(&req, time.Second) + assert.NotNil(t, resp.GetHeader().GetError()) + assert.NotNil(t, resp.GetHeader().GetError().GetKeyNotInRegion()) + + MustGetEqual(cluster.engines[5], []byte("k100"), []byte("v100")) +} + +func TestSplitRecover3B(t *testing.T) { + // Test: restarts, snapshots, conf change, one client (3B) ... + GenericTest(t, "3B", 1, false, true, false, -1, false, true) +} + +func TestSplitRecoverManyClients3B(t *testing.T) { + // Test: restarts, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 20, false, true, false, -1, false, true) +} + +func TestSplitUnreliable3B(t *testing.T) { + // Test: unreliable net, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 5, true, false, false, -1, false, true) +} + +func TestSplitUnreliableRecover3B(t *testing.T) { + // Test: unreliable net, restarts, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 5, true, true, false, -1, false, true) +} + +func TestSplitConfChangeSnapshotUnreliableRecover3B(t *testing.T) { + // Test: unreliable net, restarts, snapshots, conf change, many clients (3B) ... + GenericTest(t, "3B", 5, true, true, false, 100, true, true) +} + +func TestSplitConfChangeSnapshotUnreliableRecoverConcurrentPartition3B(t *testing.T) { + // Test: unreliable net, restarts, partitions, snapshots, conf change, many clients (3B) ... 
+ GenericTest(t, "3B", 5, true, true, true, 100, true, true) +} diff --git a/kv/test_raftstore/utils.go b/kv/test_raftstore/utils.go new file mode 100644 index 00000000..dc4ac953 --- /dev/null +++ b/kv/test_raftstore/utils.go @@ -0,0 +1,137 @@ +package test_raftstore + +import ( + "bytes" + "encoding/hex" + "fmt" + "time" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/log" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb" +) + +func SleepMS(ms int64) { + time.Sleep(time.Duration(ms) * time.Millisecond) +} + +func NewPeer(storeID, peerID uint64) *metapb.Peer { + peer := &metapb.Peer{ + StoreId: storeID, + Id: peerID, + } + return peer +} + +func NewBaseRequest(regionID uint64, epoch *metapb.RegionEpoch) raft_cmdpb.RaftCmdRequest { + req := raft_cmdpb.RaftCmdRequest{} + req.Header = &raft_cmdpb.RaftRequestHeader{RegionId: regionID, RegionEpoch: epoch} + return req +} + +func NewRequest(regionID uint64, epoch *metapb.RegionEpoch, requests []*raft_cmdpb.Request) raft_cmdpb.RaftCmdRequest { + req := NewBaseRequest(regionID, epoch) + req.Requests = requests + return req +} + +func NewAdminRequest(regionID uint64, epoch *metapb.RegionEpoch, request *raft_cmdpb.AdminRequest) *raft_cmdpb.RaftCmdRequest { + req := NewBaseRequest(regionID, epoch) + req.AdminRequest = request + return &req +} + +func NewPutCfCmd(cf string, key, value []byte) *raft_cmdpb.Request { + cmd := &raft_cmdpb.Request{} + cmd.CmdType = raft_cmdpb.CmdType_Put + cmd.Put = &raft_cmdpb.PutRequest{Key: key, Value: value, Cf: cf} + return cmd +} + +func NewGetCfCmd(cf string, key []byte) *raft_cmdpb.Request { + get := &raft_cmdpb.GetRequest{ + Cf: cf, + Key: key, + } + cmd := &raft_cmdpb.Request{ + CmdType: raft_cmdpb.CmdType_Get, + Get: get, + } + return cmd +} + +func NewDeleteCfCmd(cf string, key []byte) *raft_cmdpb.Request { + delete := &raft_cmdpb.DeleteRequest{ + Cf: cf, + Key: key, + } + cmd := &raft_cmdpb.Request{ + CmdType: raft_cmdpb.CmdType_Delete, + Delete: delete, + } + return cmd +} + +func NewSnapCmd() *raft_cmdpb.Request { + cmd := &raft_cmdpb.Request{ + CmdType: raft_cmdpb.CmdType_Snap, + Snap: &raft_cmdpb.SnapRequest{}, + } + return cmd +} + +func NewTransferLeaderCmd(peer *metapb.Peer) *raft_cmdpb.AdminRequest { + transferLeader := raft_cmdpb.TransferLeaderRequest{Peer: peer} + cmd := &raft_cmdpb.AdminRequest{ + CmdType: raft_cmdpb.AdminCmdType_TransferLeader, + TransferLeader: &transferLeader, + } + return cmd +} + +func MustGetCf(engine *engine_util.Engines, cf string, key []byte, value []byte) { + for i := 0; i < 300; i++ { + val, err := engine_util.GetCF(engine.Kv, cf, key) + if err == nil && (value == nil || bytes.Compare(val, value) == 0) { + return + } + SleepMS(20) + } + panic(fmt.Sprintf("can't get value %s for key %s", hex.EncodeToString(value), hex.EncodeToString(key))) +} + +func MustGetCfEqual(engine *engine_util.Engines, cf string, key []byte, value []byte) { + MustGetCf(engine, cf, key, value) +} + +func MustGetEqual(engine *engine_util.Engines, key []byte, value []byte) { + MustGetCf(engine, engine_util.CfDefault, key, value) +} + +func MustGetCfNone(engine *engine_util.Engines, cf string, key []byte) { + var val []byte + var err error + for i := 0; i < 300; i++ { + val, err = engine_util.GetCF(engine.Kv, cf, key) + if err == badger.ErrKeyNotFound { + return + } + SleepMS(20) + } + 
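+	// After 300 attempts over roughly six seconds the key has still not been
+	// reported missing, so give up loudly.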
panic(fmt.Sprintf("get value %s for key %s", hex.EncodeToString(val), hex.EncodeToString(key))) +} + +func MustGetNone(engine *engine_util.Engines, key []byte) { + MustGetCfNone(engine, engine_util.CfDefault, key) +} + +func NewTestCluster(count int, cfg *config.Config) *Cluster { + log.SetLevelByString(cfg.LogLevel) + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.Lshortfile) + schedulerClient := NewMockSchedulerClient(0, uint64(count)+1) + simulator := NewNodeSimulator(schedulerClient) + return NewCluster(count, schedulerClient, simulator, cfg) +} diff --git a/kv/transaction/commands4b_test.go b/kv/transaction/commands4b_test.go new file mode 100644 index 00000000..c33b896a --- /dev/null +++ b/kv/transaction/commands4b_test.go @@ -0,0 +1,596 @@ +package transaction + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/kv/transaction/mvcc" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/stretchr/testify/assert" +) + +// TestGetValue4B getting a value works in the simple case. +func TestGetValue4B(t *testing.T) { + builder := newBuilder(t) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{99}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + }) + + var req kvrpcpb.GetRequest + req.Key = []byte{99} + req.Version = mvcc.TsMax + resp := builder.runOneRequest(&req).(*kvrpcpb.GetResponse) + + assert.Nil(t, resp.RegionError) + assert.Nil(t, resp.Error) + assert.Equal(t, []byte{42}, resp.Value) +} + +// TestGetValueTs4B getting a value works with different timestamps. +func TestGetValueTs4B(t *testing.T) { + builder := newBuilder(t) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{99}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + }) + + var req0 kvrpcpb.GetRequest + req0.Key = []byte{99} + req0.Version = 100 + var req1 kvrpcpb.GetRequest + req1.Key = []byte{99} + req1.Version = 100 + var req2 kvrpcpb.GetRequest + req2.Key = []byte{99} + req2.Version = 100 + + resps := builder.runRequests(&req0, &req1, &req2) + resp0 := resps[0].(*kvrpcpb.GetResponse) + resp1 := resps[1].(*kvrpcpb.GetResponse) + resp2 := resps[2].(*kvrpcpb.GetResponse) + assert.Nil(t, resp0.RegionError) + assert.Nil(t, resp0.Error) + assert.Equal(t, []byte{42}, resp0.Value) + assert.Nil(t, resp1.RegionError) + assert.Nil(t, resp1.Error) + assert.Equal(t, []byte{42}, resp1.Value) + assert.Nil(t, resp2.RegionError) + assert.Nil(t, resp2.Error) + assert.Equal(t, []byte{42}, resp2.Value) +} + +// TestGetEmpty4B tests that get on an empty DB. +func TestGetEmpty4B(t *testing.T) { + builder := newBuilder(t) + + var req kvrpcpb.GetRequest + req.Key = []byte{100} + req.Version = mvcc.TsMax + resp := builder.runOneRequest(&req).(*kvrpcpb.GetResponse) + + assert.Nil(t, resp.RegionError) + assert.Nil(t, resp.Error) + assert.Equal(t, []byte(nil), resp.Value) +} + +// TestGetNone4B tests that getting a missing key works. 
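+// Keys 99 and 101 are committed on either side of the requested key 100, so the
+// read must return no value rather than a neighbouring version.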
+func TestGetNone4B(t *testing.T) { + builder := newBuilder(t) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{99}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + {cf: engine_util.CfDefault, key: []byte{101}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{101}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + }) + + var req kvrpcpb.GetRequest + req.Key = []byte{100} + req.Version = mvcc.TsMax + + resp := builder.runOneRequest(&req).(*kvrpcpb.GetResponse) + assert.Nil(t, resp.RegionError) + assert.Nil(t, resp.Error) + assert.Equal(t, []byte(nil), resp.Value) +} + +// TestGetVersions4B tests we get the correct value when there are multiple versions. +func TestGetVersions4B(t *testing.T) { + builder := newBuilder(t) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{99}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + {cf: engine_util.CfDefault, key: []byte{99}, ts: 60, value: []byte{43}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 66, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 60}}, + {cf: engine_util.CfDefault, key: []byte{99}, ts: 120, value: []byte{44}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 122, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 120}}, + }) + + var req0 kvrpcpb.GetRequest + req0.Key = []byte{99} + req0.Version = 40 + var req1 kvrpcpb.GetRequest + req1.Key = []byte{99} + req1.Version = 56 + var req2 kvrpcpb.GetRequest + req2.Key = []byte{99} + req2.Version = 60 + var req3 kvrpcpb.GetRequest + req3.Key = []byte{99} + req3.Version = 65 + var req4 kvrpcpb.GetRequest + req4.Key = []byte{99} + req4.Version = 66 + var req5 kvrpcpb.GetRequest + req5.Key = []byte{99} + req5.Version = 100 + + resps := builder.runRequests(&req0, &req1, &req2, &req3, &req4, &req5) + resp0 := resps[0].(*kvrpcpb.GetResponse) + resp1 := resps[1].(*kvrpcpb.GetResponse) + resp2 := resps[2].(*kvrpcpb.GetResponse) + resp3 := resps[3].(*kvrpcpb.GetResponse) + resp4 := resps[4].(*kvrpcpb.GetResponse) + resp5 := resps[5].(*kvrpcpb.GetResponse) + + assert.Nil(t, resp0.RegionError) + assert.Nil(t, resp0.Error) + assert.Equal(t, []byte(nil), resp0.Value) + assert.Nil(t, resp1.RegionError) + assert.Nil(t, resp1.Error) + assert.Equal(t, []byte{42}, resp1.Value) + assert.Nil(t, resp2.RegionError) + assert.Nil(t, resp2.Error) + assert.Equal(t, []byte{42}, resp2.Value) + assert.Nil(t, resp3.RegionError) + assert.Nil(t, resp3.Error) + assert.Equal(t, []byte{42}, resp3.Value) + assert.Nil(t, resp4.RegionError) + assert.Nil(t, resp4.Error) + assert.Equal(t, []byte{43}, resp4.Value) + assert.Nil(t, resp5.RegionError) + assert.Nil(t, resp5.Error) + assert.Equal(t, []byte{43}, resp5.Value) +} + +// TestGetDeleted4B tests we get the correct value when there are multiple versions, including a deletion. 
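+// In these fixtures a CfWrite value appears to be a kind byte followed by the ts
+// of the default-CF entry it refers to, as a big-endian uint64: for example
+// {1, 0, 0, 0, 0, 0, 0, 0, 50} marks a Put of the version written at ts 50, and
+// {2, 0, 0, 0, 0, 0, 0, 0, 60} the deletion at ts 60. A minimal decoding sketch
+// under that reading (variable names are ours, not part of the tested API):
+//
+//	kind := value[0]                                 // 1 = Put, 2 = Delete, 3 = Rollback
+//	pointedTs := binary.BigEndian.Uint64(value[1:9]) // ts of the matching default-CF entry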
+func TestGetDeleted4B(t *testing.T) { + builder := newBuilder(t) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{99}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + {cf: engine_util.CfDefault, key: []byte{99}, ts: 60, value: nil}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 66, value: []byte{2, 0, 0, 0, 0, 0, 0, 0, 60}}, + {cf: engine_util.CfDefault, key: []byte{99}, ts: 120, value: []byte{44}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 122, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 120}}, + }) + + var req0 kvrpcpb.GetRequest + req0.Key = []byte{99} + req0.Version = 54 + var req1 kvrpcpb.GetRequest + req1.Key = []byte{99} + req1.Version = 60 + var req2 kvrpcpb.GetRequest + req2.Key = []byte{99} + req2.Version = 65 + var req3 kvrpcpb.GetRequest + req3.Key = []byte{99} + req3.Version = 66 + var req4 kvrpcpb.GetRequest + req4.Key = []byte{99} + req4.Version = 67 + var req5 kvrpcpb.GetRequest + req5.Key = []byte{99} + req5.Version = 122 + + resps := builder.runRequests(&req0, &req1, &req2, &req3, &req4, &req5) + resp0 := resps[0].(*kvrpcpb.GetResponse) + resp1 := resps[1].(*kvrpcpb.GetResponse) + resp2 := resps[2].(*kvrpcpb.GetResponse) + resp3 := resps[3].(*kvrpcpb.GetResponse) + resp4 := resps[4].(*kvrpcpb.GetResponse) + resp5 := resps[5].(*kvrpcpb.GetResponse) + + assert.Nil(t, resp0.RegionError) + assert.Nil(t, resp0.Error) + assert.Equal(t, []byte{42}, resp0.Value) + assert.Nil(t, resp1.RegionError) + assert.Nil(t, resp1.Error) + assert.Equal(t, []byte{42}, resp1.Value) + assert.Nil(t, resp2.RegionError) + assert.Nil(t, resp2.Error) + assert.Equal(t, []byte{42}, resp2.Value) + assert.Nil(t, resp3.RegionError) + assert.Nil(t, resp3.Error) + assert.Equal(t, []byte(nil), resp3.Value) + assert.Nil(t, resp4.RegionError) + assert.Nil(t, resp4.Error) + assert.Equal(t, []byte(nil), resp4.Value) + assert.Nil(t, resp5.RegionError) + assert.Nil(t, resp5.Error) + assert.Equal(t, []byte{44}, resp5.Value) +} + +// TestGetLocked4B tests getting a value when it is locked by another transaction. +func TestGetLocked4B(t *testing.T) { + builder := newBuilder(t) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{99}, ts: 50, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{99}, ts: 54, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 50}}, + {cf: engine_util.CfLock, key: []byte{99}, value: []byte{99, 1, 0, 0, 0, 0, 0, 0, 0, 200, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + + var req0 kvrpcpb.GetRequest + req0.Key = []byte{99} + req0.Version = 55 + var req1 kvrpcpb.GetRequest + req1.Key = []byte{99} + req1.Version = 300 + + resps := builder.runRequests(&req0, &req1) + resp0 := resps[0].(*kvrpcpb.GetResponse) + resp1 := resps[1].(*kvrpcpb.GetResponse) + + assert.Nil(t, resp0.RegionError) + assert.Nil(t, resp0.Error) + assert.Equal(t, []byte{42}, resp0.Value) + + assert.Nil(t, resp1.RegionError) + lockInfo := resp1.Error.Locked + assert.Equal(t, []byte{99}, lockInfo.Key) + assert.Equal(t, []byte{99}, lockInfo.PrimaryLock) + assert.Equal(t, uint64(200), lockInfo.LockVersion) +} + +// TestEmptyPrewrite4B tests that a Prewrite with no mutations succeeds and changes nothing. 
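+// Throughout these tests builder.assertLens(d, l, w) appears to check the number
+// of entries in the Default, Lock and Write column families, in that order, while
+// assertLen checks a single column family.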
+func TestEmptyPrewrite4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest() + resp := builder.runOneRequest(cmd).(*kvrpcpb.PrewriteResponse) + + assert.Empty(t, resp.Errors) + assert.Nil(t, resp.RegionError) + builder.assertLen(engine_util.CfDefault, 0) +} + +// TestSinglePrewrite4B tests a prewrite with one write, it should succeed, we test all the expected values. +func TestSinglePrewrite4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest(mutation(3, []byte{42}, kvrpcpb.Op_Put)) + cmd.LockTtl = 1000 + resp := builder.runOneRequest(cmd).(*kvrpcpb.PrewriteResponse) + + assert.Empty(t, resp.Errors) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 1, 0) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 3, 232}}, + }) +} + +// TestPrewriteLocked4B tests that two prewrites to the same key causes a lock error. +func TestPrewriteLocked4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest(mutation(3, []byte{42}, kvrpcpb.Op_Put)) + cmd2 := builder.prewriteRequest(mutation(3, []byte{53}, kvrpcpb.Op_Put)) + resps := builder.runRequests(cmd, cmd2) + + assert.Empty(t, resps[0].(*kvrpcpb.PrewriteResponse).Errors) + assert.Nil(t, resps[0].(*kvrpcpb.PrewriteResponse).RegionError) + assert.Equal(t, len(resps[1].(*kvrpcpb.PrewriteResponse).Errors), 1) + assert.Nil(t, resps[1].(*kvrpcpb.PrewriteResponse).RegionError) + builder.assertLens(1, 1, 0) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) +} + +// TestPrewriteWritten4B tests an attempted prewrite with a write conflict. +func TestPrewriteWritten4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest(mutation(3, []byte{42}, kvrpcpb.Op_Put)) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 80, value: []byte{5}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 101, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.PrewriteResponse) + + assert.Equal(t, 1, len(resp.Errors)) + assert.NotNil(t, resp.Errors[0].Conflict) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 0, 1) + + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 80, value: []byte{5}}, + }) +} + +// TestPrewriteWrittenNoConflict4B tests an attempted prewrite with a write already present, but no conflict. 
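+// Here the existing write committed at ts 90, before this prewrite's start ts, so
+// it is not a conflict; contrast TestPrewriteWritten4B above, where the write at
+// ts 101 is newer than the prewrite and is reported as a conflict.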
+func TestPrewriteWrittenNoConflict4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest(mutation(3, []byte{42}, kvrpcpb.Op_Put)) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 80, value: []byte{5}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 90, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.PrewriteResponse) + + assert.Empty(t, resp.Errors) + assert.Nil(t, resp.RegionError) + assert.Nil(t, resp.RegionError) + builder.assertLens(2, 1, 1) + + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{5}, ts: 80}, + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) +} + +// TestMultiplePrewrites4B tests that multiple prewrites to different keys succeeds. +func TestMultiplePrewrites4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest(mutation(3, []byte{42}, kvrpcpb.Op_Put)) + cmd2 := builder.prewriteRequest(mutation(4, []byte{53}, kvrpcpb.Op_Put)) + resps := builder.runRequests(cmd, cmd2) + + assert.Empty(t, resps[0].(*kvrpcpb.PrewriteResponse).Errors) + assert.Nil(t, resps[0].(*kvrpcpb.PrewriteResponse).RegionError) + assert.Empty(t, resps[1].(*kvrpcpb.PrewriteResponse).Errors) + assert.Nil(t, resps[1].(*kvrpcpb.PrewriteResponse).RegionError) + builder.assertLens(2, 2, 0) + + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{4}, ts: 101, value: []byte{53}}, + {cf: engine_util.CfLock, key: []byte{4}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, 101, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) +} + +// TestPrewriteOverwrite4B tests that two writes in the same prewrite succeed and we see the second write. +func TestPrewriteOverwrite4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest(mutation(3, []byte{42}, kvrpcpb.Op_Put), mutation(3, []byte{45}, kvrpcpb.Op_Put)) + resp := builder.runOneRequest(cmd).(*kvrpcpb.PrewriteResponse) + + assert.Empty(t, resp.Errors) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 1, 0) + + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{45}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) +} + +// TestPrewriteMultiple4B tests that a prewrite with multiple mutations succeeds. +func TestPrewriteMultiple4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.prewriteRequest( + mutation(3, []byte{42}, kvrpcpb.Op_Put), + mutation(4, []byte{43}, kvrpcpb.Op_Put), + mutation(5, []byte{44}, kvrpcpb.Op_Put), + mutation(4, nil, kvrpcpb.Op_Del), + mutation(4, []byte{1, 3, 5}, kvrpcpb.Op_Put), + mutation(255, []byte{45}, kvrpcpb.Op_Put), + ) + resp := builder.runOneRequest(cmd).(*kvrpcpb.PrewriteResponse) + + assert.Empty(t, resp.Errors) + assert.Nil(t, resp.RegionError) + builder.assertLens(4, 4, 0) + + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{4}, value: []byte{1, 3, 5}}, + }) +} + +// TestEmptyCommit4B tests a commit request with no keys to commit. +func TestEmptyCommit4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([][]byte{}...) 
+ resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 0) +} + +// TestSimpleCommit4B tests committing a single key. +func TestSingleCommit4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfDefault, key: []byte{3}}, + }) +} + +// TestCommitOverwrite4B tests committing where there is already a write. +func TestCommitOverwrite4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}) + builder.init([]kv{ + // A previous, committed write. + {cf: engine_util.CfDefault, key: []byte{3}, ts: 80, value: []byte{15}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 84, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + + // The current, pre-written write. + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(2, 0, 2) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfDefault, key: []byte{3}}, + }) +} + +// TestCommitMultipleKeys4B tests committing multiple keys in the same commit. Also puts some other data in the DB and test +// that it is unchanged. +func TestCommitMultipleKeys4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}, []byte{12, 4, 0}, []byte{15}) + builder.init([]kv{ + // Current, pre-written. + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{12, 4, 0}, value: []byte{1, 1, 0, 0, 1, 5}}, + {cf: engine_util.CfLock, key: []byte{12, 4, 0}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{15}, value: []byte{0}}, + {cf: engine_util.CfLock, key: []byte{15}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + + // Some committed data. + {cf: engine_util.CfDefault, key: []byte{4}, ts: 80, value: []byte{15}}, + {cf: engine_util.CfWrite, key: []byte{4}, ts: 84, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {cf: engine_util.CfDefault, key: []byte{3, 0}, ts: 80, value: []byte{150}}, + {cf: engine_util.CfWrite, key: []byte{3, 0}, ts: 84, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + + // Another pre-written transaction. 
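+		// The entries below were written at ts 99 by a different transaction; the
+		// commit under test must leave their locks and values untouched.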
+ {cf: engine_util.CfDefault, key: []byte{2}, ts: 99, value: []byte{0, 0, 0, 8}}, + {cf: engine_util.CfLock, key: []byte{2}, value: []byte{1, 2, 0, 0, 0, 0, 0, 0, 0, 99, 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{43, 6}, ts: 99, value: []byte{1, 1, 0, 0, 1, 5}}, + {cf: engine_util.CfLock, key: []byte{43, 6}, value: []byte{1, 2, 0, 0, 0, 0, 0, 0, 0, 99, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(7, 2, 5) + builder.assert([]kv{ + // The newly committed data. + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfWrite, key: []byte{12, 4, 0}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfWrite, key: []byte{15}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + + // Committed data is untouched. + {cf: engine_util.CfDefault, key: []byte{4}, ts: 80}, + {cf: engine_util.CfWrite, key: []byte{4}, ts: 84}, + {cf: engine_util.CfDefault, key: []byte{3, 0}, ts: 80}, + {cf: engine_util.CfWrite, key: []byte{3, 0}, ts: 84}, + + // Pre-written data is untouched. + {cf: engine_util.CfDefault, key: []byte{2}, ts: 99}, + {cf: engine_util.CfLock, key: []byte{2}}, + {cf: engine_util.CfDefault, key: []byte{43, 6}, ts: 99}, + {cf: engine_util.CfLock, key: []byte{43, 6}}, + }) +} + +// TestRecommitKey4B tests committing the same key multiple times in one commit. +func TestRecommitKey4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}, []byte{3}) + builder.init([]kv{ + // The current, pre-written write. + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfDefault, key: []byte{3}}, + }) +} + +// TestCommitConflictRollback4B tests committing a rolled back transaction. +func TestCommitConflictRollback4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110}, + }) +} + +// TestCommitConflictRace4B tests committing where a key is pre-written by a different transaction. 
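+// The key is locked by a different transaction (its value was written at ts 90),
+// so this commit must fail with a retryable error and leave that transaction's
+// lock and value untouched.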
+func TestCommitConflictRace4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 90, value: []byte{110}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 3, 0, 0, 0, 0, 0, 0, 0, 90, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.NotNil(t, resp.Error.Retryable) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 1, 0) + builder.assert([]kv{ + {cf: engine_util.CfLock, key: []byte{3}}, + {cf: engine_util.CfDefault, key: []byte{3}, ts: 90}, + }) +} + +// TestCommitConflictRepeat4B tests recommitting a transaction (i.e., the same commit request is received twice). +func TestCommitConflictRepeat4B(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110}, + {cf: engine_util.CfDefault, key: []byte{3}}, + }) +} + +// TestCommitMissingPrewrite4a tests committing a transaction which was not prewritten (i.e., a request was lost, but +// the commit request was not). +func TestCommitMissingPrewrite4a(t *testing.T) { + builder := newBuilder(t) + cmd := builder.commitRequest([]byte{3}) + builder.init([]kv{ + // Some committed data. + {cf: engine_util.CfDefault, key: []byte{4}, ts: 80, value: []byte{15}}, + {cf: engine_util.CfWrite, key: []byte{4}, ts: 84, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {cf: engine_util.CfDefault, key: []byte{3, 0}, ts: 80, value: []byte{150}}, + {cf: engine_util.CfWrite, key: []byte{3, 0}, ts: 84, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + // Note no prewrite. + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CommitResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(2, 0, 2) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{4}, ts: 80}, + {cf: engine_util.CfWrite, key: []byte{4}, ts: 84}, + {cf: engine_util.CfDefault, key: []byte{3, 0}, ts: 80}, + {cf: engine_util.CfWrite, key: []byte{3, 0}, ts: 84}, + }) +} diff --git a/kv/transaction/commands4c_test.go b/kv/transaction/commands4c_test.go new file mode 100644 index 00000000..3130a945 --- /dev/null +++ b/kv/transaction/commands4c_test.go @@ -0,0 +1,484 @@ +package transaction + +import ( + "encoding/binary" + "testing" + + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/stretchr/testify/assert" +) + +// TestEmptyRollback4C tests a rollback with no keys. +func TestEmptyRollback4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([][]byte{}...) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 0) +} + +// TestRollback4C tests a successful rollback. +func TestRollback4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([]byte{3}) + + builder.init([]kv{ + // See TestSinglePrewrite. 
+ {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) +} + +// TestRollbackDuplicateKeys4C tests a rollback which rolls back multiple keys, including one duplicated key. +func TestRollbackDuplicateKeys4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([]byte{3}, []byte{15}, []byte{3}) + + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{15}, value: []byte{0}}, + {cf: engine_util.CfLock, key: []byte{15}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 2) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfWrite, key: []byte{15}, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) +} + +// TestRollbackMissingPrewrite4C tests trying to roll back a missing prewrite. +func TestRollbackMissingPrewrite4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([]byte{3}) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) +} + +// TestRollbackCommitted4C tests trying to roll back a transaction which is already committed. +func TestRollbackCommitted4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([]byte{3}) + + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.NotNil(t, resp.Error.Abort) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 110}, + }) +} + +// TestRollbackDuplicate4C tests trying to roll back a transaction which has already been rolled back. +func TestRollbackDuplicate4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([]byte{3}) + + builder.init([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 100, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 100}, + }) +} + +// TestRollbackOtherTxn4C tests trying to roll back the wrong transaction. 
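+// The existing lock guards a value written at ts 80 by another transaction, so the
+// rollback must leave that lock and value alone and only record a rollback write at
+// its own ts (100).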
+func TestRollbackOtherTxn4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.rollbackRequest([]byte{3}) + + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 80, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, 80, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.BatchRollbackResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 1, 1) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 80}, + {cf: engine_util.CfLock, key: []byte{3}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 100, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, builder.ts()}}, + }) +} + +// TestCheckTxnStatusTtlExpired4C checks that if there is a lock and its ttl has expired, then it is rolled back. +func TestCheckTxnStatusTtlExpired4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.checkTxnStatusRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{3, 1, 0, 0, 5, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 8}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CheckTxnStatusResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, kvrpcpb.Action_TTLExpireRollback, resp.Action) + builder.assertLens(0, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: cmd.LockTs, value: []byte{3, 0, 0, 5, 0, 0, 0, 0, builder.ts()}}, + }) +} + +// TestCheckTxnStatusTtlNotExpired4C checks that if there is a lock and its ttl has not expired, then nothing changes. +func TestCheckTxnStatusTtlNotExpired4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.checkTxnStatusRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{3, 1, 0, 0, 5, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 1, 0, 0, 0, 8}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CheckTxnStatusResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, kvrpcpb.Action_NoAction, resp.Action) + builder.assertLens(1, 1, 0) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs}, + {cf: engine_util.CfLock, key: []byte{3}}, + }) +} + +// TestCheckTxnStatusRolledBack4C tests checking a key which has already been rolled back.. +func TestCheckTxnStatusRolledBack4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.checkTxnStatusRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: cmd.LockTs, value: []byte{3, 0, 0, 5, 0, 0, 0, 0, builder.ts()}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{3, 1, 0, 0, 8, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 8}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CheckTxnStatusResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, kvrpcpb.Action_NoAction, resp.Action) + assert.Equal(t, uint64(0), resp.CommitVersion) + builder.assertLens(1, 1, 1) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: cmd.LockTs}, + {cf: engine_util.CfLock, key: []byte{3}}, + }) +} + +// TestCheckTxnStatusCommitted4C tests checking a key which has already been committed. 
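+//
+// Together with TestCheckTxnStatusNoLockNoWrite4C below, the CheckTxnStatus tests pin down the expected behaviour: an
+// expired lock is removed and a rollback record is written (Action_TTLExpireRollback); a live lock is left untouched
+// (Action_NoAction); a key with no lock and no write record gets a rollback record (Action_LockNotExistRollback); and a
+// key that is already committed or rolled back returns Action_NoAction, with CommitVersion reporting the commit
+// timestamp when the key was committed and zero when it was rolled back.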
+func TestCheckTxnStatusCommitted4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.checkTxnStatusRequest([]byte{3}) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: cmd.LockTs, value: []byte{1, 0, 0, 5, 0, 0, 0, 0, builder.ts()}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CheckTxnStatusResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, kvrpcpb.Action_NoAction, resp.Action) + assert.Equal(t, binary.BigEndian.Uint64([]byte{0, 0, 5, 0, 0, 0, 0, builder.ts()}), resp.CommitVersion) + builder.assertLens(1, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: cmd.LockTs}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: cmd.LockTs}, + }) +} + +// TestCheckTxnStatusNoLockNoWrite4C checks if there is no data for the key, then we get the right response. +func TestCheckTxnStatusNoLockNoWrite4C(t *testing.T) { + builder := newBuilder(t) + cmd := builder.checkTxnStatusRequest([]byte{3}) + resp := builder.runOneRequest(cmd).(*kvrpcpb.CheckTxnStatusResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, kvrpcpb.Action_LockNotExistRollback, resp.Action) + builder.assertLens(0, 0, 1) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: cmd.LockTs, value: []byte{3, 0, 0, 5, 0, 0, 0, 0, builder.ts()}}, + }) +} + +// TestEmptyResolve4C tests a completely empty resolve request. +func TestEmptyResolve4C(t *testing.T) { + builder := newBuilder(t) + cmd := resolveRequest(0, 0) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ResolveLockResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(0, 0, 0) +} + +// TestResolveCommit4C should commit all keys in the specified transaction. +func TestResolveCommit4C(t *testing.T) { + builder := newBuilder(t) + cmd := resolveRequest(100, 120) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100, value: []byte{43}}, + {cf: engine_util.CfLock, key: []byte{7}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110, value: []byte{44}}, + {cf: engine_util.CfLock, key: []byte{200}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ResolveLockResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(3, 1, 2) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100}, + {cf: engine_util.CfWrite, key: []byte{3}, ts: 120, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100}, + {cf: engine_util.CfWrite, key: []byte{7}, ts: 120, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110}, + {cf: engine_util.CfLock, key: []byte{200}}, + }) +} + +// TestResolveRollback4C should rollback all keys in the specified transaction. 
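+//
+// A resolve request carries a start version and an optional commit version: resolveRequest(100, 120), as used in
+// TestResolveCommit4C above, is expected to commit every key locked at start ts 100 at commit ts 120, while
+// resolveRequest(100, 0), used here, is expected to roll those keys back. In both tests the lock on key {200}, which
+// belongs to a different transaction (start ts 110), must be left untouched.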
+func TestResolveRollback4C(t *testing.T) { + builder := newBuilder(t) + cmd := resolveRequest(100, 0) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100, value: []byte{42}}, + {cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100, value: []byte{43}}, + {cf: engine_util.CfLock, key: []byte{7}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0}}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110, value: []byte{44}}, + {cf: engine_util.CfLock, key: []byte{200}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ResolveLockResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(1, 1, 2) + builder.assert([]kv{ + {cf: engine_util.CfWrite, key: []byte{3}, ts: 100, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfWrite, key: []byte{7}, ts: 100, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110}, + {cf: engine_util.CfLock, key: []byte{200}}, + }) +} + +// TestResolveCommitWritten4C tests a resolve where the matched keys are already committed or rolled back. +func TestResolveCommitWritten4C(t *testing.T) { + builder := newBuilder(t) + cmd := resolveRequest(100, 120) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 120, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100, value: []byte{43}}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 100, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110, value: []byte{44}}, + {cf: engine_util.CfLock, key: []byte{200}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ResolveLockResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(3, 1, 2) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 120}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 100}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110}, + {cf: engine_util.CfLock, key: []byte{200}}, + }) +} + +// TestResolveRollbackWritten4C tests a rollback resolve where data has already been committed or rolled back. 
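+//
+// As in TestResolveCommitWritten4C above, no lock in this fixture has start ts 100 (the only lock, on key {200},
+// belongs to ts 110), so the resolve is expected to be a no-op: both pre-existing write records for key {201} and the
+// foreign lock must be left exactly as they were.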
+func TestResolveRollbackWritten4C(t *testing.T) { + builder := newBuilder(t) + cmd := resolveRequest(100, 0) + builder.init([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100, value: []byte{42}}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 120, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100, value: []byte{43}}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 100, value: []byte{3, 0, 0, 0, 0, 0, 0, 0, 100}}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110, value: []byte{44}}, + {cf: engine_util.CfLock, key: []byte{200}, value: []byte{1, 0, 0, 0, 0, 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, 0, 0}}, + }) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ResolveLockResponse) + + assert.Nil(t, resp.Error) + assert.Nil(t, resp.RegionError) + builder.assertLens(3, 1, 2) + builder.assert([]kv{ + {cf: engine_util.CfDefault, key: []byte{3}, ts: 100}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 120}, + {cf: engine_util.CfDefault, key: []byte{7}, ts: 100}, + {cf: engine_util.CfWrite, key: []byte{201}, ts: 100}, + {cf: engine_util.CfDefault, key: []byte{200}, ts: 110}, + {cf: engine_util.CfLock, key: []byte{200}}, + }) +} + +// TestScanEmpty4C tests a scan after the end of the DB. +func TestScanEmpty4C(t *testing.T) { + builder := builderForScan(t) + + cmd := builder.scanRequest([]byte{200}, 10000) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ScanResponse) + assert.Nil(t, resp.RegionError) + assert.Empty(t, resp.Pairs) +} + +// TestScanLimitZero4C tests we get nothing if limit is 0. +func TestScanLimitZero4C(t *testing.T) { + builder := builderForScan(t) + + cmd := builder.scanRequest([]byte{3}, 0) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ScanResponse) + assert.Nil(t, resp.RegionError) + assert.Empty(t, resp.Pairs) +} + +// TestScanAll4C start at the beginning of the DB and read all pairs, respecting the timestamp. +func TestScanAll4C(t *testing.T) { + builder := builderForScan(t) + + cmd := builder.scanRequest([]byte{0}, 10000) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ScanResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, 11, len(resp.Pairs)) + assert.Equal(t, []byte{1}, resp.Pairs[0].Key) + assert.Equal(t, []byte{50}, resp.Pairs[0].Value) + assert.Equal(t, []byte{199}, resp.Pairs[10].Key) + assert.Equal(t, []byte{54}, resp.Pairs[10].Value) +} + +// TestScanLimit4C tests that scan takes the limit into account. +func TestScanLimit4C(t *testing.T) { + builder := builderForScan(t) + + cmd := builder.scanRequest([]byte{2}, 6) + resp := builder.runOneRequest(cmd).(*kvrpcpb.ScanResponse) + + assert.Nil(t, resp.RegionError) + assert.Equal(t, 6, len(resp.Pairs)) + assert.Equal(t, []byte{3}, resp.Pairs[0].Key) + assert.Equal(t, []byte{51}, resp.Pairs[0].Value) + assert.Equal(t, []byte{4}, resp.Pairs[5].Key) + assert.Equal(t, []byte{52}, resp.Pairs[5].Value) +} + +// TestScanDeleted4C scan over a value which is deleted then replaced. 
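+//
+// In the fixture built by builderForScan below, key {150} is written with value {42} (committed at ts 99), deleted
+// (committed at ts 101) and then re-written with value {64} (committed at ts 116). The three scans at versions 100,
+// 105 and 120 are therefore expected to see {42}, no pair at all, and {64} respectively.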
+func TestScanDeleted4C(t *testing.T) { + builder := builderForScan(t) + + req1 := builder.scanRequest([]byte{100}, 10000) + req1.Version = 100 + req2 := builder.scanRequest([]byte{100}, 10000) + req2.Version = 105 + req3 := builder.scanRequest([]byte{100}, 10000) + req3.Version = 120 + + resps := builder.runRequests(req1, req2, req3) + + resp1 := resps[0].(*kvrpcpb.ScanResponse) + assert.Nil(t, resp1.RegionError) + assert.Equal(t, 3, len(resp1.Pairs)) + assert.Equal(t, []byte{150}, resp1.Pairs[1].Key) + assert.Equal(t, []byte{42}, resp1.Pairs[1].Value) + + resp2 := resps[1].(*kvrpcpb.ScanResponse) + assert.Nil(t, resp2.RegionError) + assert.Equal(t, 2, len(resp2.Pairs)) + assert.Equal(t, []byte{120}, resp2.Pairs[0].Key) + assert.Equal(t, []byte{199}, resp2.Pairs[1].Key) + + resp3 := resps[2].(*kvrpcpb.ScanResponse) + assert.Nil(t, resp3.RegionError) + assert.Equal(t, 3, len(resp3.Pairs)) + assert.Equal(t, []byte{150}, resp3.Pairs[1].Key) + assert.Equal(t, []byte{64}, resp3.Pairs[1].Value) +} + +func builderForScan(t *testing.T) *testBuilder { + values := []kv{ + // Committed before 100. + {engine_util.CfDefault, []byte{1}, 80, []byte{50}}, + {engine_util.CfWrite, []byte{1}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{1, 23}, 80, []byte{55}}, + {engine_util.CfWrite, []byte{1, 23}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{3}, 80, []byte{51}}, + {engine_util.CfWrite, []byte{3}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{3, 45}, 80, []byte{56}}, + {engine_util.CfWrite, []byte{3, 45}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{3, 46}, 80, []byte{57}}, + {engine_util.CfWrite, []byte{3, 46}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{3, 47}, 80, []byte{58}}, + {engine_util.CfWrite, []byte{3, 47}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{3, 48}, 80, []byte{59}}, + {engine_util.CfWrite, []byte{3, 48}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{4}, 80, []byte{52}}, + {engine_util.CfWrite, []byte{4}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{120}, 80, []byte{53}}, + {engine_util.CfWrite, []byte{120}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfDefault, []byte{199}, 80, []byte{54}}, + {engine_util.CfWrite, []byte{199}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + + // Committed after 100. + {engine_util.CfDefault, []byte{4, 45}, 110, []byte{58}}, + {engine_util.CfWrite, []byte{4, 45}, 116, []byte{1, 0, 0, 0, 0, 0, 0, 0, 110}}, + {engine_util.CfDefault, []byte{4, 46}, 110, []byte{57}}, + {engine_util.CfWrite, []byte{4, 46}, 116, []byte{1, 0, 0, 0, 0, 0, 0, 0, 110}}, + {engine_util.CfDefault, []byte{4, 47}, 110, []byte{58}}, + {engine_util.CfWrite, []byte{4, 47}, 116, []byte{1, 0, 0, 0, 0, 0, 0, 0, 110}}, + {engine_util.CfDefault, []byte{4, 48}, 110, []byte{59}}, + {engine_util.CfWrite, []byte{4, 48}, 116, []byte{1, 0, 0, 0, 0, 0, 0, 0, 110}}, + + // Committed after 100, but started before. 
+ {engine_util.CfDefault, []byte{5, 45}, 97, []byte{60}}, + {engine_util.CfWrite, []byte{5, 45}, 101, []byte{1, 0, 0, 0, 0, 0, 0, 0, 97}}, + {engine_util.CfDefault, []byte{5, 46}, 97, []byte{61}}, + {engine_util.CfWrite, []byte{5, 46}, 101, []byte{1, 0, 0, 0, 0, 0, 0, 0, 97}}, + {engine_util.CfDefault, []byte{5, 47}, 97, []byte{62}}, + {engine_util.CfWrite, []byte{5, 47}, 101, []byte{1, 0, 0, 0, 0, 0, 0, 0, 97}}, + {engine_util.CfDefault, []byte{5, 48}, 97, []byte{63}}, + {engine_util.CfWrite, []byte{5, 48}, 101, []byte{1, 0, 0, 0, 0, 0, 0, 0, 97}}, + + // A deleted value and replaced value. + {engine_util.CfDefault, []byte{150}, 80, []byte{42}}, + {engine_util.CfWrite, []byte{150}, 99, []byte{1, 0, 0, 0, 0, 0, 0, 0, 80}}, + {engine_util.CfWrite, []byte{150}, 101, []byte{2, 0, 0, 0, 0, 0, 0, 0, 97}}, + {engine_util.CfDefault, []byte{150}, 110, []byte{64}}, + {engine_util.CfWrite, []byte{150}, 116, []byte{1, 0, 0, 0, 0, 0, 0, 0, 110}}, + } + builder := newBuilder(t) + builder.init(values) + return &builder +} diff --git a/kv/transaction/commands_test.go b/kv/transaction/commands_test.go new file mode 100644 index 00000000..c67ba2d6 --- /dev/null +++ b/kv/transaction/commands_test.go @@ -0,0 +1,211 @@ +package transaction + +// This file contains utility code for testing commands. + +import ( + "context" + "encoding/binary" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/pingcap-incubator/tinykv/kv/server" + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/kv/transaction/mvcc" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "github.com/stretchr/testify/assert" +) + +// testBuilder is a helper type for running command tests. +type testBuilder struct { + t *testing.T + server *server.Server + // mem will always be the backing store for server. + mem *storage.MemStorage + // Keep track of timestamps. + prevTs uint64 +} + +// kv is a type which identifies a key/value pair to testBuilder. +type kv struct { + cf string + // The user key (unencoded, no time stamp). + key []byte + // Can be elided. The builder's prevTS will be used if the ts is needed. + ts uint64 + // Can be elided in assertion functions. If elided then testBuilder checks that the value has not changed. + value []byte +} + +func newBuilder(t *testing.T) testBuilder { + mem := storage.NewMemStorage() + server := server.NewServer(mem) + server.Latches.Validation = func(txn *mvcc.MvccTxn, keys [][]byte) { + keyMap := make(map[string]struct{}) + for _, k := range keys { + keyMap[string(k)] = struct{}{} + } + for _, wr := range txn.Writes() { + key := wr.Key() + // This is a bit of a hack and relies on all the raw tests using keys shorter than 9 bytes, which is the + // minimum length for an encoded key. + if len(key) > 8 { + switch wr.Cf() { + case engine_util.CfDefault: + key = mvcc.DecodeUserKey(wr.Key()) + case engine_util.CfWrite: + key = mvcc.DecodeUserKey(wr.Key()) + } + } + if _, ok := keyMap[string(key)]; !ok { + t.Errorf("Failed latching validation: tried to write a key which was not latched in %v", wr.Data) + } + } + } + return testBuilder{t, server, mem, 99} +} + +// init sets values in the test's DB. 
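+//
+// The raw byte values in these fixtures follow the on-disk encodings from the mvcc package: a lock value is the primary
+// key, one kind byte, the 8-byte big-endian start ts and the 8-byte big-endian ttl (see lock.go), so a literal such as
+//
+//	{cf: engine_util.CfLock, key: []byte{3}, value: []byte{1, 1, 0, 0, 0, 0, 0, 0, 0, builder.ts(), 0, 0, 0, 0, 0, 0, 0, 0}}
+//
+// denotes a put lock on key {3} with primary key {1}, start ts equal to the builder's current ts and ttl 0. Write
+// values are one kind byte (1 put, 2 delete, 3 rollback) followed by the 8-byte start ts (see write.go).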
+func (builder *testBuilder) init(values []kv) {
+	for _, kv := range values {
+		ts := kv.ts
+		if ts == 0 {
+			ts = builder.prevTs
+		}
+		switch kv.cf {
+		case engine_util.CfDefault:
+			builder.mem.Set(kv.cf, mvcc.EncodeKey(kv.key, ts), kv.value)
+		case engine_util.CfWrite:
+			builder.mem.Set(kv.cf, mvcc.EncodeKey(kv.key, ts), kv.value)
+		case engine_util.CfLock:
+			builder.mem.Set(kv.cf, kv.key, kv.value)
+		}
+	}
+}
+
+func (builder *testBuilder) runRequests(reqs ...interface{}) []interface{} {
+	var result []interface{}
+	for _, req := range reqs {
+		reqName := fmt.Sprintf("%v", reflect.TypeOf(req))
+		reqName = strings.TrimPrefix(strings.TrimSuffix(reqName, "Request"), "*kvrpcpb.")
+		fnName := "Kv" + reqName
+		serverVal := reflect.ValueOf(builder.server)
+		fn := serverVal.MethodByName(fnName)
+		ctxtVal := reflect.ValueOf(context.Background())
+		reqVal := reflect.ValueOf(req)
+
+		results := fn.Call([]reflect.Value{ctxtVal, reqVal})
+
+		assert.Nil(builder.t, results[1].Interface())
+		result = append(result, results[0].Interface())
+	}
+	return result
+}
+
+// runOneRequest is like runRequests but only runs a single request.
+func (builder *testBuilder) runOneRequest(req interface{}) interface{} {
+	return builder.runRequests(req)[0]
+}
+
+func (builder *testBuilder) nextTs() uint64 {
+	builder.prevTs++
+	return builder.prevTs
+}
+
+// ts returns the most recent timestamp used by testBuilder as a byte.
+func (builder *testBuilder) ts() byte {
+	return byte(builder.prevTs)
+}
+
+// assert checks that each key/value pair exists and has the given value, or, if there is no value, that it is unchanged.
+func (builder *testBuilder) assert(kvs []kv) {
+	for _, kv := range kvs {
+		var key []byte
+		ts := kv.ts
+		if ts == 0 {
+			ts = builder.prevTs
+		}
+		switch kv.cf {
+		case engine_util.CfDefault:
+			key = mvcc.EncodeKey(kv.key, ts)
+		case engine_util.CfWrite:
+			key = mvcc.EncodeKey(kv.key, ts)
+		case engine_util.CfLock:
+			key = kv.key
+		}
+		if kv.value == nil {
+			assert.False(builder.t, builder.mem.HasChanged(kv.cf, key))
+		} else {
+			assert.Equal(builder.t, kv.value, builder.mem.Get(kv.cf, key))
+		}
+	}
+}
+
+// assertLen asserts the size of one of the column families.
+func (builder *testBuilder) assertLen(cf string, size int) {
+	assert.Equal(builder.t, size, builder.mem.Len(cf))
+}
+
+// assertLens asserts the size of each column family.
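+// The arguments are, in order, the expected number of entries in CfDefault, CfLock and CfWrite; for example,
+// assertLens(1, 0, 1) asserts that there is one default entry, no locks and a single write record.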
+func (builder *testBuilder) assertLens(def int, lock int, write int) { + builder.assertLen(engine_util.CfDefault, def) + builder.assertLen(engine_util.CfLock, lock) + builder.assertLen(engine_util.CfWrite, write) +} + +func (builder *testBuilder) prewriteRequest(muts ...*kvrpcpb.Mutation) *kvrpcpb.PrewriteRequest { + var req kvrpcpb.PrewriteRequest + req.PrimaryLock = []byte{1} + req.StartVersion = builder.nextTs() + req.Mutations = muts + return &req +} + +func mutation(key byte, value []byte, op kvrpcpb.Op) *kvrpcpb.Mutation { + var mut kvrpcpb.Mutation + mut.Key = []byte{key} + mut.Value = value + mut.Op = op + return &mut +} + +func (builder *testBuilder) commitRequest(keys ...[]byte) *kvrpcpb.CommitRequest { + var req kvrpcpb.CommitRequest + req.StartVersion = builder.nextTs() + req.CommitVersion = builder.prevTs + 10 + req.Keys = keys + return &req +} + +func (builder *testBuilder) rollbackRequest(keys ...[]byte) *kvrpcpb.BatchRollbackRequest { + var req kvrpcpb.BatchRollbackRequest + req.StartVersion = builder.nextTs() + req.Keys = keys + return &req +} + +func (builder *testBuilder) checkTxnStatusRequest(key []byte) *kvrpcpb.CheckTxnStatusRequest { + var req kvrpcpb.CheckTxnStatusRequest + builder.nextTs() + req.LockTs = binary.BigEndian.Uint64([]byte{0, 0, 5, 0, 0, 0, 0, builder.ts()}) + req.CurrentTs = binary.BigEndian.Uint64([]byte{0, 0, 6, 0, 0, 0, 0, builder.ts()}) + req.PrimaryKey = key + return &req +} + +func resolveRequest(startTs uint64, commitTs uint64) *kvrpcpb.ResolveLockRequest { + var req kvrpcpb.ResolveLockRequest + req.StartVersion = startTs + req.CommitVersion = commitTs + return &req +} + +func (builder *testBuilder) scanRequest(startKey []byte, limit uint32) *kvrpcpb.ScanRequest { + var req kvrpcpb.ScanRequest + req.StartKey = startKey + req.Limit = limit + req.Version = builder.nextTs() + return &req +} diff --git a/kv/transaction/doc.go b/kv/transaction/doc.go new file mode 100644 index 00000000..27f9bb5f --- /dev/null +++ b/kv/transaction/doc.go @@ -0,0 +1,40 @@ +package transaction + +// The transaction package implements TinyKV's 'transaction' layer. This takes incoming requests from the kv/server/server.go +// as input and turns them into reads and writes of the underlying key/value store (defined by Storage in kv/storage/storage.go). +// The storage engine handles communicating with other nodes and writing data to disk. The transaction layer must +// translate high-level TinyKV commands into low-level raw key/value commands and ensure that processing of commands do +// not interfere with processing other commands. +// +// Note that there are two kinds of transactions in play: TinySQL transactions are collaborative between TinyKV and its +// client (e.g., TinySQL). They are implemented using multiple TinyKV requests and ensure that multiple SQL commands can +// be executed atomically. There are also mvcc transactions which are an implementation detail of this +// layer in TinyKV (represented by MvccTxn in kv/transaction/mvcc/transaction.go). These ensure that a *single* request +// is executed atomically. +// +// *Locks* are used to implement TinySQL transactions. Setting or checking a lock in a TinySQL transaction is lowered to +// writing to the underlying store. +// +// *Latches* are used to implement mvcc transactions and are not visible to the client. They are stored outside the +// underlying storage (or equivalently, you can think of every key having its own latch). See the latches package for details. 
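+// For example, latches are what stop two commit commands that touch the same key from interleaving their reads and
+// writes inside TinyKV, while locks are what stop a second TinySQL transaction from prewriting a key that is already
+// part of another, uncommitted transaction.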
+//
+// Within the `mvcc` package, `Lock` and `Write` provide abstractions for lowering locks and writes into simple keys and values.
+//
+// ## Encoding user key/values
+//
+// The mvcc strategy is essentially to store all data (committed and uncommitted) at every point in time. So for example, if we store
+// a value for a key, then store another value (a logical overwrite) at a later time, both values are preserved in the underlying
+// storage.
+//
+// This is implemented by encoding user keys with their timestamps (the starting timestamp of the transaction in which they are
+// written) to make an encoded key (see codec.go). The `default` CF is a mapping from encoded keys to their values.
+//
+// Locking a key means writing into the `lock` CF. In this CF, we use the user key (i.e., not the encoded key so that a key is locked
+// for all timestamps). The value in the `lock` CF consists of the 'primary key' for the transaction, the kind of lock (for 'put',
+// 'delete', or 'rollback'), the start timestamp of the transaction, and the lock's ttl (time to live). See lock.go for the
+// implementation.
+//
+// The status of values is stored in the `write` CF. Here we map keys encoded with their commit timestamps (i.e., the time at which
+// a transaction is committed) to a value containing the transaction's starting timestamp, and the kind of write ('put', 'delete', or
+// 'rollback'). Note that for transactions which are rolled back, the start timestamp is used for the commit timestamp in the encoded
+// key.
diff --git a/kv/transaction/latches/latches.go b/kv/transaction/latches/latches.go
new file mode 100644
index 00000000..7bbf7cef
--- /dev/null
+++ b/kv/transaction/latches/latches.go
@@ -0,0 +1,99 @@
+package latches
+
+import (
+	"sync"
+
+	"github.com/pingcap-incubator/tinykv/kv/transaction/mvcc"
+)
+
+// Latching provides atomicity of TinyKV commands. This should not be confused with SQL transactions, which provide atomicity
+// for multiple TinyKV commands. For example, consider two commit commands: these write to multiple keys/CFs, so if they race,
+// then it is possible for inconsistent data to be written. By latching the keys each command might write, we ensure that the
+// two commands will not race to write the same keys.
+//
+// A latch is a per-key lock. There is only one latch per user key, not one per CF or one for each encoded key. Latches are
+// only needed for writing. Only one thread can hold a latch at a time and all keys that a command might write must be locked
+// at once.
+//
+// Latching is implemented using a single map which maps keys to a Go WaitGroup. Access to this map is guarded by a mutex
+// to ensure that latching is atomic and consistent. Since the mutex is a global lock, it would cause intolerable contention
+// in a real system.
+
+type Latches struct {
+	// Before modifying any property of a key, the thread must have the latch for that key. `Latches` maps each latched
+	// key to a WaitGroup. Threads which find a key locked should wait on that WaitGroup.
+	latchMap map[string]*sync.WaitGroup
+	// Mutex to guard latchMap. A thread must hold this mutex while it makes any change to latchMap.
+	latchGuard sync.Mutex
+	// An optional validation function, only used for testing.
+	Validation func(txn *mvcc.MvccTxn, keys [][]byte)
+}
+
+// NewLatches creates a new Latches object for managing a database's latches. There should only be one such object, shared
+// between all threads.
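+//
+// As a rough, illustrative sketch (the variable names here are not part of this package), a write command blocks until
+// every key it might touch is latched, does its work, and then releases:
+//
+//	latches.WaitForLatches(keysToWrite)
+//	defer latches.ReleaseLatches(keysToWrite)
+//	// ... read the current state, build up an MvccTxn, and write its modifications ...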
+func NewLatches() *Latches {
+	l := new(Latches)
+	l.latchMap = make(map[string]*sync.WaitGroup)
+	return l
+}
+
+// AcquireLatches tries to lock all latches specified by keys. If this succeeds, nil is returned. If any of the keys are
+// locked, then AcquireLatches returns a WaitGroup which the thread can use to be woken when the lock is free.
+func (l *Latches) AcquireLatches(keysToLatch [][]byte) *sync.WaitGroup {
+	l.latchGuard.Lock()
+	defer l.latchGuard.Unlock()
+
+	// Check none of the keys we want to write are locked.
+	for _, key := range keysToLatch {
+		if latchWg, ok := l.latchMap[string(key)]; ok {
+			// Return a wait group to wait on.
+			return latchWg
+		}
+	}
+
+	// All latches are available, lock them all with a new wait group.
+	wg := new(sync.WaitGroup)
+	wg.Add(1)
+	for _, key := range keysToLatch {
+		l.latchMap[string(key)] = wg
+	}
+
+	return nil
+}
+
+// ReleaseLatches releases the latches for all keys in keysToUnlatch. It will wake up any threads blocked on one of the
+// latches. All keys in keysToUnlatch must have been locked together in one call to AcquireLatches.
+func (l *Latches) ReleaseLatches(keysToUnlatch [][]byte) {
+	l.latchGuard.Lock()
+	defer l.latchGuard.Unlock()
+
+	first := true
+	for _, key := range keysToUnlatch {
+		if first {
+			wg := l.latchMap[string(key)]
+			wg.Done()
+			first = false
+		}
+		delete(l.latchMap, string(key))
+	}
+}
+
+// WaitForLatches attempts to lock all keys in keysToLatch using AcquireLatches. If a latch is already locked, then
+// WaitForLatches will wait for it to become unlocked and then try again. Therefore WaitForLatches may block for an unbounded
+// length of time.
+func (l *Latches) WaitForLatches(keysToLatch [][]byte) {
+	for {
+		wg := l.AcquireLatches(keysToLatch)
+		if wg == nil {
+			return
+		}
+		wg.Wait()
+	}
+}
+
+// Validate calls the function in Validation, if it exists.
+func (l *Latches) Validate(txn *mvcc.MvccTxn, latched [][]byte) {
+	if l.Validation != nil {
+		l.Validation(txn, latched)
+	}
+}
diff --git a/kv/transaction/latches/latches_test.go b/kv/transaction/latches/latches_test.go
new file mode 100644
index 00000000..1f4cba7e
--- /dev/null
+++ b/kv/transaction/latches/latches_test.go
@@ -0,0 +1,30 @@
+package latches
+
+import (
+	"github.com/stretchr/testify/assert"
+	"sync"
+	"testing"
+)
+
+func TestAcquireLatches(t *testing.T) {
+	l := Latches{
+		latchMap: make(map[string]*sync.WaitGroup),
+	}
+
+	// Acquiring a new latch is ok.
+	wg := l.AcquireLatches([][]byte{{}, {3}, {3, 0, 42}})
+	assert.Nil(t, wg)
+
+	// Can only acquire once.
+	wg = l.AcquireLatches([][]byte{{}})
+	assert.NotNil(t, wg)
+	wg = l.AcquireLatches([][]byte{{3, 0, 42}})
+	assert.NotNil(t, wg)
+
+	// Release then acquire is ok.
+	l.ReleaseLatches([][]byte{{3}, {3, 0, 43}})
+	wg = l.AcquireLatches([][]byte{{3}})
+	assert.Nil(t, wg)
+	wg = l.AcquireLatches([][]byte{{3, 0, 42}})
+	assert.NotNil(t, wg)
+}
diff --git a/kv/transaction/mvcc/lock.go b/kv/transaction/mvcc/lock.go
new file mode 100644
index 00000000..2f33a61e
--- /dev/null
+++ b/kv/transaction/mvcc/lock.go
@@ -0,0 +1,105 @@
+package mvcc
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"reflect"
+
+	"github.com/pingcap-incubator/tinykv/kv/util/engine_util"
+	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
+)
+
+const TsMax uint64 = ^uint64(0)
+
+type Lock struct {
+	Primary []byte
+	Ts      uint64
+	Ttl     uint64
+	Kind    WriteKind
+}
+
+type KlPair struct {
+	Key  []byte
+	Lock *Lock
+}
+
+// Info creates a LockInfo object from a Lock object for key.
+func (lock *Lock) Info(key []byte) *kvrpcpb.LockInfo { + info := kvrpcpb.LockInfo{} + info.Key = key + info.LockVersion = lock.Ts + info.PrimaryLock = lock.Primary + info.LockTtl = lock.Ttl + return &info +} + +func (lock *Lock) ToBytes() []byte { + buf := append(lock.Primary, byte(lock.Kind)) + buf = append(buf, make([]byte, 16)...) + binary.BigEndian.PutUint64(buf[len(lock.Primary)+1:], lock.Ts) + binary.BigEndian.PutUint64(buf[len(lock.Primary)+9:], lock.Ttl) + return buf +} + +// ParseLock attempts to parse a byte string into a Lock object. +func ParseLock(input []byte) (*Lock, error) { + if len(input) <= 16 { + return nil, fmt.Errorf("mvcc: error parsing lock, not enough input, found %d bytes", len(input)) + } + + primaryLen := len(input) - 17 + primary := input[:primaryLen] + kind := WriteKind(input[primaryLen]) + ts := binary.BigEndian.Uint64(input[primaryLen+1:]) + ttl := binary.BigEndian.Uint64(input[primaryLen+9:]) + + return &Lock{Primary: primary, Ts: ts, Ttl: ttl, Kind: kind}, nil +} + +// IsLockedFor checks if lock locks key at txnStartTs. +func (lock *Lock) IsLockedFor(key []byte, txnStartTs uint64, resp interface{}) bool { + if lock == nil { + return false + } + if txnStartTs == TsMax && bytes.Compare(key, lock.Primary) != 0 { + return false + } + if lock.Ts <= txnStartTs { + err := &kvrpcpb.KeyError{Locked: lock.Info(key)} + respValue := reflect.ValueOf(resp) + reflect.Indirect(respValue).FieldByName("Error").Set(reflect.ValueOf(err)) + return true + } + return false +} + +// AllLocksForTxn returns all locks for the current transaction. +func AllLocksForTxn(txn *MvccTxn) ([]KlPair, error) { + var result []KlPair + for iter := txn.Reader().IterCF(engine_util.CfLock); iter.Valid(); iter.Next() { + item := iter.Item() + val, err := item.Value() + if err != nil { + return nil, err + } + lock, err := ParseLock(val) + if err != nil { + return nil, err + } + if lock.Ts == txn.StartTS() { + result = append(result, KlPair{item.Key(), lock}) + } + } + return result, nil +} + +func LockedError(info ...kvrpcpb.LockInfo) []*kvrpcpb.KeyError { + var result []*kvrpcpb.KeyError + for _, i := range info { + var ke kvrpcpb.KeyError + ke.Locked = &i + result = append(result, &ke) + } + return result +} diff --git a/kv/transaction/mvcc/transaction.go b/kv/transaction/mvcc/transaction.go new file mode 100644 index 00000000..1a206d7c --- /dev/null +++ b/kv/transaction/mvcc/transaction.go @@ -0,0 +1,111 @@ +package mvcc + +import ( + "encoding/binary" + + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/pingcap-incubator/tinykv/kv/util/codec" +) + +// MvccTxn groups together writes as part of a single transaction. It also provides an abstraction over low-level +// storage, lowering the concepts of timestamps, writes, and locks into plain keys and values. +type MvccTxn struct { + // Your code here (4a). +} + +func (txn *MvccTxn) Reader() storage.StorageReader { + // Your code here (4a). + return nil +} + +func (txn *MvccTxn) StartTS() uint64 { + // Your code here (4a). + return 0 +} + +// Writes returns all changes added to this transaction. +func (txn *MvccTxn) Writes() []storage.Modify { + // Your code here (4a). + return nil +} + +// PutWrite records a write at key and ts. +func (txn *MvccTxn) PutWrite(key []byte, ts uint64, write *Write) { + // Your code here (4a). +} + +// GetLock returns a lock if key is locked. It will return (nil, nil) if there is no lock on key, and (nil, err) +// if an error occurs during lookup. 
+func (txn *MvccTxn) GetLock(key []byte) (*Lock, error) { + // Your code here (4a). + return nil, nil +} + +// PutLock adds a key/lock to this transaction. +func (txn *MvccTxn) PutLock(key []byte, lock *Lock) { + // Your code here (4a). +} + +// DeleteLock adds a delete lock to this transaction. +func (txn *MvccTxn) DeleteLock(key []byte) { + // Your code here (4a). +} + +// GetValue finds the value for key, valid at the start timestamp of this transaction. +// I.e., the most recent value committed before the start of this transaction. +func (txn *MvccTxn) GetValue(key []byte) ([]byte, error) { + // Your code here (4a). + return nil, nil +} + +// PutValue adds a key/value write to this transaction. +func (txn *MvccTxn) PutValue(key []byte, value []byte) { + // Your code here (4a). +} + +// DeleteValue removes a key/value pair in this transaction. +func (txn *MvccTxn) DeleteValue(key []byte) { + // Your code here (4a). +} + +// CurrentWrite searches for a write with this transaction's start timestamp. It returns a Write from the DB and that +// write's commit timestamp, or an error. +func (txn *MvccTxn) CurrentWrite(key []byte) (*Write, uint64, error) { + // Your code here (4a). + return nil, 0, nil +} + +// MostRecentWrite finds the most recent write with the given key. It returns a Write from the DB and that +// write's commit timestamp, or an error. +func (txn *MvccTxn) MostRecentWrite(key []byte) (*Write, uint64, error) { + // Your code here (4a). + return nil, 0, nil +} + +// EncodeKey encodes a user key and appends an encoded timestamp to a key. Keys and timestamps are encoded so that +// timestamped keys are sorted first by key (ascending), then by timestamp (descending). The encoding is based on +// https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format#memcomparable-format. +func EncodeKey(key []byte, ts uint64) []byte { + encodedKey := codec.EncodeBytes(key) + newKey := append(encodedKey, make([]byte, 8)...) + binary.BigEndian.PutUint64(newKey[len(encodedKey):], ^ts) + return newKey +} + +// DecodeUserKey takes a key + timestamp and returns the key part. +func DecodeUserKey(key []byte) []byte { + _, userKey, err := codec.DecodeBytes(key) + if err != nil { + panic(err) + } + return userKey +} + +// decodeTimestamp takes a key + timestamp and returns the timestamp part. 
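+//
+// Because EncodeKey appends the bitwise complement of the timestamp, all versions of a user key sort newest-first, so a
+// reader built on this encoding can, roughly speaking, find the newest version visible to a transaction with a single
+// seek (a sketch; iter and txn are illustrative names, not part of this file's API):
+//
+//	iter.Seek(EncodeKey(userKey, txn.StartTS()))
+//	// The iterator is now at the newest version of userKey with ts <= StartTS, or at the next user key.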
+func decodeTimestamp(key []byte) uint64 { + left, _, err := codec.DecodeBytes(key) + if err != nil { + panic(err) + } + return ^binary.BigEndian.Uint64(left) +} diff --git a/kv/transaction/mvcc/transaction_test.go b/kv/transaction/mvcc/transaction_test.go new file mode 100644 index 00000000..f4218c65 --- /dev/null +++ b/kv/transaction/mvcc/transaction_test.go @@ -0,0 +1,296 @@ +package mvcc + +import ( + "bytes" + "github.com/pingcap-incubator/tinykv/kv/util/engine_util" + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + "testing" + + "github.com/pingcap-incubator/tinykv/kv/storage" + "github.com/stretchr/testify/assert" +) + +func TestEncodeKey(t *testing.T) { + assert.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 0, 247, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, EncodeKey([]byte{}, 0)) + assert.Equal(t, []byte{42, 0, 0, 0, 0, 0, 0, 0, 248, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, EncodeKey([]byte{42}, 0)) + assert.Equal(t, []byte{42, 0, 5, 0, 0, 0, 0, 0, 250, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, EncodeKey([]byte{42, 0, 5}, 0)) + assert.Equal(t, []byte{42, 0, 0, 0, 0, 0, 0, 0, 248, 0, 0, 39, 154, 52, 120, 65, 255}, EncodeKey([]byte{42}, ^uint64(43543258743295))) + assert.Equal(t, []byte{42, 0, 5, 0, 0, 0, 0, 0, 250, 0, 0, 0, 0, 5, 226, 221, 76}, EncodeKey([]byte{42, 0, 5}, ^uint64(98753868))) + + // Test that encoded keys are in descending order. + assert.True(t, bytes.Compare(EncodeKey([]byte{42}, 238), EncodeKey([]byte{200}, 0)) < 0) + assert.True(t, bytes.Compare(EncodeKey([]byte{42}, 238), EncodeKey([]byte{42, 0}, 0)) < 0) +} + +func TestDecodeKey(t *testing.T) { + assert.Equal(t, []byte{}, DecodeUserKey(EncodeKey([]byte{}, 0))) + assert.Equal(t, []byte{42}, DecodeUserKey(EncodeKey([]byte{42}, 0))) + assert.Equal(t, []byte{42, 0, 5}, DecodeUserKey(EncodeKey([]byte{42, 0, 5}, 0))) + assert.Equal(t, []byte{42}, DecodeUserKey(EncodeKey([]byte{42}, 2342342355436234))) + assert.Equal(t, []byte{42, 0, 5}, DecodeUserKey(EncodeKey([]byte{42, 0, 5}, 234234))) +} + +func testTxn(startTs uint64, f func(m *storage.MemStorage)) MvccTxn { + mem := storage.NewMemStorage() + if f != nil { + f(mem) + } + reader, _ := mem.Reader(&kvrpcpb.Context{}) + return NewTxn(reader, startTs) +} + +func assertPutInTxn(t *testing.T, txn *MvccTxn, key []byte, value []byte, cf string) { + writes := txn.Writes() + assert.Equal(t, 1, len(writes)) + expected := storage.Put{Cf: cf, Key: key, Value: value} + put, ok := writes[0].Data.(storage.Put) + assert.True(t, ok) + assert.Equal(t, expected, put) +} + +func assertDeleteInTxn(t *testing.T, txn *MvccTxn, key []byte, cf string) { + writes := txn.Writes() + assert.Equal(t, 1, len(writes)) + expected := storage.Delete{Cf: cf, Key: key} + del, ok := writes[0].Data.(storage.Delete) + assert.True(t, ok) + assert.Equal(t, expected, del) +} + +func TestPutLock4A(t *testing.T) { + txn := testTxn(42, nil) + lock := Lock{ + Primary: []byte{16}, + Ts: 100, + Ttl: 100000, + Kind: WriteKindRollback, + } + + txn.PutLock([]byte{1}, &lock) + assertPutInTxn(t, &txn, []byte{1}, lock.ToBytes(), engine_util.CfLock) +} + +func TestPutWrite4A(t *testing.T) { + txn := testTxn(0, nil) + write := Write{ + StartTS: 100, + Kind: WriteKindDelete, + } + + txn.PutWrite([]byte{16, 240}, 0, &write) + assertPutInTxn(t, &txn, EncodeKey([]byte{16, 240}, 0), write.ToBytes(), engine_util.CfWrite) +} + +func TestPutValue4A(t *testing.T) { + txn := testTxn(453325345, nil) + value := []byte{1, 1, 2, 3, 5, 8, 13} + + txn.PutValue([]byte{32}, value) + assertPutInTxn(t, &txn, 
EncodeKey([]byte{32}, 453325345), value, engine_util.CfDefault) +} + +func TestGetLock4A(t *testing.T) { + lock := Lock{ + Primary: []byte{16}, + Ts: 100, + Ttl: 100000, + Kind: WriteKindRollback, + } + txn := testTxn(42, func(m *storage.MemStorage) { + m.Set(engine_util.CfLock, []byte{1}, lock.ToBytes()) + }) + + gotLock, err := txn.GetLock([]byte{1}) + assert.Nil(t, err) + assert.Equal(t, lock, *gotLock) +} + +func TestDeleteLock4A(t *testing.T) { + txn := testTxn(42, nil) + txn.DeleteLock([]byte{1}) + assertDeleteInTxn(t, &txn, []byte{1}, engine_util.CfLock) +} + +func TestDeleteValue4A(t *testing.T) { + txn := testTxn(63454245, nil) + txn.DeleteValue([]byte{17, 255, 0}) + assertDeleteInTxn(t, &txn, EncodeKey([]byte{17, 255, 0}, 63454245), engine_util.CfDefault) +} + +func singleEntry(m *storage.MemStorage) { + m.Set(engine_util.CfDefault, EncodeKey([]byte{16, 240}, 40), []byte{1, 2, 3}) + write := Write{ + StartTS: 40, + Kind: WriteKindPut, + } + m.Set(engine_util.CfWrite, EncodeKey([]byte{16, 240}, 42), write.ToBytes()) +} + +func TestGetValueSimple4A(t *testing.T) { + txn := testTxn(43, singleEntry) + + value, err := txn.GetValue([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, []byte{1, 2, 3}, value) +} + +func TestGetValueMissing4A(t *testing.T) { + txn := testTxn(43, singleEntry) + + value, err := txn.GetValue([]byte{16, 241}) + assert.Nil(t, err) + assert.Equal(t, []byte(nil), value) +} + +func TestGetValueTooEarly4A(t *testing.T) { + txn := testTxn(41, singleEntry) + + value, err := txn.GetValue([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, []byte(nil), value) +} + +func twoEntries(m *storage.MemStorage) { + m.Set(engine_util.CfDefault, EncodeKey([]byte{16, 240}, 40), []byte{1, 2, 3}) + write1 := Write{ + StartTS: 40, + Kind: WriteKindPut, + } + m.Set(engine_util.CfWrite, EncodeKey([]byte{16, 240}, 42), write1.ToBytes()) + + m.Set(engine_util.CfDefault, EncodeKey([]byte{16, 240}, 50), []byte{255, 0, 255}) + write2 := Write{ + StartTS: 50, + Kind: WriteKindPut, + } + m.Set(engine_util.CfWrite, EncodeKey([]byte{16, 240}, 52), write2.ToBytes()) +} + +func TestGetValueOverwritten4A(t *testing.T) { + txn := testTxn(52, twoEntries) + + value, err := txn.GetValue([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, []byte{255, 0, 255}, value) +} + +func TestGetValueNotOverwritten4A(t *testing.T) { + txn := testTxn(50, twoEntries) + + value, err := txn.GetValue([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, []byte{1, 2, 3}, value) +} + +func deleted(m *storage.MemStorage) { + m.Set(engine_util.CfDefault, EncodeKey([]byte{16, 240}, 40), []byte{1, 2, 3}) + write1 := Write{ + StartTS: 40, + Kind: WriteKindPut, + } + m.Set(engine_util.CfWrite, EncodeKey([]byte{16, 240}, 42), write1.ToBytes()) + + write2 := Write{ + StartTS: 50, + Kind: WriteKindDelete, + } + m.Set(engine_util.CfWrite, EncodeKey([]byte{16, 240}, 52), write2.ToBytes()) +} + +func TestGetValueDeleted4A(t *testing.T) { + txn := testTxn(500, deleted) + + value, err := txn.GetValue([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, []byte(nil), value) +} + +func TestGetValueNotDeleted4A(t *testing.T) { + txn := testTxn(45, deleted) + + value, err := txn.GetValue([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, []byte{1, 2, 3}, value) +} + +func TestCurrentWrite4A(t *testing.T) { + txn := testTxn(50, twoEntries) + + write, ts, err := txn.CurrentWrite([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, Write{ + StartTS: 50, + Kind: WriteKindPut, + }, *write) + assert.Equal(t, uint64(52), 
ts) + + txn.StartTS = 40 + write, ts, err = txn.CurrentWrite([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, Write{ + StartTS: 40, + Kind: WriteKindPut, + }, *write) + assert.Equal(t, uint64(42), ts) + + txn.StartTS = 41 + write, ts, err = txn.CurrentWrite([]byte{16, 240}) + assert.Nil(t, err) + var noWrite *Write + assert.Equal(t, noWrite, write) + assert.Equal(t, uint64(0), ts) +} + +func TestMostRecentWrite4A(t *testing.T) { + // Empty DB. + txn := testTxn(50, nil) + write, ts, err := txn.MostRecentWrite([]byte{16, 240}) + assert.Nil(t, write) + assert.Equal(t, uint64(0), ts) + assert.Nil(t, err) + + // Simple case - key exists. + txn = testTxn(50, twoEntries) + write, ts, err = txn.MostRecentWrite([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, Write{ + StartTS: 50, + Kind: WriteKindPut, + }, *write) + assert.Equal(t, uint64(52), ts) + // No entry for other keys. + write, ts, err = txn.MostRecentWrite([]byte{16}) + assert.Nil(t, write) + assert.Equal(t, uint64(0), ts) + assert.Nil(t, err) + + // Deleted key. + txn = testTxn(50, deleted) + write, ts, err = txn.MostRecentWrite([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, Write{ + StartTS: 50, + Kind: WriteKindDelete, + }, *write) + assert.Equal(t, uint64(52), ts) + + // Result does not depend on txn ts. + txn = testTxn(5000, twoEntries) + write, ts, err = txn.MostRecentWrite([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, Write{ + StartTS: 50, + Kind: WriteKindPut, + }, *write) + assert.Equal(t, uint64(52), ts) + + // Result does not depend on txn ts. + txn = testTxn(1, twoEntries) + write, ts, err = txn.MostRecentWrite([]byte{16, 240}) + assert.Nil(t, err) + assert.Equal(t, Write{ + StartTS: 50, + Kind: WriteKindPut, + }, *write) + assert.Equal(t, uint64(52), ts) +} diff --git a/kv/transaction/mvcc/write.go b/kv/transaction/mvcc/write.go new file mode 100644 index 00000000..524f9511 --- /dev/null +++ b/kv/transaction/mvcc/write.go @@ -0,0 +1,69 @@ +package mvcc + +import ( + "encoding/binary" + "fmt" + + "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" +) + +// Write is a representation of a committed write to backing storage. +// A serialized version is stored in the "write" CF of our engine when a write is committed. That allows MvccTxn to find +// the status of a key at a given timestamp. 
+type Write struct { + StartTS uint64 + Kind WriteKind +} + +func (wr *Write) ToBytes() []byte { + buf := append([]byte{byte(wr.Kind)}, 0, 0, 0, 0, 0, 0, 0, 0) + binary.BigEndian.PutUint64(buf[1:], wr.StartTS) + return buf +} + +func ParseWrite(value []byte) (*Write, error) { + if value == nil { + return nil, nil + } + if len(value) != 9 { + return nil, fmt.Errorf("mvcc/write/ParseWrite: value is incorrect length, expected 9, found %d", len(value)) + } + kind := value[0] + startTs := binary.BigEndian.Uint64(value[1:]) + + return &Write{startTs, WriteKind(kind)}, nil +} + +type WriteKind int + +const ( + WriteKindPut WriteKind = 1 + WriteKindDelete WriteKind = 2 + WriteKindRollback WriteKind = 3 +) + +func (wk WriteKind) ToProto() kvrpcpb.Op { + switch wk { + case WriteKindPut: + return kvrpcpb.Op_Put + case WriteKindDelete: + return kvrpcpb.Op_Del + case WriteKindRollback: + return kvrpcpb.Op_Rollback + } + + return -1 +} + +func WriteKindFromProto(op kvrpcpb.Op) WriteKind { + switch op { + case kvrpcpb.Op_Put: + return WriteKindPut + case kvrpcpb.Op_Del: + return WriteKindDelete + case kvrpcpb.Op_Rollback: + return WriteKindRollback + } + + return -1 +} diff --git a/kv/util/codec/codec.go b/kv/util/codec/codec.go new file mode 100644 index 00000000..974ab30a --- /dev/null +++ b/kv/util/codec/codec.go @@ -0,0 +1,84 @@ +package codec + +import ( + "fmt" + "github.com/pingcap/errors" +) + +const ( + encGroupSize = 8 + encMarker = byte(0xFF) + encPad = byte(0x0) +) + +var pads = make([]byte, encGroupSize) + +// EncodeBytes guarantees the encoded value is in ascending order for comparison, +// encoding with the following rule: +// [group1][marker1]...[groupN][markerN] +// group is 8 bytes slice which is padding with 0. +// marker is `0xFF - padding 0 count` +// For example: +// [] -> [0, 0, 0, 0, 0, 0, 0, 0, 247] +// [1, 2, 3] -> [1, 2, 3, 0, 0, 0, 0, 0, 250] +// [1, 2, 3, 0] -> [1, 2, 3, 0, 0, 0, 0, 0, 251] +// [1, 2, 3, 4, 5, 6, 7, 8] -> [1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247] +// Refer: https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format#memcomparable-format +func EncodeBytes(data []byte) []byte { + // Allocate more space to avoid unnecessary slice growing. + // Assume that the byte slice size is about `(len(data) / encGroupSize + 1) * (encGroupSize + 1)` bytes, + // that is `(len(data) / 8 + 1) * 9` in our implement. + dLen := len(data) + result := make([]byte, 0, (dLen/encGroupSize+1)*(encGroupSize+1)+8) // make extra room for appending ts + for idx := 0; idx <= dLen; idx += encGroupSize { + remain := dLen - idx + padCount := 0 + if remain >= encGroupSize { + result = append(result, data[idx:idx+encGroupSize]...) + } else { + padCount = encGroupSize - remain + result = append(result, data[idx:]...) + result = append(result, pads[:padCount]...) + } + + marker := encMarker - byte(padCount) + result = append(result, marker) + } + return result +} + +// DecodeBytes decodes bytes which is encoded by EncodeBytes before, +// returns the leftover bytes and decoded value if no error. 
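+// For keys produced by mvcc.EncodeKey, the decoded value is the user key and the leftover bytes are the 8-byte
+// complemented timestamp, which is how DecodeUserKey and decodeTimestamp in kv/transaction/mvcc/transaction.go recover
+// the two halves of an encoded key.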
+func DecodeBytes(b []byte) ([]byte, []byte, error) { + data := make([]byte, 0, len(b)) + for { + if len(b) < encGroupSize+1 { + return nil, nil, fmt.Errorf("insufficient bytes to decode value: %d", len(b)) + } + + groupBytes := b[:encGroupSize+1] + + group := groupBytes[:encGroupSize] + marker := groupBytes[encGroupSize] + + padCount := encMarker - marker + if padCount > encGroupSize { + return nil, nil, errors.Errorf("invalid marker byte, group bytes %q", groupBytes) + } + + realGroupSize := encGroupSize - padCount + data = append(data, group[:realGroupSize]...) + b = b[encGroupSize+1:] + + if padCount != 0 { + // Check validity of padding bytes. + for _, v := range group[realGroupSize:] { + if v != encPad { + return nil, nil, errors.Errorf("invalid padding byte, group bytes %q", groupBytes) + } + } + break + } + } + return b, data, nil +} diff --git a/kv/util/engine_util/cf_iterator.go b/kv/util/engine_util/cf_iterator.go new file mode 100644 index 00000000..72c9f67f --- /dev/null +++ b/kv/util/engine_util/cf_iterator.go @@ -0,0 +1,129 @@ +package engine_util + +import ( + "github.com/Connor1996/badger" +) + +type CFItem struct { + item *badger.Item + prefixLen int +} + +// String returns a string representation of Item +func (i *CFItem) String() string { + return i.item.String() +} + +func (i *CFItem) Key() []byte { + return i.item.Key()[i.prefixLen:] +} + +func (i *CFItem) KeyCopy(dst []byte) []byte { + return i.item.KeyCopy(dst)[i.prefixLen:] +} + +func (i *CFItem) Version() uint64 { + return i.item.Version() +} + +func (i *CFItem) IsEmpty() bool { + return i.item.IsEmpty() +} + +func (i *CFItem) Value() ([]byte, error) { + return i.item.Value() +} + +func (i *CFItem) ValueSize() int { + return i.item.ValueSize() +} + +func (i *CFItem) ValueCopy(dst []byte) ([]byte, error) { + return i.item.ValueCopy(dst) +} + +func (i *CFItem) IsDeleted() bool { + return i.item.IsDeleted() +} + +func (i *CFItem) EstimatedSize() int64 { + return i.item.EstimatedSize() +} + +func (i *CFItem) UserMeta() []byte { + return i.item.UserMeta() +} + +type BadgerIterator struct { + iter *badger.Iterator + prefix string +} + +func NewCFIterator(cf string, txn *badger.Txn) *BadgerIterator { + return &BadgerIterator{ + iter: txn.NewIterator(badger.DefaultIteratorOptions), + prefix: cf + "_", + } +} + +func (it *BadgerIterator) Item() DBItem { + return &CFItem{ + item: it.iter.Item(), + prefixLen: len(it.prefix), + } +} + +func (it *BadgerIterator) Valid() bool { return it.iter.ValidForPrefix([]byte(it.prefix)) } + +func (it *BadgerIterator) ValidForPrefix(prefix []byte) bool { + return it.iter.ValidForPrefix(append(prefix, []byte(it.prefix)...)) +} + +func (it *BadgerIterator) Close() { + it.iter.Close() +} + +func (it *BadgerIterator) Next() { + it.iter.Next() +} + +func (it *BadgerIterator) Seek(key []byte) { + it.iter.Seek(append([]byte(it.prefix), key...)) +} + +func (it *BadgerIterator) Rewind() { + it.iter.Rewind() +} + +type DBIterator interface { + // Item returns pointer to the current key-value pair. + Item() DBItem + // Valid returns false when iteration is done. + Valid() bool + // Next would advance the iterator by one. Always check it.Valid() after a Next() + // to ensure you have access to a valid it.Item(). + Next() + // Seek would seek to the provided key if present. If absent, it would seek to the next smallest key + // greater than provided. + Seek([]byte) + + // Close the iterator + Close() +} + +type DBItem interface { + // Key returns the key. 
+ Key() []byte + // KeyCopy returns a copy of the key of the item, writing it to dst slice. + // If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and + // returned. + KeyCopy(dst []byte) []byte + // Value retrieves the value of the item. + Value() ([]byte, error) + // ValueSize returns the size of the value. + ValueSize() int + // ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice. + // If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and + // returned. + ValueCopy(dst []byte) ([]byte, error) +} diff --git a/kv/util/engine_util/doc.go b/kv/util/engine_util/doc.go new file mode 100644 index 00000000..73d254af --- /dev/null +++ b/kv/util/engine_util/doc.go @@ -0,0 +1,17 @@ +package engine_util + +/* +An engine is a low-level system for storing key/value pairs locally (without distribution or any transaction support, +etc.). This package contains code for interacting with such engines. + +CF means 'column family'. A good description of column families is given in https://github.com/facebook/rocksdb/wiki/Column-Families +(specifically for RocksDB, but the general concepts are universal). In short, a column family is a key namespace. +Multiple column families are usually implemented as almost separate databases. Importantly each column family can be +configured separately. Writes can be made atomic across column families, which cannot be done for separate databases. + +engine_util includes the following packages: + +* engines: a data structure for keeping engines required by unistore. +* write_batch: code to batch writes into a single, atomic 'transaction'. +* cf_iterator: code to iterate over a whole column family in badger. +*/ diff --git a/kv/util/engine_util/engine_util_test.go b/kv/util/engine_util/engine_util_test.go new file mode 100644 index 00000000..17158039 --- /dev/null +++ b/kv/util/engine_util/engine_util_test.go @@ -0,0 +1,83 @@ +package engine_util + +import ( + "bytes" + "io/ioutil" + "testing" + + "github.com/Connor1996/badger" + "github.com/stretchr/testify/require" +) + +func TestEngineUtil(t *testing.T) { + dir, err := ioutil.TempDir("", "engine_util") + opts := badger.DefaultOptions + opts.Dir = dir + opts.ValueDir = dir + db, err := badger.Open(opts) + require.Nil(t, err) + + batch := new(WriteBatch) + batch.SetCF(CfDefault, []byte("a"), []byte("a1")) + batch.SetCF(CfDefault, []byte("b"), []byte("b1")) + batch.SetCF(CfDefault, []byte("c"), []byte("c1")) + batch.SetCF(CfDefault, []byte("d"), []byte("d1")) + batch.SetCF(CfWrite, []byte("a"), []byte("a2")) + batch.SetCF(CfWrite, []byte("b"), []byte("b2")) + batch.SetCF(CfWrite, []byte("d"), []byte("d2")) + batch.SetCF(CfLock, []byte("a"), []byte("a3")) + batch.SetCF(CfLock, []byte("c"), []byte("c3")) + batch.SetCF(CfDefault, []byte("e"), []byte("e1")) + batch.DeleteCF(CfDefault, []byte("e")) + err = batch.WriteToDB(db) + require.Nil(t, err) + + _, err = GetCF(db, CfDefault, []byte("e")) + require.Equal(t, err, badger.ErrKeyNotFound) + txn := db.NewTransaction(false) + defer txn.Discard() + defaultIter := NewCFIterator(CfDefault, txn) + defaultIter.Seek([]byte("a")) + item := defaultIter.Item() + require.True(t, bytes.Equal(item.Key(), []byte("a"))) + val, _ := item.Value() + require.True(t, bytes.Equal(val, []byte("a1"))) + defaultIter.Next() + item = defaultIter.Item() + require.True(t, bytes.Equal(item.Key(), []byte("b"))) + val, _ = item.Value() + require.True(t, bytes.Equal(val, []byte("b1"))) + 
defaultIter.Next() + item = defaultIter.Item() + require.True(t, bytes.Equal(item.Key(), []byte("c"))) + val, _ = item.Value() + require.True(t, bytes.Equal(val, []byte("c1"))) + defaultIter.Next() + item = defaultIter.Item() + require.True(t, bytes.Equal(item.Key(), []byte("d"))) + val, _ = item.Value() + require.True(t, bytes.Equal(val, []byte("d1"))) + defaultIter.Next() + require.False(t, defaultIter.Valid()) + defaultIter.Close() + + writeIter := NewCFIterator(CfWrite, txn) + writeIter.Seek([]byte("b")) + item = writeIter.Item() + require.True(t, bytes.Equal(item.Key(), []byte("b"))) + val, _ = item.Value() + require.True(t, bytes.Equal(val, []byte("b2"))) + writeIter.Next() + item = writeIter.Item() + require.True(t, bytes.Equal(item.Key(), []byte("d"))) + val, _ = item.Value() + require.True(t, bytes.Equal(val, []byte("d2"))) + writeIter.Next() + require.False(t, writeIter.Valid()) + writeIter.Close() + + lockIter := NewCFIterator(CfLock, txn) + lockIter.Seek([]byte("d")) + require.False(t, lockIter.Valid()) + lockIter.Close() +} diff --git a/kv/util/engine_util/engines.go b/kv/util/engine_util/engines.go new file mode 100644 index 00000000..7c34bb83 --- /dev/null +++ b/kv/util/engine_util/engines.go @@ -0,0 +1,82 @@ +package engine_util + +import ( + "os" + "path/filepath" + + "github.com/Connor1996/badger" + "github.com/pingcap-incubator/tinykv/kv/config" + "github.com/pingcap-incubator/tinykv/log" +) + +// Engines keeps references to and data for the engines used by unistore. +// All engines are badger key/value databases. +// the Path fields are the filesystem path to where the data is stored. +type Engines struct { + // Data, including data which is committed (i.e., committed across other nodes) and un-committed (i.e., only present + // locally). + Kv *badger.DB + KvPath string + // Metadata used by Raft. + Raft *badger.DB + RaftPath string +} + +func NewEngines(kvEngine, raftEngine *badger.DB, kvPath, raftPath string) *Engines { + return &Engines{ + Kv: kvEngine, + KvPath: kvPath, + Raft: raftEngine, + RaftPath: raftPath, + } +} + +func (en *Engines) WriteKV(wb *WriteBatch) error { + return wb.WriteToDB(en.Kv) +} + +func (en *Engines) WriteRaft(wb *WriteBatch) error { + return wb.WriteToDB(en.Raft) +} + +func (en *Engines) Close() error { + if err := en.Kv.Close(); err != nil { + return err + } + if err := en.Raft.Close(); err != nil { + return err + } + return nil +} + +func (en *Engines) Destroy() error { + if err := en.Close(); err != nil { + return err + } + if err := os.RemoveAll(en.KvPath); err != nil { + return err + } + if err := os.RemoveAll(en.RaftPath); err != nil { + return err + } + return nil +} + +// CreateDB creates a new Badger DB on disk at subPath. +func CreateDB(subPath string, conf *config.Config) *badger.DB { + opts := badger.DefaultOptions + if subPath == "raft" { + // Do not need to write blob for raft engine because it will be deleted soon. 
+ opts.ValueThreshold = 0 + } + opts.Dir = filepath.Join(conf.DBPath, subPath) + opts.ValueDir = opts.Dir + if err := os.MkdirAll(opts.Dir, os.ModePerm); err != nil { + log.Fatal(err) + } + db, err := badger.Open(opts) + if err != nil { + log.Fatal(err) + } + return db +} diff --git a/kv/util/engine_util/util.go b/kv/util/engine_util/util.go new file mode 100644 index 00000000..ee70d31b --- /dev/null +++ b/kv/util/engine_util/util.go @@ -0,0 +1,108 @@ +package engine_util + +import ( + "bytes" + + "github.com/Connor1996/badger" + "github.com/golang/protobuf/proto" +) + +func KeyWithCF(cf string, key []byte) []byte { + return append([]byte(cf+"_"), key...) +} + +func GetCF(db *badger.DB, cf string, key []byte) (val []byte, err error) { + err = db.View(func(txn *badger.Txn) error { + item, err := txn.Get(KeyWithCF(cf, key)) + if err != nil { + return err + } + val, err = item.ValueCopy(val) + return err + }) + return +} + +func GetCFFromTxn(txn *badger.Txn, cf string, key []byte) (val []byte, err error) { + item, err := txn.Get(KeyWithCF(cf, key)) + if err != nil { + return nil, err + } + val, err = item.ValueCopy(val) + return +} + +func PutCF(engine *badger.DB, cf string, key []byte, val []byte) error { + return engine.Update(func(txn *badger.Txn) error { + return txn.Set(KeyWithCF(cf, key), val) + }) +} + +func GetMeta(engine *badger.DB, key []byte, msg proto.Message) error { + var val []byte + err := engine.View(func(txn *badger.Txn) error { + item, err := txn.Get(key) + if err != nil { + return err + } + val, err = item.Value() + return err + }) + if err != nil { + return err + } + return proto.Unmarshal(val, msg) +} + +func GetMetaFromTxn(txn *badger.Txn, key []byte, msg proto.Message) error { + item, err := txn.Get(key) + if err != nil { + return err + } + val, err := item.Value() + if err != nil { + return err + } + return proto.Unmarshal(val, msg) +} + +func PutMeta(engine *badger.DB, key []byte, msg proto.Message) error { + val, err := proto.Marshal(msg) + if err != nil { + return err + } + return engine.Update(func(txn *badger.Txn) error { + return txn.Set(key, val) + }) +} + +func DeleteRange(db *badger.DB, startKey, endKey []byte) error { + batch := new(WriteBatch) + txn := db.NewTransaction(false) + defer txn.Discard() + for _, cf := range CFs { + deleteRangeCF(txn, batch, cf, startKey, endKey) + } + + return batch.WriteToDB(db) +} + +func deleteRangeCF(txn *badger.Txn, batch *WriteBatch, cf string, startKey, endKey []byte) { + it := NewCFIterator(cf, txn) + for it.Seek(startKey); it.Valid(); it.Next() { + item := it.Item() + key := item.KeyCopy(nil) + if ExceedEndKey(key, endKey) { + break + } + batch.DeleteCF(cf, key) + } + defer it.Close() +} + +func ExceedEndKey(current, endKey []byte) bool { + if len(endKey) == 0 { + return false + } + return bytes.Compare(current, endKey) >= 0 +} diff --git a/kv/util/engine_util/write_batch.go b/kv/util/engine_util/write_batch.go new file mode 100644 index 00000000..9f6eee11 --- /dev/null +++ b/kv/util/engine_util/write_batch.go @@ -0,0 +1,110 @@ +package engine_util + +import ( + "github.com/Connor1996/badger" + "github.com/golang/protobuf/proto" + "github.com/pingcap/errors" +) + +type WriteBatch struct { + entries []*badger.Entry + size int + safePoint int + safePointSize int + safePointUndo int +} + +const ( + CfDefault string = "default" + CfWrite string = "write" + CfLock string = "lock" +) + +var CFs [3]string = [3]string{CfDefault, CfWrite, CfLock} + +func (wb *WriteBatch) Len() int { + return len(wb.entries) +} + +func (wb 
*WriteBatch) SetCF(cf string, key, val []byte) { + wb.entries = append(wb.entries, &badger.Entry{ + Key: KeyWithCF(cf, key), + Value: val, + }) + wb.size += len(key) + len(val) +} + +func (wb *WriteBatch) DeleteMeta(key []byte) { + wb.entries = append(wb.entries, &badger.Entry{ + Key: key, + }) + wb.size += len(key) +} + +func (wb *WriteBatch) DeleteCF(cf string, key []byte) { + wb.entries = append(wb.entries, &badger.Entry{ + Key: KeyWithCF(cf, key), + }) + wb.size += len(key) +} + +func (wb *WriteBatch) SetMeta(key []byte, msg proto.Message) error { + val, err := proto.Marshal(msg) + if err != nil { + return errors.WithStack(err) + } + wb.entries = append(wb.entries, &badger.Entry{ + Key: key, + Value: val, + }) + wb.size += len(key) + len(val) + return nil +} + +func (wb *WriteBatch) SetSafePoint() { + wb.safePoint = len(wb.entries) + wb.safePointSize = wb.size +} + +func (wb *WriteBatch) RollbackToSafePoint() { + wb.entries = wb.entries[:wb.safePoint] + wb.size = wb.safePointSize +} + +func (wb *WriteBatch) WriteToDB(db *badger.DB) error { + if len(wb.entries) > 0 { + err := db.Update(func(txn *badger.Txn) error { + for _, entry := range wb.entries { + var err1 error + if len(entry.Value) == 0 { + err1 = txn.Delete(entry.Key) + } else { + err1 = txn.SetEntry(entry) + } + if err1 != nil { + return err1 + } + } + return nil + }) + if err != nil { + return errors.WithStack(err) + } + } + return nil +} + +func (wb *WriteBatch) MustWriteToDB(db *badger.DB) { + err := wb.WriteToDB(db) + if err != nil { + panic(err) + } +} + +func (wb *WriteBatch) Reset() { + wb.entries = wb.entries[:0] + wb.size = 0 + wb.safePoint = 0 + wb.safePointSize = 0 + wb.safePointUndo = 0 +} diff --git a/kv/util/file.go b/kv/util/file.go new file mode 100644 index 00000000..e79d9b37 --- /dev/null +++ b/kv/util/file.go @@ -0,0 +1,57 @@ +package util + +import ( + "hash/crc32" + "io" + "os" + + "github.com/pingcap/errors" +) + +func GetFileSize(path string) (uint64, error) { + fi, err := os.Stat(path) + if err != nil { + return 0, errors.WithStack(err) + } + return uint64(fi.Size()), nil +} + +func FileExists(path string) bool { + fi, err := os.Stat(path) + if err != nil { + return false + } + return !fi.IsDir() +} + +func DirExists(path string) bool { + fi, err := os.Stat(path) + if err != nil { + return false + } + return fi.IsDir() +} + +func DeleteFileIfExists(path string) (bool, error) { + err := os.Remove(path) + if os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, errors.WithStack(err) + } + return true, nil +} + +// CalcCRC32 Calculates the given file's CRC32 checksum. 
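
A minimal sketch (not part of this commit) of how `CreateDB` and `NewEngines` above might be wired together; the "kv" sub-directory name, the config literal, and the temp path are assumptions for illustration, while "raft" is the name `CreateDB` itself special-cases.

```
package main

import (
	"path/filepath"

	"github.com/pingcap-incubator/tinykv/kv/config"
	"github.com/pingcap-incubator/tinykv/kv/util/engine_util"
)

func main() {
	// DBPath is the only config field this CreateDB reads; the literal path
	// here is for illustration only.
	conf := &config.Config{DBPath: "/tmp/tinykv-engines-demo"}

	// "raft" is special-cased by CreateDB (value blobs disabled); "kv" is an
	// assumed name for the data engine.
	kvDB := engine_util.CreateDB("kv", conf)
	raftDB := engine_util.CreateDB("raft", conf)
	engines := engine_util.NewEngines(kvDB, raftDB,
		filepath.Join(conf.DBPath, "kv"),
		filepath.Join(conf.DBPath, "raft"))

	// Destroy closes both engines and removes their directories.
	defer engines.Destroy()
}
```

Note the difference between the two teardown paths above: `Close` leaves the data on disk, while `Destroy` closes both DBs and then removes KvPath and RaftPath.
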
+func CalcCRC32(path string) (uint32, error) { + digest := crc32.NewIEEE() + f, err := os.Open(path) + if err != nil { + return 0, errors.WithStack(err) + } + _, err = io.Copy(digest, f) + if err != nil { + return 0, errors.WithStack(err) + } + return digest.Sum32(), nil +} diff --git a/kv/util/worker/worker.go b/kv/util/worker/worker.go new file mode 100644 index 00000000..6d4444b8 --- /dev/null +++ b/kv/util/worker/worker.go @@ -0,0 +1,60 @@ +package worker + +import "sync" + +type TaskStop struct{} + +type Task interface{} + +type Worker struct { + name string + sender chan<- Task + receiver <-chan Task + closeCh chan struct{} + wg *sync.WaitGroup +} + +type TaskHandler interface { + Handle(t Task) +} + +type Starter interface { + Start() +} + +func (w *Worker) Start(handler TaskHandler) { + w.wg.Add(1) + go func() { + defer w.wg.Done() + if s, ok := handler.(Starter); ok { + s.Start() + } + for { + Task := <-w.receiver + if _, ok := Task.(TaskStop); ok { + return + } + handler.Handle(Task) + } + }() +} + +func (w *Worker) Sender() chan<- Task { + return w.sender +} + +func (w *Worker) Stop() { + w.sender <- TaskStop{} +} + +const defaultWorkerCapacity = 128 + +func NewWorker(name string, wg *sync.WaitGroup) *Worker { + ch := make(chan Task, defaultWorkerCapacity) + return &Worker{ + sender: (chan<- Task)(ch), + receiver: (<-chan Task)(ch), + name: name, + wg: wg, + } +} diff --git a/log/log.go b/log/log.go new file mode 100644 index 00000000..2dacb83f --- /dev/null +++ b/log/log.go @@ -0,0 +1,268 @@ +//high level log wrapper, so it can output different log based on level +package log + +import ( + "fmt" + "io" + "log" + "os" + "runtime" +) + +const ( + Ldate = log.Ldate + Llongfile = log.Llongfile + Lmicroseconds = log.Lmicroseconds + Lshortfile = log.Lshortfile + LstdFlags = log.LstdFlags + Ltime = log.Ltime +) + +type ( + LogLevel int + LogType int +) + +const ( + LOG_FATAL = LogType(0x1) + LOG_ERROR = LogType(0x2) + LOG_WARNING = LogType(0x4) + LOG_INFO = LogType(0x8) + LOG_DEBUG = LogType(0x10) +) + +const ( + LOG_LEVEL_NONE = LogLevel(0x0) + LOG_LEVEL_FATAL = LOG_LEVEL_NONE | LogLevel(LOG_FATAL) + LOG_LEVEL_ERROR = LOG_LEVEL_FATAL | LogLevel(LOG_ERROR) + LOG_LEVEL_WARN = LOG_LEVEL_ERROR | LogLevel(LOG_WARNING) + LOG_LEVEL_INFO = LOG_LEVEL_WARN | LogLevel(LOG_INFO) + LOG_LEVEL_DEBUG = LOG_LEVEL_INFO | LogLevel(LOG_DEBUG) + LOG_LEVEL_ALL = LOG_LEVEL_DEBUG +) + +const FORMAT_TIME_DAY string = "20060102" +const FORMAT_TIME_HOUR string = "2006010215" + +var _log *Logger = New() + +func init() { + SetFlags(Ldate | Ltime | Lshortfile) + SetHighlighting(runtime.GOOS != "windows") +} + +func GlobalLogger() *log.Logger { + return _log._log +} + +func SetLevel(level LogLevel) { + _log.SetLevel(level) +} +func GetLogLevel() LogLevel { + return _log.level +} + +func SetFlags(flags int) { + _log._log.SetFlags(flags) +} + +func Info(v ...interface{}) { + _log.Info(v...) +} + +func Infof(format string, v ...interface{}) { + _log.Infof(format, v...) +} + +func Panic(v ...interface{}) { + _log.Panic(v...) +} + +func Panicf(format string, v ...interface{}) { + _log.Panicf(format, v...) +} + +func Debug(v ...interface{}) { + _log.Debug(v...) +} + +func Debugf(format string, v ...interface{}) { + _log.Debugf(format, v...) +} + +func Warn(v ...interface{}) { + _log.Warning(v...) +} + +func Warnf(format string, v ...interface{}) { + _log.Warningf(format, v...) +} + +func Warning(v ...interface{}) { + _log.Warning(v...) +} + +func Warningf(format string, v ...interface{}) { + _log.Warningf(format, v...) 
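
The worker package above ships no usage example, so here is a hedged sketch (not part of this commit) of the intended lifecycle: define a `TaskHandler`, start the worker, push tasks through `Sender`, and stop it. The handler type and the task value are hypothetical.

```
package main

import (
	"fmt"
	"sync"

	"github.com/pingcap-incubator/tinykv/kv/util/worker"
)

// printHandler is a hypothetical TaskHandler: Handle is called once for each
// task received on the worker's channel.
type printHandler struct{}

func (printHandler) Handle(t worker.Task) { fmt.Println("got task:", t) }

func main() {
	var wg sync.WaitGroup
	w := worker.NewWorker("demo", &wg)
	w.Start(printHandler{}) // spawns the receive loop in a goroutine
	w.Sender() <- "hello"   // any value satisfies the empty Task interface
	w.Stop()                // enqueues TaskStop, which ends the loop
	wg.Wait()               // Start registered the goroutine on wg
}
```

Since `Stop` only enqueues a `TaskStop` marker on the same FIFO channel, tasks already queued ahead of it are still handled before the goroutine exits.
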
+} + +func Error(v ...interface{}) { + _log.Error(v...) +} + +func Errorf(format string, v ...interface{}) { + _log.Errorf(format, v...) +} + +func Fatal(v ...interface{}) { + _log.Fatal(v...) +} + +func Fatalf(format string, v ...interface{}) { + _log.Fatalf(format, v...) +} + +func SetLevelByString(level string) { + _log.SetLevelByString(level) +} + +func SetHighlighting(highlighting bool) { + _log.SetHighlighting(highlighting) +} + +type Logger struct { + _log *log.Logger + level LogLevel + highlighting bool +} + +func (l *Logger) SetHighlighting(highlighting bool) { + l.highlighting = highlighting +} + +func (l *Logger) SetFlags(flags int) { + l._log.SetFlags(flags) +} + +func (l *Logger) Flags() int { + return l._log.Flags() +} + +func (l *Logger) SetLevel(level LogLevel) { + l.level = level +} + +func (l *Logger) SetLevelByString(level string) { + l.level = StringToLogLevel(level) +} + +func (l *Logger) log(t LogType, v ...interface{}) { + l.logf(t, "%v\n", v) +} + +func (l *Logger) logf(t LogType, format string, v ...interface{}) { + if l.level|LogLevel(t) != l.level { + return + } + + logStr, logColor := LogTypeToString(t) + var s string + if l.highlighting { + s = "\033" + logColor + "m[" + logStr + "] " + fmt.Sprintf(format, v...) + "\033[0m" + } else { + s = "[" + logStr + "] " + fmt.Sprintf(format, v...) + } + l._log.Output(4, s) +} + +func (l *Logger) Fatal(v ...interface{}) { + l.log(LOG_FATAL, v...) + os.Exit(-1) +} + +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.logf(LOG_FATAL, format, v...) + os.Exit(-1) +} + +func (l *Logger) Panic(v ...interface{}) { + l._log.Panic(v...) +} + +func (l *Logger) Panicf(format string, v ...interface{}) { + l._log.Panicf(format, v...) +} + +func (l *Logger) Error(v ...interface{}) { + l.log(LOG_ERROR, v...) +} + +func (l *Logger) Errorf(format string, v ...interface{}) { + l.logf(LOG_ERROR, format, v...) +} + +func (l *Logger) Warning(v ...interface{}) { + l.log(LOG_WARNING, v...) +} + +func (l *Logger) Warningf(format string, v ...interface{}) { + l.logf(LOG_WARNING, format, v...) +} + +func (l *Logger) Debug(v ...interface{}) { + l.log(LOG_DEBUG, v...) +} + +func (l *Logger) Debugf(format string, v ...interface{}) { + l.logf(LOG_DEBUG, format, v...) +} + +func (l *Logger) Info(v ...interface{}) { + l.log(LOG_INFO, v...) +} + +func (l *Logger) Infof(format string, v ...interface{}) { + l.logf(LOG_INFO, format, v...) 
+} + +func StringToLogLevel(level string) LogLevel { + switch level { + case "fatal": + return LOG_LEVEL_FATAL + case "error": + return LOG_LEVEL_ERROR + case "warn": + return LOG_LEVEL_WARN + case "warning": + return LOG_LEVEL_WARN + case "debug": + return LOG_LEVEL_DEBUG + case "info": + return LOG_LEVEL_INFO + } + return LOG_LEVEL_ALL +} + +func LogTypeToString(t LogType) (string, string) { + switch t { + case LOG_FATAL: + return "fatal", "[0;31" + case LOG_ERROR: + return "error", "[0;31" + case LOG_WARNING: + return "warning", "[0;33" + case LOG_DEBUG: + return "debug", "[0;36" + case LOG_INFO: + return "info", "[0;37" + } + return "unknown", "[0;37" +} + +func New() *Logger { + return NewLogger(os.Stderr, "") +} + +func NewLogger(w io.Writer, prefix string) *Logger { + return &Logger{_log: log.New(w, prefix, LstdFlags), level: LOG_LEVEL_ALL, highlighting: true} +} diff --git a/proto/generate_go.sh b/proto/generate_go.sh new file mode 100755 index 00000000..59f4cec6 --- /dev/null +++ b/proto/generate_go.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# This script generates Go representations of Protobuf protocols. It will generate Go code in the pkg subdirectory +# for every protocol in the proto subdirectory. It uses protoc, the protobuf compiler, which must be installed. + +set -ex + +push () { + pushd $1 >/dev/null 2>&1 +} + +pop () { + popd $1 >/dev/null 2>&1 +} + +cmd_exists () { + which "$1" 1>/dev/null 2>&1 +} + +PROGRAM=$(basename "$0") + +if [ -z $GOPATH ]; then + printf "Error: the environment variable GOPATH is not set, please set it before running %s\n" $PROGRAM > /dev/stderr + exit 1 +fi + +GO_PREFIX_PATH=github.com/pingcap-incubator/tinykv/proto/pkg +export PATH=$(pwd)/_tools/bin:$GOPATH/bin:$PATH + +echo "install tools..." +GO111MODULE=off go get github.com/twitchtv/retool +# Ensure we're using the right versions of our tools (see tools.json). +GO111MODULE=off retool -base-dir=$(pwd) sync || exit 1 + +function collect() { + file=$(basename $1) + base_name=$(basename $file ".proto") + mkdir -p ../pkg/$base_name + if [ -z $GO_OUT_M ]; then + GO_OUT_M="M$file=$GO_PREFIX_PATH/$base_name" + else + GO_OUT_M="$GO_OUT_M,M$file=$GO_PREFIX_PATH/$base_name" + fi +} + +cd proto +for file in `ls *.proto` + do + collect $file +done + +echo "generate go code..." +ret=0 + +function gen() { + base_name=$(basename $1 ".proto") + protoc -I.:../include --gofast_out=plugins=grpc,$GO_OUT_M:../pkg/$base_name $1 || ret=$? + cd ../pkg/$base_name + sed -i.bak -E 's/import _ \"gogoproto\"//g' *.pb.go + sed -i.bak -E 's/import fmt \"fmt\"//g' *.pb.go + sed -i.bak -E 's/import io \"io\"//g' *.pb.go + sed -i.bak -E 's/import math \"math\"//g' *.pb.go + sed -i.bak -E 's/import _ \".*rustproto\"//' *.pb.go + rm -f *.bak + goimports -w *.pb.go + cd ../../proto +} + +for file in `ls *.proto` + do + gen $file +done +exit $ret diff --git a/proto/include/gogoproto/gogo.proto b/proto/include/gogoproto/gogo.proto new file mode 100644 index 00000000..bc8d889f --- /dev/null +++ b/proto/include/gogoproto/gogo.proto @@ -0,0 +1,136 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool 
unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; +} diff --git a/proto/include/google/protobuf/any.proto b/proto/include/google/protobuf/any.proto new file mode 100644 index 00000000..b6cc7cb2 --- /dev/null +++ b/proto/include/google/protobuf/any.proto @@ -0,0 +1,154 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... 
+// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/proto/include/google/protobuf/api.proto b/proto/include/google/protobuf/api.proto new file mode 100644 index 00000000..67c1ddbd --- /dev/null +++ b/proto/include/google/protobuf/api.proto @@ -0,0 +1,210 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/source_context.proto"; +import "google/protobuf/type.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "ApiProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +message Api { + + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + string name = 1; + + // The methods of this interface, in unspecified order. + repeated Method methods = 2; + + // Any metadata attached to the interface. + repeated Option options = 3; + + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. 
Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + string version = 4; + + // Source context for the protocol buffer service represented by this + // message. + SourceContext source_context = 5; + + // Included interfaces. See [Mixin][]. + repeated Mixin mixins = 6; + + // The source syntax of the service. + Syntax syntax = 7; +} + +// Method represents a method of an API interface. +message Method { + + // The simple name of this method. + string name = 1; + + // A URL of the input message type. + string request_type_url = 2; + + // If true, the request is streamed. + bool request_streaming = 3; + + // The URL of the output message type. + string response_type_url = 4; + + // If true, the response is streamed. + bool response_streaming = 5; + + // Any metadata attached to the method. + repeated Option options = 6; + + // The source syntax of this method. + Syntax syntax = 7; +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. 
Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +message Mixin { + // The fully qualified name of the interface which is included. + string name = 1; + + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + string root = 2; +} diff --git a/proto/include/google/protobuf/compiler/plugin.proto b/proto/include/google/protobuf/compiler/plugin.proto new file mode 100644 index 00000000..e85c852f --- /dev/null +++ b/proto/include/google/protobuf/compiler/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. 
+message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. 
The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/proto/include/google/protobuf/descriptor.proto b/proto/include/google/protobuf/descriptor.proto new file mode 100644 index 00000000..1598ad7c --- /dev/null +++ b/proto/include/google/protobuf/descriptor.proto @@ -0,0 +1,872 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. 
+ message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. 
+message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. 
See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + //reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. 
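A brief hypothetical sketch (not from this commit) of the message-level options documented above, as they would appear in a proto2 file; the deprecated flag itself is declared immediately below:

```
message LegacyRecord {
  option deprecated = true;                       // may surface as Deprecated annotations
  option no_standard_descriptor_accessor = true;  // frees up the name "descriptor"

  // Without the option above, a field named "descriptor" could clash with the
  // generated descriptor() accessor in C++.
  optional string descriptor = 1;
}
```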
+ optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + //reserved 8; // javalite_serializable + //reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. 
However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + //reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
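To illustrate allow_alias and the deprecation options above, a hypothetical enum (not part of this commit):

```
enum JobState {
  option allow_alias = true;     // permits two names on the same number

  JOB_STATE_UNKNOWN = 0;
  JOB_STATE_RUNNING = 1;
  JOB_STATE_ACTIVE  = 1;         // alias of JOB_STATE_RUNNING
  JOB_STATE_DONE    = 2 [deprecated = true];
}
```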
+ extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
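A hypothetical service sketch (not part of this commit; the request/response types are assumed to be defined elsewhere) showing how the method options documented above, in particular idempotency_level, are attached to individual rpcs:

```
service ExampleKv {
  rpc Get (GetRequest) returns (GetResponse) {
    option idempotency_level = NO_SIDE_EFFECTS;  // read-only; an HTTP mapping may use GET
  }
  rpc Put (PutRequest) returns (PutResponse) {
    option deprecated = true;                    // marks the method deprecated in generated code
  }
}
```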
+ optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. 
If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ optional int32 end = 4; + } +} diff --git a/proto/include/google/protobuf/duration.proto b/proto/include/google/protobuf/duration.proto new file mode 100644 index 00000000..8bbaa8b6 --- /dev/null +++ b/proto/include/google/protobuf/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/proto/include/google/protobuf/empty.proto b/proto/include/google/protobuf/empty.proto new file mode 100644 index 00000000..6057c852 --- /dev/null +++ b/proto/include/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/proto/include/google/protobuf/field_mask.proto b/proto/include/google/protobuf/field_mask.proto new file mode 100644 index 00000000..12161981 --- /dev/null +++ b/proto/include/google/protobuf/field_mask.proto @@ -0,0 +1,252 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. 
Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of the all the API methods, which have any FieldMask type +// field in the request, should verify the included field paths, and return +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +message FieldMask { + // The set of field mask paths. 
+ repeated string paths = 1; +} diff --git a/proto/include/google/protobuf/source_context.proto b/proto/include/google/protobuf/source_context.proto new file mode 100644 index 00000000..8654578c --- /dev/null +++ b/proto/include/google/protobuf/source_context.proto @@ -0,0 +1,48 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "SourceContextProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +message SourceContext { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + string file_name = 1; +} diff --git a/proto/include/google/protobuf/struct.proto b/proto/include/google/protobuf/struct.proto new file mode 100644 index 00000000..4f78641f --- /dev/null +++ b/proto/include/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/proto/include/google/protobuf/timestamp.proto b/proto/include/google/protobuf/timestamp.proto new file mode 100644 index 00000000..150468b5 --- /dev/null +++ b/proto/include/google/protobuf/timestamp.proto @@ -0,0 +1,135 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/proto/include/google/protobuf/type.proto b/proto/include/google/protobuf/type.proto new file mode 100644 index 00000000..fcd15bfd --- /dev/null +++ b/proto/include/google/protobuf/type.proto @@ -0,0 +1,187 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/any.proto"; +import "google/protobuf/source_context.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TypeProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// A protocol buffer message type. +message Type { + // The fully qualified message name. + string name = 1; + // The list of fields. + repeated Field fields = 2; + // The list of types appearing in `oneof` definitions in this type. + repeated string oneofs = 3; + // The protocol buffer options. + repeated Option options = 4; + // The source context. + SourceContext source_context = 5; + // The source syntax. + Syntax syntax = 6; +} + +// A single field of a message type. +message Field { + // Basic field types. + enum Kind { + // Field type unknown. + TYPE_UNKNOWN = 0; + // Field type double. + TYPE_DOUBLE = 1; + // Field type float. + TYPE_FLOAT = 2; + // Field type int64. + TYPE_INT64 = 3; + // Field type uint64. + TYPE_UINT64 = 4; + // Field type int32. + TYPE_INT32 = 5; + // Field type fixed64. + TYPE_FIXED64 = 6; + // Field type fixed32. + TYPE_FIXED32 = 7; + // Field type bool. + TYPE_BOOL = 8; + // Field type string. + TYPE_STRING = 9; + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP = 10; + // Field type message. + TYPE_MESSAGE = 11; + // Field type bytes. + TYPE_BYTES = 12; + // Field type uint32. + TYPE_UINT32 = 13; + // Field type enum. + TYPE_ENUM = 14; + // Field type sfixed32. + TYPE_SFIXED32 = 15; + // Field type sfixed64. + TYPE_SFIXED64 = 16; + // Field type sint32. + TYPE_SINT32 = 17; + // Field type sint64. + TYPE_SINT64 = 18; + }; + + // Whether a field is optional, required, or repeated. + enum Cardinality { + // For fields with unknown cardinality. + CARDINALITY_UNKNOWN = 0; + // For optional fields. + CARDINALITY_OPTIONAL = 1; + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED = 2; + // For repeated fields. + CARDINALITY_REPEATED = 3; + }; + + // The field type. + Kind kind = 1; + // The field cardinality. + Cardinality cardinality = 2; + // The field number. + int32 number = 3; + // The field name. + string name = 4; + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. 
+ string type_url = 6; + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + int32 oneof_index = 7; + // Whether to use alternative packed wire representation. + bool packed = 8; + // The protocol buffer options. + repeated Option options = 9; + // The field JSON name. + string json_name = 10; + // The string value of the default value of this field. Proto2 syntax only. + string default_value = 11; +} + +// Enum type definition. +message Enum { + // Enum type name. + string name = 1; + // Enum value definitions. + repeated EnumValue enumvalue = 2; + // Protocol buffer options. + repeated Option options = 3; + // The source context. + SourceContext source_context = 4; + // The source syntax. + Syntax syntax = 5; +} + +// Enum value definition. +message EnumValue { + // Enum value name. + string name = 1; + // Enum value number. + int32 number = 2; + // Protocol buffer options. + repeated Option options = 3; +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +message Option { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + string name = 1; + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Any value = 2; +} + +// The syntax in which a protocol buffer element is defined. +enum Syntax { + // Syntax `proto2`. + SYNTAX_PROTO2 = 0; + // Syntax `proto3`. + SYNTAX_PROTO3 = 1; +} diff --git a/proto/include/google/protobuf/wrappers.proto b/proto/include/google/protobuf/wrappers.proto new file mode 100644 index 00000000..c5632e5c --- /dev/null +++ b/proto/include/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/proto/pkg/coprocessor/coprocessor.pb.go b/proto/pkg/coprocessor/coprocessor.pb.go new file mode 100644 index 00000000..7f824378 --- /dev/null +++ b/proto/pkg/coprocessor/coprocessor.pb.go @@ -0,0 +1,1137 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: coprocessor.proto + +package coprocessor + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" + + errorpb "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + + kvrpcpb "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + + github_com_pingcap_kvproto_pkg_sharedbytes "github.com/pingcap/kvproto/pkg/sharedbytes" +) + +// Reference imports to suppress errors if they are not otherwise used. 
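+// The rest of this file is the gogo-generated plumbing for the three messages
+// declared in coprocessor.proto -- KeyRange, Request and Response -- plus the
+// Marshal/Unmarshal, Size and varint helper functions they rely on.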
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// [start, end) +type KeyRange struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyRange) Reset() { *m = KeyRange{} } +func (m *KeyRange) String() string { return proto.CompactTextString(m) } +func (*KeyRange) ProtoMessage() {} +func (*KeyRange) Descriptor() ([]byte, []int) { + return fileDescriptor_coprocessor_be2a9258674b12d6, []int{0} +} +func (m *KeyRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KeyRange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KeyRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyRange.Merge(dst, src) +} +func (m *KeyRange) XXX_Size() int { + return m.Size() +} +func (m *KeyRange) XXX_DiscardUnknown() { + xxx_messageInfo_KeyRange.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyRange proto.InternalMessageInfo + +func (m *KeyRange) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *KeyRange) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +type Request struct { + Context *kvrpcpb.Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + Tp int64 `protobuf:"varint,2,opt,name=tp,proto3" json:"tp,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + StartTs uint64 `protobuf:"varint,7,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` + Ranges []*KeyRange `protobuf:"bytes,4,rep,name=ranges" json:"ranges,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_coprocessor_be2a9258674b12d6, []int{1} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetContext() *kvrpcpb.Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *Request) GetTp() int64 { + if m != nil { + return m.Tp + } + return 0 +} + +func (m *Request) 
GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Request) GetStartTs() uint64 { + if m != nil { + return m.StartTs + } + return 0 +} + +func (m *Request) GetRanges() []*KeyRange { + if m != nil { + return m.Ranges + } + return nil +} + +type Response struct { + Data github_com_pingcap_kvproto_pkg_sharedbytes.SharedBytes `protobuf:"bytes,1,opt,name=data,proto3,customtype=github.com/pingcap/kvproto/pkg/sharedbytes.SharedBytes" json:"data"` + RegionError *errorpb.Error `protobuf:"bytes,2,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Locked *kvrpcpb.LockInfo `protobuf:"bytes,3,opt,name=locked" json:"locked,omitempty"` + OtherError string `protobuf:"bytes,4,opt,name=other_error,json=otherError,proto3" json:"other_error,omitempty"` + Range *KeyRange `protobuf:"bytes,5,opt,name=range" json:"range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_coprocessor_be2a9258674b12d6, []int{2} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(dst, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *Response) GetLocked() *kvrpcpb.LockInfo { + if m != nil { + return m.Locked + } + return nil +} + +func (m *Response) GetOtherError() string { + if m != nil { + return m.OtherError + } + return "" +} + +func (m *Response) GetRange() *KeyRange { + if m != nil { + return m.Range + } + return nil +} + +func init() { + proto.RegisterType((*KeyRange)(nil), "coprocessor.KeyRange") + proto.RegisterType((*Request)(nil), "coprocessor.Request") + proto.RegisterType((*Response)(nil), "coprocessor.Response") +} +func (m *KeyRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyRange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Start) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(len(m.Start))) + i += copy(dAtA[i:], m.Start) + } + if len(m.End) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(len(m.End))) + i += copy(dAtA[i:], m.End) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.Context.Size())) + n1, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Tp != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.Tp)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x22 + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTs != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.StartTs)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.RegionError != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.RegionError.Size())) + n3, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Locked != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.Locked.Size())) + n4, err := m.Locked.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if len(m.OtherError) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(len(m.OtherError))) + i += copy(dAtA[i:], m.OtherError) + } + if m.Range != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintCoprocessor(dAtA, i, uint64(m.Range.Size())) + n5, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintCoprocessor(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KeyRange) Size() (n int) { + var l int + _ = l + l = len(m.Start) + if l > 0 { + n += 1 + l + sovCoprocessor(uint64(l)) + } + l = len(m.End) + if l > 0 { + n += 1 + l + sovCoprocessor(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Request) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovCoprocessor(uint64(l)) + } + if m.Tp != 0 { + n += 1 + sovCoprocessor(uint64(m.Tp)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovCoprocessor(uint64(l)) + } + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovCoprocessor(uint64(l)) + } + } + if m.StartTs != 0 { + n += 1 + sovCoprocessor(uint64(m.StartTs)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Response) Size() (n int) { + var l int + _ = l + l = m.Data.Size() + n += 1 + l + sovCoprocessor(uint64(l)) + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovCoprocessor(uint64(l)) + } + if m.Locked != nil { + l = m.Locked.Size() + n += 
1 + l + sovCoprocessor(uint64(l)) + } + l = len(m.OtherError) + if l > 0 { + n += 1 + l + sovCoprocessor(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovCoprocessor(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovCoprocessor(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozCoprocessor(x uint64) (n int) { + return sovCoprocessor(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Start = append(m.Start[:0], dAtA[iNdEx:postIndex]...) + if m.Start == nil { + m.Start = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.End = append(m.End[:0], dAtA[iNdEx:postIndex]...) + if m.End == nil { + m.End = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCoprocessor(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCoprocessor + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
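+ // Unknown fields are preserved rather than dropped: skipCoprocessor (defined
+ // near the end of this file) reports how many bytes the unrecognized field
+ // occupies, the raw bytes are stashed in XXX_unrecognized so they survive a
+ // round trip through Marshal, and decoding then resumes just past them.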
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &kvrpcpb.Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType) + } + m.Tp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Tp |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
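+ // The append into m.Data[:0] copies the payload out of dAtA instead of
+ // aliasing it, so the caller may reuse its input buffer; the nil check below
+ // keeps a present-but-empty field from coming back as a nil slice.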
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, &KeyRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) + } + m.StartTs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTs |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCoprocessor(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCoprocessor + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Locked", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Locked == nil { + m.Locked = &kvrpcpb.LockInfo{} + } + if err := m.Locked.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OtherError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OtherError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCoprocessor + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &KeyRange{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCoprocessor(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCoprocessor + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
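+ // Every Unmarshal in this file follows the same loop: read a varint key,
+ // split it into field number and wire type, switch on the field number, and
+ // take the skip-and-preserve path this default case implements for anything
+ // unrecognized.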
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCoprocessor(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthCoprocessor + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCoprocessor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCoprocessor(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCoprocessor = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCoprocessor = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("coprocessor.proto", fileDescriptor_coprocessor_be2a9258674b12d6) } + +var fileDescriptor_coprocessor_be2a9258674b12d6 = []byte{ + // 400 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x51, 0x3d, 0x6f, 0xd4, 0x40, + 0x10, 0xcd, 0xde, 0x37, 0xe3, 0x24, 0xba, 0xac, 0x82, 0x64, 0x52, 0xf8, 0xac, 0xab, 0x0c, 0x08, + 0x5b, 0x18, 0x89, 0x92, 0xe2, 0x10, 0x05, 0x82, 0x6a, 0xa1, 0x8f, 0xec, 0xf5, 0xe2, 0x3b, 0x19, + 0x3c, 0xcb, 0xee, 0x26, 0x22, 0x7f, 0x81, 0x5f, 0x40, 0xcf, 0x9f, 0x49, 0x49, 0x4d, 0x11, 0xa1, + 0xe3, 0x8f, 0x20, 0xcf, 0xda, 0xd1, 0x35, 0x54, 0x7e, 0xf3, 0xfc, 0xfc, 0x66, 0xde, 0x33, 0x9c, + 0x49, 0xd4, 0x06, 0xa5, 0xb2, 0x16, 0x4d, 0xaa, 0x0d, 0x3a, 0xe4, 0xc1, 0x01, 0x75, 0x71, 0xa2, + 0x8c, 0x41, 0xa3, 0x4b, 0xff, 0xee, 0xe2, 0xa4, 0xb9, 0x36, 0x5a, 0xde, 0x8f, 0xe7, 0x35, 0xd6, + 0x48, 0x30, 0xeb, 0x90, 0x67, 0xd7, 0x39, 0x2c, 0xde, 0xa9, 0x1b, 0x51, 0xb4, 0xb5, 0xe2, 0xe7, + 0x30, 0xb5, 0xae, 0x30, 0x2e, 0x64, 0x31, 0x4b, 0x8e, 0x85, 0x1f, 0xf8, 0x12, 0xc6, 0xaa, 0xad, + 0xc2, 0x11, 0x71, 0x1d, 0x5c, 0xff, 0x64, 0x30, 0x17, 0xea, 0xeb, 0x95, 0xb2, 0x8e, 0x3f, 0x81, + 0xb9, 0xc4, 0xd6, 0xa9, 0x6f, 0xfe, 0xab, 0x20, 0x5f, 0xa6, 0xc3, 0xda, 0xd7, 0x9e, 0x17, 0x83, + 0x80, 0x9f, 0xc2, 0xc8, 0x69, 0x32, 0x1a, 0x8b, 0x91, 0xd3, 0x9c, 0xc3, 0xa4, 0x2a, 0x5c, 0x11, + 0x8e, 0xc9, 0x9a, 0x30, 0x7f, 0x06, 0x33, 0xd3, 0x1d, 0x63, 0xc3, 0x49, 0x3c, 0x4e, 0x82, 0xfc, + 
0x61, 0x7a, 0x18, 0x7a, 0x38, 0x55, 0xf4, 0x22, 0xfe, 0x08, 0x16, 0x74, 0xe5, 0xa5, 0xb3, 0xe1, + 0x3c, 0x66, 0xc9, 0x44, 0xcc, 0x69, 0xfe, 0x68, 0xd7, 0xdf, 0x47, 0xb0, 0x10, 0xca, 0x6a, 0x6c, + 0xad, 0xe2, 0xa2, 0x5f, 0x45, 0xc9, 0x36, 0xaf, 0x6e, 0xef, 0x56, 0x47, 0xbf, 0xef, 0x56, 0x2f, + 0xeb, 0x9d, 0xdb, 0x5e, 0x95, 0xa9, 0xc4, 0x2f, 0x99, 0xde, 0xb5, 0xb5, 0x2c, 0x74, 0xd6, 0x5c, + 0xfb, 0x8e, 0x74, 0x53, 0x67, 0x76, 0x5b, 0x18, 0x55, 0x95, 0x37, 0x4e, 0xd9, 0xf4, 0x03, 0xe1, + 0x4d, 0x87, 0xfb, 0x53, 0x9f, 0xc3, 0xb1, 0x51, 0xf5, 0x0e, 0xdb, 0x4b, 0xea, 0x9d, 0x82, 0x05, + 0xf9, 0x69, 0x3a, 0xfc, 0x85, 0x37, 0xdd, 0x53, 0x04, 0x5e, 0x43, 0x03, 0x7f, 0x0c, 0xb3, 0xcf, + 0x28, 0x1b, 0x55, 0x51, 0xe6, 0x20, 0x3f, 0xbb, 0x2f, 0xeb, 0x3d, 0xca, 0xe6, 0x6d, 0xfb, 0x09, + 0x45, 0x2f, 0xe0, 0x2b, 0x08, 0xd0, 0x6d, 0x95, 0xe9, 0xcd, 0x27, 0x31, 0x4b, 0x1e, 0x08, 0x20, + 0xca, 0x7b, 0x3d, 0x85, 0x29, 0x95, 0x10, 0x4e, 0xc9, 0xea, 0x3f, 0x45, 0x79, 0xcd, 0x66, 0x79, + 0xbb, 0x8f, 0xd8, 0xaf, 0x7d, 0xc4, 0xfe, 0xec, 0x23, 0xf6, 0xe3, 0x6f, 0x74, 0x54, 0xce, 0x28, + 0xe6, 0x8b, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x25, 0xbf, 0xd1, 0x29, 0x55, 0x02, 0x00, 0x00, +} diff --git a/proto/pkg/eraftpb/eraftpb.pb.go b/proto/pkg/eraftpb/eraftpb.pb.go new file mode 100644 index 00000000..927bc873 --- /dev/null +++ b/proto/pkg/eraftpb/eraftpb.pb.go @@ -0,0 +1,2320 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: eraftpb.proto + +package eraftpb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryType_EntryNormal EntryType = 0 + EntryType_EntryConfChange EntryType = 1 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, +} + +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (EntryType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{0} +} + +// Some MessageType defined here are local messages which not come from the network, but should +// also use the Step method to handle +type MessageType int32 + +const ( + // 'MessageType_MsgHup' is a local message used for election. If an election timeout happened, + // the node should passes 'MessageType_MsgHup' to its Step method and start a new election. + MessageType_MsgHup MessageType = 0 + // 'MessageType_MsgBeat' is a local message that signals the leader to send a heartbeat + // of the 'MessageType_MsgHeartbeat' type to its followers. + MessageType_MsgBeat MessageType = 1 + // 'MessageType_MsgPropose' is a local message that proposes to append data to the leader's log entries. + MessageType_MsgPropose MessageType = 2 + // 'MessageType_MsgAppend' contains log entries to replicate. + MessageType_MsgAppend MessageType = 3 + // 'MessageType_MsgAppendResponse' is response to log replication request('MessageType_MsgAppend'). 
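+ // A follower that cannot append the entries is expected to set Reject on its
+ // reply Message (see the Reject and RejectHint fields below), letting the
+ // leader back off to a log index the follower actually has.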
+ MessageType_MsgAppendResponse MessageType = 4 + // 'MessageType_MsgRequestVote' requests votes for election. + MessageType_MsgRequestVote MessageType = 5 + // 'MessageType_MsgRequestVoteResponse' contains responses from voting request. + MessageType_MsgRequestVoteResponse MessageType = 6 + // 'MessageType_MsgSnapshot' requests to install a snapshot message. + MessageType_MsgSnapshot MessageType = 7 + // 'MessageType_MsgHeartbeat' sends heartbeat from leader to its followers. + MessageType_MsgHeartbeat MessageType = 8 + // 'MessageType_MsgHeartbeatResponse' is a response to 'MessageType_MsgHeartbeat'. + MessageType_MsgHeartbeatResponse MessageType = 9 + // 'MessageType_MsgTransferLeader' requests the leader to transfer its leadership. + MessageType_MsgTransferLeader MessageType = 11 + // 'MessageType_MsgTimeoutNow' send from the leader to the leadership transfer target, to let + // the transfer target timeout immediately and start a new election. + MessageType_MsgTimeoutNow MessageType = 12 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgPropose", + 3: "MsgAppend", + 4: "MsgAppendResponse", + 5: "MsgRequestVote", + 6: "MsgRequestVoteResponse", + 7: "MsgSnapshot", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResponse", + 11: "MsgTransferLeader", + 12: "MsgTimeoutNow", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgPropose": 2, + "MsgAppend": 3, + "MsgAppendResponse": 4, + "MsgRequestVote": 5, + "MsgRequestVoteResponse": 6, + "MsgSnapshot": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResponse": 9, + "MsgTransferLeader": 11, + "MsgTimeoutNow": 12, +} + +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (MessageType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{1} +} + +type ConfChangeType int32 + +const ( + ConfChangeType_AddNode ConfChangeType = 0 + ConfChangeType_RemoveNode ConfChangeType = 1 +) + +var ConfChangeType_name = map[int32]string{ + 0: "AddNode", + 1: "RemoveNode", +} +var ConfChangeType_value = map[string]int32{ + "AddNode": 0, + "RemoveNode": 1, +} + +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{2} +} + +// The entry is a type of change that needs to be applied. It contains two data fields. +// While the fields are built into the model; their usage is determined by the entry_type. +// +// For normal entries, the data field should contain the data change that should be applied. +// The context field can be used for any contextual data that might be relevant to the +// application of the data. +// +// For configuration changes, the data will contain the ConfChange message and the +// context will provide anything needed to assist the configuration change. The context +// is for the user to set and use in this case. 
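+//
+// As a rough sketch (not part of the generated file), a configuration-change
+// entry could be assembled from the types defined below like so:
+//
+//	cc := &ConfChange{ChangeType: ConfChangeType_AddNode, NodeId: 4} // illustrative node id
+//	data, _ := cc.Marshal()                                          // errors elided for the sketch
+//	entry := &Entry{EntryType: EntryType_EntryConfChange, Data: data}
+//
+// whereas a normal entry carries the application's own payload in Data
+// together with EntryType_EntryNormal.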
+type Entry struct { + EntryType EntryType `protobuf:"varint,1,opt,name=entry_type,json=entryType,proto3,enum=eraftpb.EntryType" json:"entry_type,omitempty"` + Term uint64 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"` + Index uint64 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{0} +} +func (m *Entry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Entry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entry.Merge(dst, src) +} +func (m *Entry) XXX_Size() int { + return m.Size() +} +func (m *Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Entry proto.InternalMessageInfo + +func (m *Entry) GetEntryType() EntryType { + if m != nil { + return m.EntryType + } + return EntryType_EntryNormal +} + +func (m *Entry) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +func (m *Entry) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Entry) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// SnapshotMetadata cantains the log index and term of the last log applied to this +// Snapshot, along with the membership information of the time the last log applied. 
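+// The membership information lives in the conf_state field, i.e. the ConfState
+// message defined later in this file, whose nodes list records the peers that
+// belonged to the raft group when the snapshot was taken.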
+type SnapshotMetadata struct { + ConfState *ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state,omitempty"` + Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Term uint64 `protobuf:"varint,3,opt,name=term,proto3" json:"term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{1} +} +func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapshotMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotMetadata.Merge(dst, src) +} +func (m *SnapshotMetadata) XXX_Size() int { + return m.Size() +} +func (m *SnapshotMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo + +func (m *SnapshotMetadata) GetConfState() *ConfState { + if m != nil { + return m.ConfState + } + return nil +} + +func (m *SnapshotMetadata) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *SnapshotMetadata) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Metadata *SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{2} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return m.Size() +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Snapshot) GetMetadata() *SnapshotMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Message struct { + MsgType MessageType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=eraftpb.MessageType" json:"msg_type,omitempty"` + To uint64 `protobuf:"varint,2,opt,name=to,proto3" json:"to,omitempty"` + From uint64 `protobuf:"varint,3,opt,name=from,proto3" 
json:"from,omitempty"` + Term uint64 `protobuf:"varint,4,opt,name=term,proto3" json:"term,omitempty"` + LogTerm uint64 `protobuf:"varint,5,opt,name=log_term,json=logTerm,proto3" json:"log_term,omitempty"` + Index uint64 `protobuf:"varint,6,opt,name=index,proto3" json:"index,omitempty"` + Entries []*Entry `protobuf:"bytes,7,rep,name=entries" json:"entries,omitempty"` + Commit uint64 `protobuf:"varint,8,opt,name=commit,proto3" json:"commit,omitempty"` + Snapshot *Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot,omitempty"` + Reject bool `protobuf:"varint,10,opt,name=reject,proto3" json:"reject,omitempty"` + // TODO: Delete Start + RejectHint uint64 `protobuf:"varint,11,opt,name=reject_hint,json=rejectHint,proto3" json:"reject_hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{3} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetMsgType() MessageType { + if m != nil { + return m.MsgType + } + return MessageType_MsgHup +} + +func (m *Message) GetTo() uint64 { + if m != nil { + return m.To + } + return 0 +} + +func (m *Message) GetFrom() uint64 { + if m != nil { + return m.From + } + return 0 +} + +func (m *Message) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +func (m *Message) GetLogTerm() uint64 { + if m != nil { + return m.LogTerm + } + return 0 +} + +func (m *Message) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Message) GetEntries() []*Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *Message) GetCommit() uint64 { + if m != nil { + return m.Commit + } + return 0 +} + +func (m *Message) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *Message) GetReject() bool { + if m != nil { + return m.Reject + } + return false +} + +func (m *Message) GetRejectHint() uint64 { + if m != nil { + return m.RejectHint + } + return 0 +} + +// HardState contains the state of a node, including the current term, commit index +// and the vote record +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Vote uint64 `protobuf:"varint,2,opt,name=vote,proto3" json:"vote,omitempty"` + Commit uint64 `protobuf:"varint,3,opt,name=commit,proto3" json:"commit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) 
Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{4} +} +func (m *HardState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HardState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *HardState) XXX_Merge(src proto.Message) { + xxx_messageInfo_HardState.Merge(dst, src) +} +func (m *HardState) XXX_Size() int { + return m.Size() +} +func (m *HardState) XXX_DiscardUnknown() { + xxx_messageInfo_HardState.DiscardUnknown(m) +} + +var xxx_messageInfo_HardState proto.InternalMessageInfo + +func (m *HardState) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +func (m *HardState) GetVote() uint64 { + if m != nil { + return m.Vote + } + return 0 +} + +func (m *HardState) GetCommit() uint64 { + if m != nil { + return m.Commit + } + return 0 +} + +// ConfState contains the current membership information of the raft group +type ConfState struct { + // all node id + Nodes []uint64 `protobuf:"varint,1,rep,packed,name=nodes" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{5} +} +func (m *ConfState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConfState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ConfState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfState.Merge(dst, src) +} +func (m *ConfState) XXX_Size() int { + return m.Size() +} +func (m *ConfState) XXX_DiscardUnknown() { + xxx_messageInfo_ConfState.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfState proto.InternalMessageInfo + +func (m *ConfState) GetNodes() []uint64 { + if m != nil { + return m.Nodes + } + return nil +} + +// ConfChange is the data that attach on entry with EntryConfChange type +type ConfChange struct { + ChangeType ConfChangeType `protobuf:"varint,1,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` + // node will be add/remove + NodeId uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Context []byte `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { + return fileDescriptor_eraftpb_5a4d1f1b47871789, []int{6} +} +func (m *ConfChange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic) + } else { + 
b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ConfChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfChange.Merge(dst, src) +} +func (m *ConfChange) XXX_Size() int { + return m.Size() +} +func (m *ConfChange) XXX_DiscardUnknown() { + xxx_messageInfo_ConfChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfChange proto.InternalMessageInfo + +func (m *ConfChange) GetChangeType() ConfChangeType { + if m != nil { + return m.ChangeType + } + return ConfChangeType_AddNode +} + +func (m *ConfChange) GetNodeId() uint64 { + if m != nil { + return m.NodeId + } + return 0 +} + +func (m *ConfChange) GetContext() []byte { + if m != nil { + return m.Context + } + return nil +} + +func init() { + proto.RegisterType((*Entry)(nil), "eraftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "eraftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "eraftpb.Snapshot") + proto.RegisterType((*Message)(nil), "eraftpb.Message") + proto.RegisterType((*HardState)(nil), "eraftpb.HardState") + proto.RegisterType((*ConfState)(nil), "eraftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "eraftpb.ConfChange") + proto.RegisterEnum("eraftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("eraftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("eraftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.EntryType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.EntryType)) + } + if m.Term != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Term)) + } + if m.Index != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Index)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ConfState != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Index != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Index)) + } + if m.Term != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if 
m.Metadata != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MsgType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.MsgType)) + } + if m.To != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.To)) + } + if m.From != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.From)) + } + if m.Term != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Term)) + } + if m.LogTerm != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.LogTerm)) + } + if m.Index != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Index)) + } + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Commit != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Commit)) + } + if m.Snapshot != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Reject { + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.RejectHint != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.RejectHint)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Term != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Term)) + } + if m.Vote != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Vote)) + } + if m.Commit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.Commit)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + dAtA5 := make([]byte, len(m.Nodes)*10) + var j4 int + for _, num := range m.Nodes { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + dAtA[i] = 0xa + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(j4)) + i += copy(dAtA[i:], dAtA5[:j4]) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ChangeType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.ChangeType)) + } + if m.NodeId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(m.NodeId)) + } + if len(m.Context) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEraftpb(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintEraftpb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + if m.EntryType != 0 { + n += 1 + sovEraftpb(uint64(m.EntryType)) + } + if m.Term != 0 { + n += 1 + sovEraftpb(uint64(m.Term)) + } + if m.Index != 0 { + n += 1 + sovEraftpb(uint64(m.Index)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovEraftpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + if m.ConfState != nil { + l = m.ConfState.Size() + n += 1 + l + sovEraftpb(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovEraftpb(uint64(m.Index)) + } + if m.Term != 0 { + n += 1 + sovEraftpb(uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovEraftpb(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovEraftpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + if m.MsgType != 0 { + n += 1 + sovEraftpb(uint64(m.MsgType)) + } + if m.To != 0 { + n += 1 + sovEraftpb(uint64(m.To)) + } + if m.From != 0 { + n += 1 + sovEraftpb(uint64(m.From)) + } + if m.Term != 0 { + n += 1 + sovEraftpb(uint64(m.Term)) + } + if m.LogTerm != 0 { + n += 1 + sovEraftpb(uint64(m.LogTerm)) + } + if m.Index != 0 { + n += 1 + sovEraftpb(uint64(m.Index)) + } + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovEraftpb(uint64(l)) + } + } + if m.Commit != 0 { + n += 1 + sovEraftpb(uint64(m.Commit)) + } + if m.Snapshot != nil { + l = m.Snapshot.Size() + n += 1 + l + sovEraftpb(uint64(l)) + } + if m.Reject { + n += 2 + } + if m.RejectHint != 0 { + n += 1 + sovEraftpb(uint64(m.RejectHint)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + if m.Term != 0 { + n += 1 + sovEraftpb(uint64(m.Term)) + } + if m.Vote != 0 { + n += 1 + sovEraftpb(uint64(m.Vote)) + } + if m.Commit != 0 { + n += 1 + sovEraftpb(uint64(m.Commit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + l = 0 + for _, e := range m.Nodes { + l += sovEraftpb(uint64(e)) + } + n += 1 + sovEraftpb(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + if m.ChangeType != 0 { + n += 1 + 
sovEraftpb(uint64(m.ChangeType)) + } + if m.NodeId != 0 { + n += 1 + sovEraftpb(uint64(m.NodeId)) + } + l = len(m.Context) + if l > 0 { + n += 1 + l + sovEraftpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEraftpb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEraftpb(x uint64) (n int) { + return sovEraftpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EntryType", wireType) + } + m.EntryType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EntryType |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfState == nil { + m.ConfState = &ConfState{} + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &SnapshotMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MsgType", wireType) + } + m.MsgType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MsgType |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Snapshot == nil { + m.Snapshot = &Snapshot{} + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangeType", wireType) + } + m.ChangeType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChangeType |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeId", wireType) + } + m.NodeId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEraftpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEraftpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEraftpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEraftpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEraftpb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEraftpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEraftpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEraftpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEraftpb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEraftpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEraftpb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEraftpb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEraftpb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("eraftpb.proto", fileDescriptor_eraftpb_5a4d1f1b47871789) } + +var fileDescriptor_eraftpb_5a4d1f1b47871789 = []byte{ + // 683 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x94, 0xdf, 0x4e, 0xdb, 0x4a, + 0x10, 0xc6, 0xb3, 0xf9, 0x67, 0x7b, 0x1c, 0xc2, 0x32, 0x87, 0x03, 0xe6, 0x5c, 0xe4, 0xa4, 0xb9, + 0x8a, 0x90, 0xa0, 0x82, 0xaa, 0x52, 0x6f, 0x01, 0x55, 0xa2, 0x6a, 0x8d, 0x2a, 0x43, 0x7b, 0x1b, + 0x99, 0x78, 0x62, 0x52, 0x61, 0xaf, 0xeb, 0x5d, 0x28, 0x79, 0x93, 0x3e, 0x52, 0x6f, 0x2a, 0xf5, + 0x11, 0x2a, 0xfa, 0x08, 0x7d, 0x81, 0x6a, 0x37, 0xb6, 0xe3, 0xd0, 0xbb, 0xf9, 0xc6, 0xb3, 0x3b, + 0xbf, 0xf9, 0x66, 0x13, 0xd8, 0xa0, 0x3c, 0x9c, 0xa9, 0xec, 0xfa, 0x30, 0xcb, 0x85, 0x12, 0x68, + 0x15, 0x72, 0xf4, 0x00, 0x9d, 0xd7, 0xa9, 0xca, 0x17, 0x78, 0x04, 0x40, 0x3a, 0x98, 0xa8, 0x45, + 0x46, 0x1e, 0x1b, 0xb2, 0x71, 0xff, 0x18, 0x0f, 0xcb, 0x53, 0xa6, 0xe6, 0x6a, 0x91, 0x51, 0xe0, + 0x50, 0x19, 0x22, 0x42, 0x5b, 0x51, 0x9e, 0x78, 0xcd, 0x21, 0x1b, 0xb7, 0x03, 0x13, 0xe3, 0x36, + 0x74, 0xe6, 0x69, 0x44, 0x0f, 0x5e, 0xcb, 0x24, 0x97, 0x42, 0x57, 0x46, 0xa1, 0x0a, 0xbd, 0xf6, + 0x90, 0x8d, 0x7b, 0x81, 0x89, 0x47, 0x02, 0xf8, 0x65, 0x1a, 0x66, 0xf2, 0x46, 0x28, 0x9f, 0x54, + 0xa8, 0x73, 0x1a, 0x62, 0x2a, 0xd2, 0xd9, 0x44, 0xaa, 0x50, 0x2d, 0x21, 0xdc, 0x1a, 0xc4, 0x99, + 0x48, 0x67, 0x97, 0xfa, 0x4b, 0xe0, 0x4c, 0xcb, 0x70, 0xd5, 0xb0, 0xf9, 0xa4, 0xa1, 0x41, 0x6b, + 0xad, 0xd0, 0x46, 0x1f, 0xc0, 0x2e, 0x1b, 0x56, 
0x40, 0x6c, 0x05, 0x84, 0x2f, 0xc1, 0x4e, 0x0a, + 0x10, 0x73, 0x99, 0x7b, 0xbc, 0x57, 0xb5, 0x7e, 0x4a, 0x1a, 0x54, 0xa5, 0xa3, 0xef, 0x4d, 0xb0, + 0x7c, 0x92, 0x32, 0x8c, 0x09, 0x9f, 0x83, 0x9d, 0xc8, 0xb8, 0x6e, 0xe1, 0x76, 0x75, 0x45, 0x51, + 0x63, 0x4c, 0xb4, 0x12, 0x19, 0x1b, 0x0b, 0xfb, 0xd0, 0x54, 0xa2, 0x40, 0x6f, 0x2a, 0xa1, 0xb9, + 0x66, 0xb9, 0xa8, 0xb8, 0x75, 0x5c, 0xcd, 0xd2, 0xae, 0xd9, 0xbc, 0x07, 0xf6, 0xad, 0x88, 0x27, + 0x26, 0xdf, 0x31, 0x79, 0xeb, 0x56, 0xc4, 0x57, 0x6b, 0x1b, 0xe8, 0xd6, 0x0d, 0x19, 0x83, 0xa5, + 0x17, 0x37, 0x27, 0xe9, 0x59, 0xc3, 0xd6, 0xd8, 0x3d, 0xee, 0xaf, 0xef, 0x36, 0x28, 0x3f, 0xe3, + 0x0e, 0x74, 0xa7, 0x22, 0x49, 0xe6, 0xca, 0xb3, 0xcd, 0x05, 0x85, 0xc2, 0x03, 0xb0, 0x65, 0xe1, + 0x82, 0xe7, 0x18, 0x7b, 0xb6, 0xfe, 0xb2, 0x27, 0xa8, 0x4a, 0xf4, 0x35, 0x39, 0x7d, 0xa2, 0xa9, + 0xf2, 0x60, 0xc8, 0xc6, 0x76, 0x50, 0x28, 0xfc, 0x1f, 0xdc, 0x65, 0x34, 0xb9, 0x99, 0xa7, 0xca, + 0x73, 0x4d, 0x0f, 0x58, 0xa6, 0xce, 0xe7, 0xa9, 0x1a, 0xbd, 0x05, 0xe7, 0x3c, 0xcc, 0xa3, 0xe5, + 0x76, 0xcb, 0xd9, 0x59, 0x6d, 0x76, 0x84, 0xf6, 0xbd, 0x50, 0x54, 0x3e, 0x3b, 0x1d, 0xd7, 0xa0, + 0x5b, 0x75, 0xe8, 0xd1, 0x33, 0x70, 0xce, 0xea, 0x4f, 0x25, 0x15, 0x11, 0x49, 0x8f, 0x0d, 0x5b, + 0xda, 0x19, 0x23, 0x46, 0x0b, 0x00, 0x5d, 0x72, 0x76, 0x13, 0xa6, 0x31, 0xe1, 0x2b, 0x70, 0xa7, + 0x26, 0xaa, 0x2f, 0x71, 0x77, 0xed, 0x09, 0x2e, 0x2b, 0xcd, 0x1e, 0x61, 0x5a, 0xc5, 0xb8, 0x0b, + 0x96, 0xbe, 0x70, 0x32, 0x8f, 0x0a, 0xb2, 0xae, 0x96, 0x6f, 0x22, 0xf4, 0xc0, 0x9a, 0x8a, 0x54, + 0xd1, 0xc3, 0x12, 0xae, 0x17, 0x94, 0x72, 0xff, 0x08, 0x9c, 0xea, 0x87, 0x85, 0x9b, 0xe0, 0x1a, + 0x71, 0x21, 0xf2, 0x24, 0xbc, 0xe5, 0x0d, 0xfc, 0x07, 0x36, 0x4d, 0x62, 0xd5, 0x93, 0xb3, 0xfd, + 0xdf, 0x0c, 0xdc, 0xda, 0x4b, 0x42, 0x80, 0xae, 0x2f, 0xe3, 0xf3, 0xbb, 0x8c, 0x37, 0xd0, 0x05, + 0xcb, 0x97, 0xf1, 0x29, 0x85, 0x8a, 0x33, 0xec, 0x03, 0xf8, 0x32, 0x7e, 0x9f, 0x8b, 0x4c, 0x48, + 0xe2, 0x4d, 0xdc, 0x00, 0xc7, 0x97, 0xf1, 0x49, 0x96, 0x51, 0x1a, 0xf1, 0x16, 0xfe, 0x0b, 0x5b, + 0x95, 0x0c, 0x48, 0x66, 0x22, 0x95, 0xc4, 0xdb, 0x88, 0xd0, 0xf7, 0x65, 0x1c, 0xd0, 0xe7, 0x3b, + 0x92, 0xea, 0xa3, 0x50, 0xc4, 0x3b, 0xf8, 0x1f, 0xec, 0xac, 0xe7, 0xaa, 0xfa, 0xae, 0x86, 0xf6, + 0x65, 0x5c, 0xae, 0x9f, 0x5b, 0xc8, 0xa1, 0xa7, 0x79, 0x28, 0xcc, 0xd5, 0xb5, 0x06, 0xb1, 0xd1, + 0x83, 0xed, 0x7a, 0xa6, 0x3a, 0xec, 0x14, 0x0c, 0x57, 0x79, 0x98, 0xca, 0x19, 0xe5, 0xef, 0x28, + 0x8c, 0x28, 0xe7, 0x2e, 0x6e, 0xc1, 0x86, 0x4e, 0xcf, 0x13, 0x12, 0x77, 0xea, 0x42, 0x7c, 0xe1, + 0xbd, 0xfd, 0x03, 0xe8, 0xaf, 0x3b, 0xaf, 0x67, 0x3d, 0x89, 0xa2, 0x0b, 0x11, 0x11, 0x6f, 0xe8, + 0x59, 0x03, 0x4a, 0xc4, 0x3d, 0x19, 0xcd, 0x4e, 0xf9, 0xb7, 0xc7, 0x01, 0xfb, 0xf1, 0x38, 0x60, + 0x3f, 0x1f, 0x07, 0xec, 0xeb, 0xaf, 0x41, 0xe3, 0xba, 0x6b, 0xfe, 0xf6, 0x5e, 0xfc, 0x09, 0x00, + 0x00, 0xff, 0xff, 0x2a, 0xa4, 0xbc, 0x39, 0x07, 0x05, 0x00, 0x00, +} diff --git a/proto/pkg/errorpb/errorpb.pb.go b/proto/pkg/errorpb/errorpb.pb.go new file mode 100644 index 00000000..ace5212a --- /dev/null +++ b/proto/pkg/errorpb/errorpb.pb.go @@ -0,0 +1,1852 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: errorpb.proto + +package errorpb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" + + metapb "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type NotLeader struct { + RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + Leader *metapb.Peer `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotLeader) Reset() { *m = NotLeader{} } +func (m *NotLeader) String() string { return proto.CompactTextString(m) } +func (*NotLeader) ProtoMessage() {} +func (*NotLeader) Descriptor() ([]byte, []int) { + return fileDescriptor_errorpb_6ea187258f91197d, []int{0} +} +func (m *NotLeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NotLeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NotLeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *NotLeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotLeader.Merge(dst, src) +} +func (m *NotLeader) XXX_Size() int { + return m.Size() +} +func (m *NotLeader) XXX_DiscardUnknown() { + xxx_messageInfo_NotLeader.DiscardUnknown(m) +} + +var xxx_messageInfo_NotLeader proto.InternalMessageInfo + +func (m *NotLeader) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *NotLeader) GetLeader() *metapb.Peer { + if m != nil { + return m.Leader + } + return nil +} + +type StoreNotMatch struct { + RequestStoreId uint64 `protobuf:"varint,1,opt,name=request_store_id,json=requestStoreId,proto3" json:"request_store_id,omitempty"` + ActualStoreId uint64 `protobuf:"varint,2,opt,name=actual_store_id,json=actualStoreId,proto3" json:"actual_store_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreNotMatch) Reset() { *m = StoreNotMatch{} } +func (m *StoreNotMatch) String() string { return proto.CompactTextString(m) } +func (*StoreNotMatch) ProtoMessage() {} +func (*StoreNotMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_errorpb_6ea187258f91197d, []int{1} +} +func (m *StoreNotMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreNotMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreNotMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreNotMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreNotMatch.Merge(dst, src) +} +func (m *StoreNotMatch) XXX_Size() int { + return m.Size() +} +func (m *StoreNotMatch) XXX_DiscardUnknown() { + xxx_messageInfo_StoreNotMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreNotMatch proto.InternalMessageInfo + +func (m *StoreNotMatch) GetRequestStoreId() uint64 { + if m != nil { + return m.RequestStoreId + } + return 0 +} + +func (m *StoreNotMatch) GetActualStoreId() uint64 { + if m != nil { + return m.ActualStoreId + } + return 0 +} + +type RegionNotFound 
struct { + RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionNotFound) Reset() { *m = RegionNotFound{} } +func (m *RegionNotFound) String() string { return proto.CompactTextString(m) } +func (*RegionNotFound) ProtoMessage() {} +func (*RegionNotFound) Descriptor() ([]byte, []int) { + return fileDescriptor_errorpb_6ea187258f91197d, []int{2} +} +func (m *RegionNotFound) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionNotFound) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionNotFound.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionNotFound) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionNotFound.Merge(dst, src) +} +func (m *RegionNotFound) XXX_Size() int { + return m.Size() +} +func (m *RegionNotFound) XXX_DiscardUnknown() { + xxx_messageInfo_RegionNotFound.DiscardUnknown(m) +} + +var xxx_messageInfo_RegionNotFound proto.InternalMessageInfo + +func (m *RegionNotFound) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +type KeyNotInRegion struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + StartKey []byte `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,4,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyNotInRegion) Reset() { *m = KeyNotInRegion{} } +func (m *KeyNotInRegion) String() string { return proto.CompactTextString(m) } +func (*KeyNotInRegion) ProtoMessage() {} +func (*KeyNotInRegion) Descriptor() ([]byte, []int) { + return fileDescriptor_errorpb_6ea187258f91197d, []int{3} +} +func (m *KeyNotInRegion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyNotInRegion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KeyNotInRegion.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KeyNotInRegion) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyNotInRegion.Merge(dst, src) +} +func (m *KeyNotInRegion) XXX_Size() int { + return m.Size() +} +func (m *KeyNotInRegion) XXX_DiscardUnknown() { + xxx_messageInfo_KeyNotInRegion.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyNotInRegion proto.InternalMessageInfo + +func (m *KeyNotInRegion) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KeyNotInRegion) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *KeyNotInRegion) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *KeyNotInRegion) GetEndKey() []byte { + if m != nil { + return m.EndKey + } + return nil +} + +type EpochNotMatch struct { + CurrentRegions []*metapb.Region `protobuf:"bytes,1,rep,name=current_regions,json=currentRegions" json:"current_regions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EpochNotMatch) Reset() { *m = EpochNotMatch{} } +func (m *EpochNotMatch) String() string { return proto.CompactTextString(m) } +func (*EpochNotMatch) ProtoMessage() {} +func (*EpochNotMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_errorpb_6ea187258f91197d, []int{4} +} +func (m *EpochNotMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochNotMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EpochNotMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *EpochNotMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochNotMatch.Merge(dst, src) +} +func (m *EpochNotMatch) XXX_Size() int { + return m.Size() +} +func (m *EpochNotMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EpochNotMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochNotMatch proto.InternalMessageInfo + +func (m *EpochNotMatch) GetCurrentRegions() []*metapb.Region { + if m != nil { + return m.CurrentRegions + } + return nil +} + +type StaleCommand struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StaleCommand) Reset() { *m = StaleCommand{} } +func (m *StaleCommand) String() string { return proto.CompactTextString(m) } +func (*StaleCommand) ProtoMessage() {} +func (*StaleCommand) Descriptor() ([]byte, []int) { + return fileDescriptor_errorpb_6ea187258f91197d, []int{5} +} +func (m *StaleCommand) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StaleCommand) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StaleCommand.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StaleCommand) XXX_Merge(src proto.Message) { + xxx_messageInfo_StaleCommand.Merge(dst, src) +} +func (m *StaleCommand) XXX_Size() int { + return m.Size() +} +func (m *StaleCommand) XXX_DiscardUnknown() { + xxx_messageInfo_StaleCommand.DiscardUnknown(m) +} + +var xxx_messageInfo_StaleCommand proto.InternalMessageInfo + +type Error struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + NotLeader *NotLeader `protobuf:"bytes,2,opt,name=not_leader,json=notLeader" json:"not_leader,omitempty"` + RegionNotFound *RegionNotFound `protobuf:"bytes,3,opt,name=region_not_found,json=regionNotFound" json:"region_not_found,omitempty"` + KeyNotInRegion *KeyNotInRegion `protobuf:"bytes,4,opt,name=key_not_in_region,json=keyNotInRegion" json:"key_not_in_region,omitempty"` + EpochNotMatch *EpochNotMatch `protobuf:"bytes,5,opt,name=epoch_not_match,json=epochNotMatch" json:"epoch_not_match,omitempty"` + StaleCommand *StaleCommand `protobuf:"bytes,7,opt,name=stale_command,json=staleCommand" json:"stale_command,omitempty"` + StoreNotMatch *StoreNotMatch `protobuf:"bytes,8,opt,name=store_not_match,json=storeNotMatch" json:"store_not_match,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return 
fileDescriptor_errorpb_6ea187258f91197d, []int{6} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(dst, src) +} +func (m *Error) XXX_Size() int { + return m.Size() +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Error) GetNotLeader() *NotLeader { + if m != nil { + return m.NotLeader + } + return nil +} + +func (m *Error) GetRegionNotFound() *RegionNotFound { + if m != nil { + return m.RegionNotFound + } + return nil +} + +func (m *Error) GetKeyNotInRegion() *KeyNotInRegion { + if m != nil { + return m.KeyNotInRegion + } + return nil +} + +func (m *Error) GetEpochNotMatch() *EpochNotMatch { + if m != nil { + return m.EpochNotMatch + } + return nil +} + +func (m *Error) GetStaleCommand() *StaleCommand { + if m != nil { + return m.StaleCommand + } + return nil +} + +func (m *Error) GetStoreNotMatch() *StoreNotMatch { + if m != nil { + return m.StoreNotMatch + } + return nil +} + +func init() { + proto.RegisterType((*NotLeader)(nil), "errorpb.NotLeader") + proto.RegisterType((*StoreNotMatch)(nil), "errorpb.StoreNotMatch") + proto.RegisterType((*RegionNotFound)(nil), "errorpb.RegionNotFound") + proto.RegisterType((*KeyNotInRegion)(nil), "errorpb.KeyNotInRegion") + proto.RegisterType((*EpochNotMatch)(nil), "errorpb.EpochNotMatch") + proto.RegisterType((*StaleCommand)(nil), "errorpb.StaleCommand") + proto.RegisterType((*Error)(nil), "errorpb.Error") +} +func (m *NotLeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NotLeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.RegionId)) + } + if m.Leader != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.Leader.Size())) + n1, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreNotMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreNotMatch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequestStoreId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.RequestStoreId)) + } + if m.ActualStoreId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.ActualStoreId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RegionNotFound) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegionNotFound) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.RegionId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *KeyNotInRegion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyNotInRegion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.RegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.RegionId)) + } + if len(m.StartKey) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(len(m.StartKey))) + i += copy(dAtA[i:], m.StartKey) + } + if len(m.EndKey) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(len(m.EndKey))) + i += copy(dAtA[i:], m.EndKey) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *EpochNotMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochNotMatch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CurrentRegions) > 0 { + for _, msg := range m.CurrentRegions { + dAtA[i] = 0xa + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StaleCommand) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StaleCommand) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Error) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Error) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Message) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if m.NotLeader != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.NotLeader.Size())) + n2, err := m.NotLeader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.RegionNotFound != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.RegionNotFound.Size())) + n3, err := m.RegionNotFound.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.KeyNotInRegion != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.KeyNotInRegion.Size())) + n4, err := m.KeyNotInRegion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.EpochNotMatch != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.EpochNotMatch.Size())) + n5, err := m.EpochNotMatch.MarshalTo(dAtA[i:]) + if err 
!= nil { + return 0, err + } + i += n5 + } + if m.StaleCommand != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.StaleCommand.Size())) + n6, err := m.StaleCommand.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.StoreNotMatch != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintErrorpb(dAtA, i, uint64(m.StoreNotMatch.Size())) + n7, err := m.StoreNotMatch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintErrorpb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *NotLeader) Size() (n int) { + var l int + _ = l + if m.RegionId != 0 { + n += 1 + sovErrorpb(uint64(m.RegionId)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreNotMatch) Size() (n int) { + var l int + _ = l + if m.RequestStoreId != 0 { + n += 1 + sovErrorpb(uint64(m.RequestStoreId)) + } + if m.ActualStoreId != 0 { + n += 1 + sovErrorpb(uint64(m.ActualStoreId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RegionNotFound) Size() (n int) { + var l int + _ = l + if m.RegionId != 0 { + n += 1 + sovErrorpb(uint64(m.RegionId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *KeyNotInRegion) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.RegionId != 0 { + n += 1 + sovErrorpb(uint64(m.RegionId)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovErrorpb(uint64(l)) + } + l = len(m.EndKey) + if l > 0 { + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EpochNotMatch) Size() (n int) { + var l int + _ = l + if len(m.CurrentRegions) > 0 { + for _, e := range m.CurrentRegions { + l = e.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StaleCommand) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Error) Size() (n int) { + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.NotLeader != nil { + l = m.NotLeader.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.RegionNotFound != nil { + l = m.RegionNotFound.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.KeyNotInRegion != nil { + l = m.KeyNotInRegion.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.EpochNotMatch != nil { + l = m.EpochNotMatch.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.StaleCommand != nil { + l = m.StaleCommand.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.StoreNotMatch != nil { + l = m.StoreNotMatch.Size() + n += 1 + l + sovErrorpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovErrorpb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozErrorpb(x uint64) (n int) { + return sovErrorpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *NotLeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 
0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NotLeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NotLeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &metapb.Peer{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreNotMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreNotMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreNotMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStoreId", wireType) + } + m.RequestStoreId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequestStoreId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActualStoreId", wireType) + } + m.ActualStoreId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActualStoreId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegionNotFound) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegionNotFound: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegionNotFound: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyNotInRegion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyNotInRegion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyNotInRegion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) + if m.StartKey == nil { + m.StartKey = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) + if m.EndKey == nil { + m.EndKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EpochNotMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochNotMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochNotMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRegions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRegions = append(m.CurrentRegions, &metapb.Region{}) + if err := m.CurrentRegions[len(m.CurrentRegions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StaleCommand) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StaleCommand: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StaleCommand: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Error) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Error: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Error: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotLeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NotLeader == nil { + m.NotLeader = &NotLeader{} + } + if err := m.NotLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionNotFound", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionNotFound == nil { + m.RegionNotFound = &RegionNotFound{} + } + if err := m.RegionNotFound.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyNotInRegion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyNotInRegion == nil { + m.KeyNotInRegion = &KeyNotInRegion{} + } + if err := m.KeyNotInRegion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field EpochNotMatch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EpochNotMatch == nil { + m.EpochNotMatch = &EpochNotMatch{} + } + if err := m.EpochNotMatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleCommand", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StaleCommand == nil { + m.StaleCommand = &StaleCommand{} + } + if err := m.StaleCommand.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreNotMatch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StoreNotMatch == nil { + m.StoreNotMatch = &StoreNotMatch{} + } + if err := m.StoreNotMatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipErrorpb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthErrorpb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipErrorpb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthErrorpb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowErrorpb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("errorpb.proto", fileDescriptor_errorpb_6ea187258f91197d) } + +var fileDescriptor_errorpb_6ea187258f91197d = []byte{ + // 499 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xcd, 0x8e, 0x12, 0x4d, + 0x14, 0xfd, 0x1a, 0x66, 0xf8, 0xb9, 0xd0, 0x0d, 0x5f, 0x47, 0x9d, 0xce, 0x4c, 0x42, 0x48, 0xc7, + 0x18, 0x36, 0x62, 0xc4, 0x85, 0x89, 0x0b, 0x13, 0xc7, 0x8c, 0x91, 0xa0, 0xc4, 0xd4, 0x3c, 0x40, + 0xa7, 0x86, 0xbe, 0x32, 0x04, 0xa8, 0xc2, 0xaa, 0x62, 0xc1, 0x9b, 0xf8, 0x00, 0x3e, 0x8c, 0x4b, + 0x1f, 0xc1, 0xe0, 0x8b, 0x98, 0xba, 0xd5, 0x34, 0xd4, 0x2c, 0xdc, 0xf5, 0xfd, 0x39, 0xe7, 0xde, + 0x7b, 0x4e, 0x35, 0x84, 0xa8, 0x94, 0x54, 0x9b, 0xbb, 0xe1, 0x46, 0x49, 0x23, 0xe3, 0x7a, 0x11, + 0x5e, 0xb6, 0xd7, 0x68, 0xf8, 0x21, 0x7d, 0xf9, 0x68, 0x2e, 0xe7, 0x92, 0x3e, 0x5f, 0xd8, 0x2f, + 0x97, 0x4d, 0xa7, 0xd0, 0x9c, 0x4a, 0xf3, 0x09, 0x79, 0x8e, 0x2a, 0xbe, 0x82, 0xa6, 0xc2, 0xf9, + 0x42, 0x8a, 0x6c, 0x91, 0x27, 0x41, 0x3f, 0x18, 0x9c, 0xb1, 0x86, 0x4b, 0x8c, 0xf3, 0xf8, 0x29, + 0xd4, 0x56, 0xd4, 0x96, 0x54, 0xfa, 0xc1, 0xa0, 0x35, 0x6a, 0x0f, 0x0b, 0xfa, 0x2f, 0x88, 0x8a, + 0x15, 0xb5, 0x94, 0x43, 0x78, 0x6b, 0xa4, 0xc2, 0xa9, 0x34, 0x9f, 0xb9, 0x99, 0xdd, 0xc7, 0x03, + 0xe8, 0x2a, 0xfc, 0xb6, 0x45, 0x6d, 0x32, 0x6d, 0x0b, 0x47, 0xea, 0xa8, 0xc8, 0x53, 0xff, 0x38, + 0x8f, 0x9f, 0x41, 0x87, 0xcf, 0xcc, 0x96, 0xaf, 0x8e, 0x8d, 0x15, 0x6a, 0x0c, 0x5d, 0xba, 0xe8, + 0x4b, 0x9f, 0x43, 0xc4, 0x68, 0xa9, 0xa9, 0x34, 
0x1f, 0xe4, 0x56, 0xe4, 0xff, 0xdc, 0x3b, 0xdd, + 0x42, 0x34, 0xc1, 0xdd, 0x54, 0x9a, 0xb1, 0x70, 0xb0, 0xb8, 0x0b, 0xd5, 0x25, 0xee, 0xa8, 0xb1, + 0xcd, 0xec, 0xa7, 0x4f, 0x50, 0x79, 0x70, 0xf8, 0x15, 0x34, 0xb5, 0xe1, 0xca, 0x64, 0x16, 0x54, + 0x25, 0x50, 0x83, 0x12, 0x13, 0xdc, 0xc5, 0x17, 0x50, 0x47, 0x91, 0x53, 0xe9, 0x8c, 0x4a, 0x35, + 0x14, 0xf9, 0x04, 0x77, 0xe9, 0x47, 0x08, 0x6f, 0x36, 0x72, 0x76, 0x5f, 0x0a, 0xf1, 0x1a, 0x3a, + 0xb3, 0xad, 0x52, 0x28, 0x4c, 0xe6, 0xa8, 0x75, 0x12, 0xf4, 0xab, 0x83, 0xd6, 0x28, 0x3a, 0x08, + 0xe9, 0xd6, 0x63, 0x51, 0xd1, 0xe6, 0x42, 0x9d, 0x46, 0xd0, 0xbe, 0x35, 0x7c, 0x85, 0xef, 0xe5, + 0x7a, 0xcd, 0x45, 0x9e, 0xfe, 0xa8, 0xc2, 0xf9, 0x8d, 0xb5, 0x38, 0x4e, 0xa0, 0xbe, 0x46, 0xad, + 0xf9, 0x1c, 0xe9, 0x98, 0x26, 0x3b, 0x84, 0xf1, 0x4b, 0x00, 0x21, 0x4d, 0xe6, 0x19, 0x16, 0x0f, + 0x0f, 0xef, 0xa4, 0x74, 0x9c, 0x35, 0x45, 0x69, 0xfe, 0x3b, 0x6b, 0x14, 0x69, 0x60, 0x91, 0x5f, + 0xad, 0xb0, 0x74, 0x6d, 0x6b, 0x74, 0x51, 0x02, 0x7d, 0xdd, 0xad, 0x83, 0x9e, 0x0f, 0xd7, 0xf0, + 0xff, 0x12, 0x77, 0x84, 0x5f, 0x88, 0xe2, 0x4a, 0x92, 0xe5, 0x94, 0xc3, 0x37, 0x83, 0x45, 0x4b, + 0xdf, 0x9c, 0xb7, 0xd0, 0x41, 0xab, 0x1b, 0xb1, 0xac, 0xad, 0x72, 0xc9, 0x39, 0x31, 0x3c, 0x29, + 0x19, 0x3c, 0x5d, 0x59, 0x88, 0x9e, 0xcc, 0x6f, 0x20, 0xd4, 0x56, 0xad, 0x6c, 0xe6, 0xe4, 0x4a, + 0xea, 0x84, 0x7e, 0x5c, 0xa2, 0x4f, 0xb5, 0x64, 0x6d, 0x7d, 0x12, 0xd9, 0xd9, 0xee, 0xe9, 0x1d, + 0x67, 0x37, 0x1e, 0xcc, 0xf6, 0x1e, 0x37, 0x0b, 0xb5, 0x17, 0xb6, 0xdc, 0x64, 0x5a, 0xe8, 0xba, + 0xfb, 0x73, 0xdf, 0x0b, 0x7e, 0xed, 0x7b, 0xc1, 0xef, 0x7d, 0x2f, 0xf8, 0xfe, 0xa7, 0xf7, 0xdf, + 0x5d, 0x8d, 0x7e, 0xb9, 0x57, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x26, 0x03, 0xaa, 0x44, 0xb0, + 0x03, 0x00, 0x00, +} diff --git a/proto/pkg/kvrpcpb/kvrpcpb.pb.go b/proto/pkg/kvrpcpb/kvrpcpb.pb.go new file mode 100644 index 00000000..d7ce8606 --- /dev/null +++ b/proto/pkg/kvrpcpb/kvrpcpb.pb.go @@ -0,0 +1,8054 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kvrpcpb.proto + +package kvrpcpb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" + + errorpb "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + + metapb "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Op int32 + +const ( + Op_Put Op = 0 + Op_Del Op = 1 + Op_Rollback Op = 2 + // Used by TinySQL but not TinyKV. + Op_Lock Op = 3 +) + +var Op_name = map[int32]string{ + 0: "Put", + 1: "Del", + 2: "Rollback", + 3: "Lock", +} +var Op_value = map[string]int32{ + "Put": 0, + "Del": 1, + "Rollback": 2, + "Lock": 3, +} + +func (x Op) String() string { + return proto.EnumName(Op_name, int32(x)) +} +func (Op) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{0} +} + +type Action int32 + +const ( + Action_NoAction Action = 0 + // The lock is rolled back because it has expired. + Action_TTLExpireRollback Action = 1 + // The lock does not exist, TinyKV left a record of the rollback, but did not + // have to delete a lock. 
+ Action_LockNotExistRollback Action = 2 +) + +var Action_name = map[int32]string{ + 0: "NoAction", + 1: "TTLExpireRollback", + 2: "LockNotExistRollback", +} +var Action_value = map[string]int32{ + "NoAction": 0, + "TTLExpireRollback": 1, + "LockNotExistRollback": 2, +} + +func (x Action) String() string { + return proto.EnumName(Action_name, int32(x)) +} +func (Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{1} +} + +// Raw commands. +type RawGetRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawGetRequest) Reset() { *m = RawGetRequest{} } +func (m *RawGetRequest) String() string { return proto.CompactTextString(m) } +func (*RawGetRequest) ProtoMessage() {} +func (*RawGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{0} +} +func (m *RawGetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawGetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawGetRequest.Merge(dst, src) +} +func (m *RawGetRequest) XXX_Size() int { + return m.Size() +} +func (m *RawGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RawGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RawGetRequest proto.InternalMessageInfo + +func (m *RawGetRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *RawGetRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RawGetRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +type RawGetResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + // True if the requested key doesn't exist; another error will not be signalled. 
+ NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawGetResponse) Reset() { *m = RawGetResponse{} } +func (m *RawGetResponse) String() string { return proto.CompactTextString(m) } +func (*RawGetResponse) ProtoMessage() {} +func (*RawGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{1} +} +func (m *RawGetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawGetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawGetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawGetResponse.Merge(dst, src) +} +func (m *RawGetResponse) XXX_Size() int { + return m.Size() +} +func (m *RawGetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RawGetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RawGetResponse proto.InternalMessageInfo + +func (m *RawGetResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *RawGetResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *RawGetResponse) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *RawGetResponse) GetNotFound() bool { + if m != nil { + return m.NotFound + } + return false +} + +type RawPutRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Cf string `protobuf:"bytes,4,opt,name=cf,proto3" json:"cf,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawPutRequest) Reset() { *m = RawPutRequest{} } +func (m *RawPutRequest) String() string { return proto.CompactTextString(m) } +func (*RawPutRequest) ProtoMessage() {} +func (*RawPutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{2} +} +func (m *RawPutRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawPutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawPutRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawPutRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawPutRequest.Merge(dst, src) +} +func (m *RawPutRequest) XXX_Size() int { + return m.Size() +} +func (m *RawPutRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RawPutRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RawPutRequest proto.InternalMessageInfo + +func (m *RawPutRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *RawPutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RawPutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *RawPutRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +type 
RawPutResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawPutResponse) Reset() { *m = RawPutResponse{} } +func (m *RawPutResponse) String() string { return proto.CompactTextString(m) } +func (*RawPutResponse) ProtoMessage() {} +func (*RawPutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{3} +} +func (m *RawPutResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawPutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawPutResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawPutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawPutResponse.Merge(dst, src) +} +func (m *RawPutResponse) XXX_Size() int { + return m.Size() +} +func (m *RawPutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RawPutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RawPutResponse proto.InternalMessageInfo + +func (m *RawPutResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *RawPutResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type RawDeleteRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawDeleteRequest) Reset() { *m = RawDeleteRequest{} } +func (m *RawDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*RawDeleteRequest) ProtoMessage() {} +func (*RawDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{4} +} +func (m *RawDeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawDeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawDeleteRequest.Merge(dst, src) +} +func (m *RawDeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *RawDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RawDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RawDeleteRequest proto.InternalMessageInfo + +func (m *RawDeleteRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *RawDeleteRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RawDeleteRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +type RawDeleteResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" 
json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawDeleteResponse) Reset() { *m = RawDeleteResponse{} } +func (m *RawDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*RawDeleteResponse) ProtoMessage() {} +func (*RawDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{5} +} +func (m *RawDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawDeleteResponse.Merge(dst, src) +} +func (m *RawDeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *RawDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RawDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RawDeleteResponse proto.InternalMessageInfo + +func (m *RawDeleteResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *RawDeleteResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type RawScanRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // The maximum number of values read. + Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + Cf string `protobuf:"bytes,4,opt,name=cf,proto3" json:"cf,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawScanRequest) Reset() { *m = RawScanRequest{} } +func (m *RawScanRequest) String() string { return proto.CompactTextString(m) } +func (*RawScanRequest) ProtoMessage() {} +func (*RawScanRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{6} +} +func (m *RawScanRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawScanRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawScanRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawScanRequest.Merge(dst, src) +} +func (m *RawScanRequest) XXX_Size() int { + return m.Size() +} +func (m *RawScanRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RawScanRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RawScanRequest proto.InternalMessageInfo + +func (m *RawScanRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *RawScanRequest) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *RawScanRequest) GetLimit() uint32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RawScanRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +type RawScanResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + // 
An error which affects the whole scan. Per-key errors are included in kvs. + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Kvs []*KvPair `protobuf:"bytes,3,rep,name=kvs" json:"kvs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawScanResponse) Reset() { *m = RawScanResponse{} } +func (m *RawScanResponse) String() string { return proto.CompactTextString(m) } +func (*RawScanResponse) ProtoMessage() {} +func (*RawScanResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{7} +} +func (m *RawScanResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawScanResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RawScanResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawScanResponse.Merge(dst, src) +} +func (m *RawScanResponse) XXX_Size() int { + return m.Size() +} +func (m *RawScanResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RawScanResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RawScanResponse proto.InternalMessageInfo + +func (m *RawScanResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *RawScanResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *RawScanResponse) GetKvs() []*KvPair { + if m != nil { + return m.Kvs + } + return nil +} + +// Read the value of a key at the given time. +type GetRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{8} +} +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(dst, src) +} +func (m *GetRequest) XXX_Size() int { + return m.Size() +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +func (m *GetRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *GetRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + +type GetResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" 
json:"region_error,omitempty"` + Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + // True if the requested key doesn't exist; another error will not be signalled. + NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{9} +} +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(dst, src) +} +func (m *GetResponse) XXX_Size() int { + return m.Size() +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *GetResponse) GetError() *KeyError { + if m != nil { + return m.Error + } + return nil +} + +func (m *GetResponse) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *GetResponse) GetNotFound() bool { + if m != nil { + return m.NotFound + } + return false +} + +// Prewrite is the first phase of two phase commit. A prewrite commit contains all the +// writes (mutations) which a client would like to make as part of a transaction. The +// request succeeds if none of the keys are locked. In that case all those keys will +// be locked. If the prewrite fails, no changes are made to the DB. +type PrewriteRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"` + // Key of the primary lock. 
+ PrimaryLock []byte `protobuf:"bytes,3,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"` + StartVersion uint64 `protobuf:"varint,4,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + LockTtl uint64 `protobuf:"varint,5,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrewriteRequest) Reset() { *m = PrewriteRequest{} } +func (m *PrewriteRequest) String() string { return proto.CompactTextString(m) } +func (*PrewriteRequest) ProtoMessage() {} +func (*PrewriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{10} +} +func (m *PrewriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PrewriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PrewriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PrewriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrewriteRequest.Merge(dst, src) +} +func (m *PrewriteRequest) XXX_Size() int { + return m.Size() +} +func (m *PrewriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PrewriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PrewriteRequest proto.InternalMessageInfo + +func (m *PrewriteRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *PrewriteRequest) GetMutations() []*Mutation { + if m != nil { + return m.Mutations + } + return nil +} + +func (m *PrewriteRequest) GetPrimaryLock() []byte { + if m != nil { + return m.PrimaryLock + } + return nil +} + +func (m *PrewriteRequest) GetStartVersion() uint64 { + if m != nil { + return m.StartVersion + } + return 0 +} + +func (m *PrewriteRequest) GetLockTtl() uint64 { + if m != nil { + return m.LockTtl + } + return 0 +} + +// Empty if the prewrite is successful. 
+type PrewriteResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrewriteResponse) Reset() { *m = PrewriteResponse{} } +func (m *PrewriteResponse) String() string { return proto.CompactTextString(m) } +func (*PrewriteResponse) ProtoMessage() {} +func (*PrewriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{11} +} +func (m *PrewriteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PrewriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PrewriteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PrewriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrewriteResponse.Merge(dst, src) +} +func (m *PrewriteResponse) XXX_Size() int { + return m.Size() +} +func (m *PrewriteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PrewriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PrewriteResponse proto.InternalMessageInfo + +func (m *PrewriteResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *PrewriteResponse) GetErrors() []*KeyError { + if m != nil { + return m.Errors + } + return nil +} + +// Commit is the second phase of 2pc. The client must have successfully prewritten +// the transaction to all nodes. If all keys are locked by the given transaction, +// then the commit should succeed. If any keys are locked by a different +// transaction or are not locked at all (rolled back or expired), the commit +// fails. +type CommitRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + // Identifies the transaction, must match the start_version in the transaction's + // prewrite request. + StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + // Must match the keys mutated by the transaction's prewrite request. + Keys [][]byte `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"` + // Must be greater than start_version. 
+ CommitVersion uint64 `protobuf:"varint,4,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{12} +} +func (m *CommitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitRequest.Merge(dst, src) +} +func (m *CommitRequest) XXX_Size() int { + return m.Size() +} +func (m *CommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CommitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitRequest proto.InternalMessageInfo + +func (m *CommitRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *CommitRequest) GetStartVersion() uint64 { + if m != nil { + return m.StartVersion + } + return 0 +} + +func (m *CommitRequest) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +func (m *CommitRequest) GetCommitVersion() uint64 { + if m != nil { + return m.CommitVersion + } + return 0 +} + +// Empty if the commit is successful. +type CommitResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{13} +} +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(dst, src) +} +func (m *CommitResponse) XXX_Size() int { + return m.Size() +} +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo + +func (m *CommitResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *CommitResponse) GetError() *KeyError { + if m != nil { + return m.Error + } + return nil +} + +// Read multiple values from the DB. 
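
The Prewrite/Commit pair above is the two-phase commit flow: prewrite every mutation under one primary lock, then commit the same keys at a larger timestamp. A minimal client-side sketch of filling in these messages, assuming made-up keys and timestamps and leaving the region `Context` unset:

```
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
)

func main() {
	startTS := uint64(100)

	// Phase one: prewrite every mutation, naming one key as the primary lock.
	prewrite := &kvrpcpb.PrewriteRequest{
		Mutations: []*kvrpcpb.Mutation{
			{Op: kvrpcpb.Op_Put, Key: []byte("k1"), Value: []byte("v1")},
			{Op: kvrpcpb.Op_Del, Key: []byte("k2")},
		},
		PrimaryLock:  []byte("k1"),
		StartVersion: startTS,
		LockTtl:      3000,
	}

	// Phase two: if no prewrite returned a KeyError, commit the same keys
	// at a commit_version strictly greater than start_version.
	commit := &kvrpcpb.CommitRequest{
		StartVersion:  startTS,
		Keys:          [][]byte{[]byte("k1"), []byte("k2")},
		CommitVersion: startTS + 1,
	}

	fmt.Println(prewrite.String())
	fmt.Println(commit.String())
}
```
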
+type ScanRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // The maximum number of values read. + Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScanRequest) Reset() { *m = ScanRequest{} } +func (m *ScanRequest) String() string { return proto.CompactTextString(m) } +func (*ScanRequest) ProtoMessage() {} +func (*ScanRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{14} +} +func (m *ScanRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScanRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScanRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScanRequest.Merge(dst, src) +} +func (m *ScanRequest) XXX_Size() int { + return m.Size() +} +func (m *ScanRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ScanRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ScanRequest proto.InternalMessageInfo + +func (m *ScanRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *ScanRequest) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *ScanRequest) GetLimit() uint32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ScanRequest) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + +type ScanResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + // Other errors are recorded for each key in pairs. 
+ Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScanResponse) Reset() { *m = ScanResponse{} } +func (m *ScanResponse) String() string { return proto.CompactTextString(m) } +func (*ScanResponse) ProtoMessage() {} +func (*ScanResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{15} +} +func (m *ScanResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScanResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScanResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScanResponse.Merge(dst, src) +} +func (m *ScanResponse) XXX_Size() int { + return m.Size() +} +func (m *ScanResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ScanResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ScanResponse proto.InternalMessageInfo + +func (m *ScanResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *ScanResponse) GetPairs() []*KvPair { + if m != nil { + return m.Pairs + } + return nil +} + +// Rollback an un-committed transaction. Will fail if the transaction has already +// been committed or keys are locked by a different transaction. If the keys were never +// locked, no action is needed but it is not an error. If successful all keys will be +// unlocked and all uncommitted values removed. +type BatchRollbackRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + Keys [][]byte `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchRollbackRequest) Reset() { *m = BatchRollbackRequest{} } +func (m *BatchRollbackRequest) String() string { return proto.CompactTextString(m) } +func (*BatchRollbackRequest) ProtoMessage() {} +func (*BatchRollbackRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{16} +} +func (m *BatchRollbackRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BatchRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BatchRollbackRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BatchRollbackRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchRollbackRequest.Merge(dst, src) +} +func (m *BatchRollbackRequest) XXX_Size() int { + return m.Size() +} +func (m *BatchRollbackRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BatchRollbackRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchRollbackRequest proto.InternalMessageInfo + +func (m *BatchRollbackRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *BatchRollbackRequest) GetStartVersion() uint64 { + if m != nil { + return m.StartVersion + } + return 0 +} + +func (m *BatchRollbackRequest) GetKeys() [][]byte { + 
if m != nil { + return m.Keys + } + return nil +} + +// Empty if the rollback is successful. +type BatchRollbackResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchRollbackResponse) Reset() { *m = BatchRollbackResponse{} } +func (m *BatchRollbackResponse) String() string { return proto.CompactTextString(m) } +func (*BatchRollbackResponse) ProtoMessage() {} +func (*BatchRollbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{17} +} +func (m *BatchRollbackResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BatchRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BatchRollbackResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BatchRollbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchRollbackResponse.Merge(dst, src) +} +func (m *BatchRollbackResponse) XXX_Size() int { + return m.Size() +} +func (m *BatchRollbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BatchRollbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchRollbackResponse proto.InternalMessageInfo + +func (m *BatchRollbackResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *BatchRollbackResponse) GetError() *KeyError { + if m != nil { + return m.Error + } + return nil +} + +// CheckTxnStatus reports on the status of a transaction and may take action to +// rollback expired locks. +// If the transaction has previously been rolled back or committed, return that information. +// If the TTL of the transaction is exhausted, abort that transaction and roll back the primary lock. +// Otherwise, returns the TTL information. 
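
For the CheckTxnStatus behaviour described above, a hedged sketch of how a caller might build the request and read the three possible outcomes back out of the response; the timestamps are placeholders and the RPC plumbing is omitted:

```
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
)

// txnState reads the three possible outcomes out of a CheckTxnStatusResponse:
// committed (commit_version > 0), still locked (lock_ttl > 0), or rolled back
// (both zero).
func txnState(resp *kvrpcpb.CheckTxnStatusResponse) string {
	switch {
	case resp.CommitVersion > 0:
		return "committed"
	case resp.LockTtl > 0:
		return "locked, TTL not yet expired"
	default:
		return "rolled back, action=" + resp.Action.String()
	}
}

func main() {
	req := &kvrpcpb.CheckTxnStatusRequest{
		PrimaryKey: []byte("k1"),
		LockTs:     100, // start timestamp of the transaction being checked
		CurrentTs:  200, // timestamp used to decide whether the lock's TTL has expired
	}
	fmt.Println(req.String())
	fmt.Println(txnState(&kvrpcpb.CheckTxnStatusResponse{CommitVersion: 150}))
}
```
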
+type CheckTxnStatusRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + PrimaryKey []byte `protobuf:"bytes,2,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"` + LockTs uint64 `protobuf:"varint,3,opt,name=lock_ts,json=lockTs,proto3" json:"lock_ts,omitempty"` + CurrentTs uint64 `protobuf:"varint,4,opt,name=current_ts,json=currentTs,proto3" json:"current_ts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckTxnStatusRequest) Reset() { *m = CheckTxnStatusRequest{} } +func (m *CheckTxnStatusRequest) String() string { return proto.CompactTextString(m) } +func (*CheckTxnStatusRequest) ProtoMessage() {} +func (*CheckTxnStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{18} +} +func (m *CheckTxnStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckTxnStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckTxnStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CheckTxnStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckTxnStatusRequest.Merge(dst, src) +} +func (m *CheckTxnStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *CheckTxnStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckTxnStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckTxnStatusRequest proto.InternalMessageInfo + +func (m *CheckTxnStatusRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *CheckTxnStatusRequest) GetPrimaryKey() []byte { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *CheckTxnStatusRequest) GetLockTs() uint64 { + if m != nil { + return m.LockTs + } + return 0 +} + +func (m *CheckTxnStatusRequest) GetCurrentTs() uint64 { + if m != nil { + return m.CurrentTs + } + return 0 +} + +type CheckTxnStatusResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + // Three kinds of txn status: + // locked: lock_ttl > 0 + // committed: commit_version > 0 + // rolled back: lock_ttl == 0 && commit_version == 0 + LockTtl uint64 `protobuf:"varint,2,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` + CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` + // The action performed by TinyKV in response to the CheckTxnStatus request. 
+ Action Action `protobuf:"varint,4,opt,name=action,proto3,enum=kvrpcpb.Action" json:"action,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckTxnStatusResponse) Reset() { *m = CheckTxnStatusResponse{} } +func (m *CheckTxnStatusResponse) String() string { return proto.CompactTextString(m) } +func (*CheckTxnStatusResponse) ProtoMessage() {} +func (*CheckTxnStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{19} +} +func (m *CheckTxnStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckTxnStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckTxnStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CheckTxnStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckTxnStatusResponse.Merge(dst, src) +} +func (m *CheckTxnStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *CheckTxnStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CheckTxnStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckTxnStatusResponse proto.InternalMessageInfo + +func (m *CheckTxnStatusResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *CheckTxnStatusResponse) GetLockTtl() uint64 { + if m != nil { + return m.LockTtl + } + return 0 +} + +func (m *CheckTxnStatusResponse) GetCommitVersion() uint64 { + if m != nil { + return m.CommitVersion + } + return 0 +} + +func (m *CheckTxnStatusResponse) GetAction() Action { + if m != nil { + return m.Action + } + return Action_NoAction +} + +// Resolve lock will find all locks belonging to the transaction with the given start timestamp. +// If commit_version is 0, TinyKV will rollback all locks. If commit_version is greater than +// 0 it will commit those locks with the given commit timestamp. +// The client will make a resolve lock request for all secondary keys once it has successfully +// committed or rolled back the primary key. 
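
A small sketch of the two ways a client might fill in a ResolveLockRequest once it knows the primary key's fate: commit_version 0 to roll everything back, or a commit timestamp greater than start_version to commit. The timestamps are placeholders:

```
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb"
)

func main() {
	// All locks left at start ts 100 are rolled back when commit_version is 0 ...
	rollback := &kvrpcpb.ResolveLockRequest{StartVersion: 100, CommitVersion: 0}

	// ... and committed at ts 110 when commit_version is greater than 0.
	commit := &kvrpcpb.ResolveLockRequest{StartVersion: 100, CommitVersion: 110}

	fmt.Println(rollback.String())
	fmt.Println(commit.String())
}
```
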
+type ResolveLockRequest struct { + Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` + StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveLockRequest) Reset() { *m = ResolveLockRequest{} } +func (m *ResolveLockRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveLockRequest) ProtoMessage() {} +func (*ResolveLockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{20} +} +func (m *ResolveLockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResolveLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResolveLockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResolveLockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveLockRequest.Merge(dst, src) +} +func (m *ResolveLockRequest) XXX_Size() int { + return m.Size() +} +func (m *ResolveLockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveLockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveLockRequest proto.InternalMessageInfo + +func (m *ResolveLockRequest) GetContext() *Context { + if m != nil { + return m.Context + } + return nil +} + +func (m *ResolveLockRequest) GetStartVersion() uint64 { + if m != nil { + return m.StartVersion + } + return 0 +} + +func (m *ResolveLockRequest) GetCommitVersion() uint64 { + if m != nil { + return m.CommitVersion + } + return 0 +} + +// Empty if the lock is resolved successfully. 
+type ResolveLockResponse struct { + RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` + Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveLockResponse) Reset() { *m = ResolveLockResponse{} } +func (m *ResolveLockResponse) String() string { return proto.CompactTextString(m) } +func (*ResolveLockResponse) ProtoMessage() {} +func (*ResolveLockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{21} +} +func (m *ResolveLockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResolveLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResolveLockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResolveLockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveLockResponse.Merge(dst, src) +} +func (m *ResolveLockResponse) XXX_Size() int { + return m.Size() +} +func (m *ResolveLockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveLockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveLockResponse proto.InternalMessageInfo + +func (m *ResolveLockResponse) GetRegionError() *errorpb.Error { + if m != nil { + return m.RegionError + } + return nil +} + +func (m *ResolveLockResponse) GetError() *KeyError { + if m != nil { + return m.Error + } + return nil +} + +// Either a key/value pair or an error for a particular key. +type KvPair struct { + Error *KeyError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KvPair) Reset() { *m = KvPair{} } +func (m *KvPair) String() string { return proto.CompactTextString(m) } +func (*KvPair) ProtoMessage() {} +func (*KvPair) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{22} +} +func (m *KvPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KvPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KvPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KvPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KvPair.Merge(dst, src) +} +func (m *KvPair) XXX_Size() int { + return m.Size() +} +func (m *KvPair) XXX_DiscardUnknown() { + xxx_messageInfo_KvPair.DiscardUnknown(m) +} + +var xxx_messageInfo_KvPair proto.InternalMessageInfo + +func (m *KvPair) GetError() *KeyError { + if m != nil { + return m.Error + } + return nil +} + +func (m *KvPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KvPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type Mutation struct { + Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=kvrpcpb.Op" json:"op,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" 
json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mutation) Reset() { *m = Mutation{} } +func (m *Mutation) String() string { return proto.CompactTextString(m) } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{23} +} +func (m *Mutation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mutation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Mutation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mutation.Merge(dst, src) +} +func (m *Mutation) XXX_Size() int { + return m.Size() +} +func (m *Mutation) XXX_DiscardUnknown() { + xxx_messageInfo_Mutation.DiscardUnknown(m) +} + +var xxx_messageInfo_Mutation proto.InternalMessageInfo + +func (m *Mutation) GetOp() Op { + if m != nil { + return m.Op + } + return Op_Put +} + +func (m *Mutation) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Mutation) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Many responses can include a KeyError for some problem with one of the requested key. +// Only one field is set and it indicates what the client should do in response. +type KeyError struct { + Locked *LockInfo `protobuf:"bytes,1,opt,name=locked" json:"locked,omitempty"` + Retryable string `protobuf:"bytes,2,opt,name=retryable,proto3" json:"retryable,omitempty"` + Abort string `protobuf:"bytes,3,opt,name=abort,proto3" json:"abort,omitempty"` + Conflict *WriteConflict `protobuf:"bytes,4,opt,name=conflict" json:"conflict,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyError) Reset() { *m = KeyError{} } +func (m *KeyError) String() string { return proto.CompactTextString(m) } +func (*KeyError) ProtoMessage() {} +func (*KeyError) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{24} +} +func (m *KeyError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KeyError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KeyError) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyError.Merge(dst, src) +} +func (m *KeyError) XXX_Size() int { + return m.Size() +} +func (m *KeyError) XXX_DiscardUnknown() { + xxx_messageInfo_KeyError.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyError proto.InternalMessageInfo + +func (m *KeyError) GetLocked() *LockInfo { + if m != nil { + return m.Locked + } + return nil +} + +func (m *KeyError) GetRetryable() string { + if m != nil { + return m.Retryable + } + return "" +} + +func (m *KeyError) GetAbort() string { + if m != nil { + return m.Abort + } + return "" +} + +func (m *KeyError) GetConflict() *WriteConflict { + if m != nil { + return m.Conflict + } + return nil +} + +type LockInfo struct { + PrimaryLock []byte `protobuf:"bytes,1,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"` + LockVersion uint64 
`protobuf:"varint,2,opt,name=lock_version,json=lockVersion,proto3" json:"lock_version,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + LockTtl uint64 `protobuf:"varint,4,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LockInfo) Reset() { *m = LockInfo{} } +func (m *LockInfo) String() string { return proto.CompactTextString(m) } +func (*LockInfo) ProtoMessage() {} +func (*LockInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{25} +} +func (m *LockInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LockInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LockInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LockInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LockInfo.Merge(dst, src) +} +func (m *LockInfo) XXX_Size() int { + return m.Size() +} +func (m *LockInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LockInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LockInfo proto.InternalMessageInfo + +func (m *LockInfo) GetPrimaryLock() []byte { + if m != nil { + return m.PrimaryLock + } + return nil +} + +func (m *LockInfo) GetLockVersion() uint64 { + if m != nil { + return m.LockVersion + } + return 0 +} + +func (m *LockInfo) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LockInfo) GetLockTtl() uint64 { + if m != nil { + return m.LockTtl + } + return 0 +} + +type WriteConflict struct { + StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` + ConflictTs uint64 `protobuf:"varint,2,opt,name=conflict_ts,json=conflictTs,proto3" json:"conflict_ts,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Primary []byte `protobuf:"bytes,4,opt,name=primary,proto3" json:"primary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteConflict) Reset() { *m = WriteConflict{} } +func (m *WriteConflict) String() string { return proto.CompactTextString(m) } +func (*WriteConflict) ProtoMessage() {} +func (*WriteConflict) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{26} +} +func (m *WriteConflict) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteConflict) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteConflict.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *WriteConflict) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteConflict.Merge(dst, src) +} +func (m *WriteConflict) XXX_Size() int { + return m.Size() +} +func (m *WriteConflict) XXX_DiscardUnknown() { + xxx_messageInfo_WriteConflict.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteConflict proto.InternalMessageInfo + +func (m *WriteConflict) GetStartTs() uint64 { + if m != nil { + return m.StartTs + } + return 0 +} + +func (m *WriteConflict) GetConflictTs() uint64 { + if m != nil { + return m.ConflictTs + } + return 0 +} + +func (m *WriteConflict) GetKey() []byte { + if m != nil { + return 
m.Key + } + return nil +} + +func (m *WriteConflict) GetPrimary() []byte { + if m != nil { + return m.Primary + } + return nil +} + +// Miscellaneous data present in each request. +type Context struct { + RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,2,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` + Peer *metapb.Peer `protobuf:"bytes,3,opt,name=peer" json:"peer,omitempty"` + Term uint64 `protobuf:"varint,5,opt,name=term,proto3" json:"term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Context) Reset() { *m = Context{} } +func (m *Context) String() string { return proto.CompactTextString(m) } +func (*Context) ProtoMessage() {} +func (*Context) Descriptor() ([]byte, []int) { + return fileDescriptor_kvrpcpb_5d022e43d1d7c564, []int{27} +} +func (m *Context) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Context) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Context.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Context) XXX_Merge(src proto.Message) { + xxx_messageInfo_Context.Merge(dst, src) +} +func (m *Context) XXX_Size() int { + return m.Size() +} +func (m *Context) XXX_DiscardUnknown() { + xxx_messageInfo_Context.DiscardUnknown(m) +} + +var xxx_messageInfo_Context proto.InternalMessageInfo + +func (m *Context) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *Context) GetRegionEpoch() *metapb.RegionEpoch { + if m != nil { + return m.RegionEpoch + } + return nil +} + +func (m *Context) GetPeer() *metapb.Peer { + if m != nil { + return m.Peer + } + return nil +} + +func (m *Context) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +func init() { + proto.RegisterType((*RawGetRequest)(nil), "kvrpcpb.RawGetRequest") + proto.RegisterType((*RawGetResponse)(nil), "kvrpcpb.RawGetResponse") + proto.RegisterType((*RawPutRequest)(nil), "kvrpcpb.RawPutRequest") + proto.RegisterType((*RawPutResponse)(nil), "kvrpcpb.RawPutResponse") + proto.RegisterType((*RawDeleteRequest)(nil), "kvrpcpb.RawDeleteRequest") + proto.RegisterType((*RawDeleteResponse)(nil), "kvrpcpb.RawDeleteResponse") + proto.RegisterType((*RawScanRequest)(nil), "kvrpcpb.RawScanRequest") + proto.RegisterType((*RawScanResponse)(nil), "kvrpcpb.RawScanResponse") + proto.RegisterType((*GetRequest)(nil), "kvrpcpb.GetRequest") + proto.RegisterType((*GetResponse)(nil), "kvrpcpb.GetResponse") + proto.RegisterType((*PrewriteRequest)(nil), "kvrpcpb.PrewriteRequest") + proto.RegisterType((*PrewriteResponse)(nil), "kvrpcpb.PrewriteResponse") + proto.RegisterType((*CommitRequest)(nil), "kvrpcpb.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "kvrpcpb.CommitResponse") + proto.RegisterType((*ScanRequest)(nil), "kvrpcpb.ScanRequest") + proto.RegisterType((*ScanResponse)(nil), "kvrpcpb.ScanResponse") + proto.RegisterType((*BatchRollbackRequest)(nil), "kvrpcpb.BatchRollbackRequest") + proto.RegisterType((*BatchRollbackResponse)(nil), "kvrpcpb.BatchRollbackResponse") + proto.RegisterType((*CheckTxnStatusRequest)(nil), "kvrpcpb.CheckTxnStatusRequest") + proto.RegisterType((*CheckTxnStatusResponse)(nil), "kvrpcpb.CheckTxnStatusResponse") + 
proto.RegisterType((*ResolveLockRequest)(nil), "kvrpcpb.ResolveLockRequest") + proto.RegisterType((*ResolveLockResponse)(nil), "kvrpcpb.ResolveLockResponse") + proto.RegisterType((*KvPair)(nil), "kvrpcpb.KvPair") + proto.RegisterType((*Mutation)(nil), "kvrpcpb.Mutation") + proto.RegisterType((*KeyError)(nil), "kvrpcpb.KeyError") + proto.RegisterType((*LockInfo)(nil), "kvrpcpb.LockInfo") + proto.RegisterType((*WriteConflict)(nil), "kvrpcpb.WriteConflict") + proto.RegisterType((*Context)(nil), "kvrpcpb.Context") + proto.RegisterEnum("kvrpcpb.Op", Op_name, Op_value) + proto.RegisterEnum("kvrpcpb.Action", Action_name, Action_value) +} +func (m *RawGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n1, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Cf) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RawGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n2, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.NotFound { + dAtA[i] = 0x20 + i++ + if m.NotFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RawPutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawPutRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n3, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if len(m.Cf) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m 
*RawPutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawPutResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n4, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RawDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n5, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Cf) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RawDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n6, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RawScanRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawScanRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n7, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if len(m.StartKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) + i += copy(dAtA[i:], m.StartKey) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit)) + } + if len(m.Cf) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RawScanResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawScanResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n8, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n9, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Version != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n10, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Error != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) + n11, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.NotFound { + dAtA[i] = 0x20 + i++ + if m.NotFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PrewriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrewriteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n12, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if len(m.Mutations) > 0 { + for _, msg := range m.Mutations { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PrimaryLock) > 0 { + dAtA[i] = 0x1a + i++ + 
i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock))) + i += copy(dAtA[i:], m.PrimaryLock) + } + if m.StartVersion != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) + } + if m.LockTtl != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PrewriteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrewriteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n13, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if len(m.Errors) > 0 { + for _, msg := range m.Errors { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CommitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n14, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.StartVersion != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + if m.CommitVersion != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CommitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n15, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.Error != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) + n16, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ScanRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScanRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n17, err := 
m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if len(m.StartKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) + i += copy(dAtA[i:], m.StartKey) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit)) + } + if m.Version != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ScanResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScanResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n18, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if len(m.Pairs) > 0 { + for _, msg := range m.Pairs { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *BatchRollbackRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BatchRollbackRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n19, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.StartVersion != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *BatchRollbackResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BatchRollbackResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n20, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.Error != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) + n21, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CheckTxnStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckTxnStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, 
uint64(m.Context.Size())) + n22, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.PrimaryKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryKey))) + i += copy(dAtA[i:], m.PrimaryKey) + } + if m.LockTs != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTs)) + } + if m.CurrentTs != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CurrentTs)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CheckTxnStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckTxnStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n23, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.LockTtl != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) + } + if m.CommitVersion != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) + } + if m.Action != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Action)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResolveLockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveLockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Context != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) + n24, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.StartVersion != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) + } + if m.CommitVersion != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResolveLockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveLockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionError != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) + n25, err := m.RegionError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.Error != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) + n26, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *KvPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KvPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Error != nil { 
+ dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) + n27, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Mutation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mutation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Op != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Op)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *KeyError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Locked != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Locked.Size())) + n28, err := m.Locked.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if len(m.Retryable) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Retryable))) + i += copy(dAtA[i:], m.Retryable) + } + if len(m.Abort) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Abort))) + i += copy(dAtA[i:], m.Abort) + } + if m.Conflict != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Conflict.Size())) + n29, err := m.Conflict.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *LockInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PrimaryLock) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock))) + i += copy(dAtA[i:], m.PrimaryLock) + } + if m.LockVersion != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockVersion)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.LockTtl != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *WriteConflict) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteConflict) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) + } + if m.ConflictTs != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ConflictTs)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Primary) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Primary))) + i += copy(dAtA[i:], m.Primary) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Context) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Context) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionId)) + } + if m.RegionEpoch != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionEpoch.Size())) + n30, err := m.RegionEpoch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Peer != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Peer.Size())) + n31, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.Term != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintKvrpcpb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RawGetRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawGetResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.NotFound { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawPutRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawPutResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawDeleteRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 
1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawDeleteResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawScanRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovKvrpcpb(uint64(m.Limit)) + } + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RawScanResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Version != 0 { + n += 1 + sovKvrpcpb(uint64(m.Version)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.NotFound { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PrewriteRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if len(m.Mutations) > 0 { + for _, e := range m.Mutations { + l = e.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + } + l = len(m.PrimaryLock) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.StartVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.StartVersion)) + } + if m.LockTtl != 0 { + n += 1 + sovKvrpcpb(uint64(m.LockTtl)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PrewriteResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if len(m.Errors) > 0 { + for _, e := range m.Errors { + l = e.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CommitRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.StartVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.StartVersion)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovKvrpcpb(uint64(l)) + } + } + if m.CommitVersion != 0 { + 
n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CommitResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ScanRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovKvrpcpb(uint64(m.Limit)) + } + if m.Version != 0 { + n += 1 + sovKvrpcpb(uint64(m.Version)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ScanResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if len(m.Pairs) > 0 { + for _, e := range m.Pairs { + l = e.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BatchRollbackRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.StartVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.StartVersion)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovKvrpcpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BatchRollbackResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CheckTxnStatusRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.PrimaryKey) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.LockTs != 0 { + n += 1 + sovKvrpcpb(uint64(m.LockTs)) + } + if m.CurrentTs != 0 { + n += 1 + sovKvrpcpb(uint64(m.CurrentTs)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CheckTxnStatusResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.LockTtl != 0 { + n += 1 + sovKvrpcpb(uint64(m.LockTtl)) + } + if m.CommitVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) + } + if m.Action != 0 { + n += 1 + sovKvrpcpb(uint64(m.Action)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveLockRequest) Size() (n int) { + var l int + _ = l + if m.Context != nil { + l = m.Context.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.StartVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.StartVersion)) + } + if m.CommitVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveLockResponse) Size() (n int) { + var l int + _ = l + if m.RegionError != nil { + l = m.RegionError.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + 
sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *KvPair) Size() (n int) { + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mutation) Size() (n int) { + var l int + _ = l + if m.Op != 0 { + n += 1 + sovKvrpcpb(uint64(m.Op)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *KeyError) Size() (n int) { + var l int + _ = l + if m.Locked != nil { + l = m.Locked.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Retryable) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Abort) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Conflict != nil { + l = m.Conflict.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LockInfo) Size() (n int) { + var l int + _ = l + l = len(m.PrimaryLock) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.LockVersion != 0 { + n += 1 + sovKvrpcpb(uint64(m.LockVersion)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.LockTtl != 0 { + n += 1 + sovKvrpcpb(uint64(m.LockTtl)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WriteConflict) Size() (n int) { + var l int + _ = l + if m.StartTs != 0 { + n += 1 + sovKvrpcpb(uint64(m.StartTs)) + } + if m.ConflictTs != 0 { + n += 1 + sovKvrpcpb(uint64(m.ConflictTs)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + l = len(m.Primary) + if l > 0 { + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Context) Size() (n int) { + var l int + _ = l + if m.RegionId != 0 { + n += 1 + sovKvrpcpb(uint64(m.RegionId)) + } + if m.RegionEpoch != nil { + l = m.RegionEpoch.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovKvrpcpb(uint64(l)) + } + if m.Term != 0 { + n += 1 + sovKvrpcpb(uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovKvrpcpb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKvrpcpb(x uint64) (n int) { + return sovKvrpcpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RawGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
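+			// Unrecognized fields are retained in XXX_unrecognized so that
+			// MarshalTo can re-emit them verbatim; the index below simply
+			// skips past their bytes.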
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NotFound = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawPutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawPutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawPutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawPutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawPutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawPutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawScanRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawScanRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawScanRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) + if m.StartKey == nil { + m.StartKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawScanResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawScanResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &KvPair{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &KeyError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NotFound = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrewriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrewriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrewriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mutations = append(m.Mutations, &Mutation{}) + if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PrimaryLock == nil { + m.PrimaryLock = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) + } + m.StartVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) + } + m.LockTtl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LockTtl |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrewriteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrewriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrewriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Errors = append(m.Errors, &KeyError{}) + if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) + } + m.StartVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) + } + m.CommitVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommitVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &KeyError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScanRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScanRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScanRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) + if m.StartKey == nil { + m.StartKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScanResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScanResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pairs = append(m.Pairs, &KvPair{}) + if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BatchRollbackRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BatchRollbackRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BatchRollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) + } + m.StartVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BatchRollbackResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BatchRollbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BatchRollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &KeyError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckTxnStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckTxnStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckTxnStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryKey = append(m.PrimaryKey[:0], dAtA[iNdEx:postIndex]...) + if m.PrimaryKey == nil { + m.PrimaryKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockTs", wireType) + } + m.LockTs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LockTs |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentTs", wireType) + } + m.CurrentTs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentTs |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckTxnStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckTxnStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckTxnStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) + } + m.LockTtl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LockTtl |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) + } + m.CommitVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommitVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (Action(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveLockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveLockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveLockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = &Context{} + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) + } + m.StartVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) + } + m.CommitVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommitVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveLockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveLockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveLockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionError == nil { + m.RegionError = &errorpb.Error{} + } + if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &KeyError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KvPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KvPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KvPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &KeyError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mutation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mutation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + m.Op = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Op |= (Op(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Locked", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Locked == nil { + m.Locked = &LockInfo{} + } + if err := m.Locked.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Retryable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Retryable = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Abort", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Abort = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conflict", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conflict == nil { + m.Conflict = &WriteConflict{} + } + if err := m.Conflict.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LockInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...) + if m.PrimaryLock == nil { + m.PrimaryLock = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockVersion", wireType) + } + m.LockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LockVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) + } + m.LockTtl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LockTtl |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteConflict) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteConflict: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteConflict: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) + } + m.StartTs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTs |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConflictTs", wireType) + } + m.ConflictTs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConflictTs |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Primary = append(m.Primary[:0], dAtA[iNdEx:postIndex]...) + if m.Primary == nil { + m.Primary = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Context) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Context: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Context: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionEpoch == nil { + m.RegionEpoch = &metapb.RegionEpoch{} + } + if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKvrpcpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &metapb.Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKvrpcpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKvrpcpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
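+ // Note: any field tag not handled by the cases above lands in this default branch.
+ // Its raw bytes (tag plus payload, skippy bytes in total as measured by skipKvrpcpb)
+ // were appended to XXX_unrecognized, and advancing iNdEx resumes decoding at the next tag.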
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKvrpcpb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKvrpcpb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKvrpcpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKvrpcpb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKvrpcpb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKvrpcpb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("kvrpcpb.proto", fileDescriptor_kvrpcpb_5d022e43d1d7c564) } + +var fileDescriptor_kvrpcpb_5d022e43d1d7c564 = []byte{ + // 1077 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xac, 0x1d, 0x7b, 0xfd, 0x76, 0xed, 0x38, 0xd3, 0xa4, 0x2c, 0x0d, 0x04, 0x67, 0x51, + 0xd5, 0x90, 0x43, 0x2a, 0x8c, 0xc4, 0x9d, 0xa6, 0xa1, 0xaa, 0x5a, 0x9a, 0x68, 0x6a, 0x81, 0x2a, + 0x81, 0xc2, 0x66, 0x33, 0x49, 0x56, 0x5e, 0xef, 0x6c, 0x67, 0xc7, 0x4e, 0x22, 0x54, 0x71, 0xe3, + 0xc4, 0x91, 0x03, 0x12, 0xe5, 0x6b, 0xf0, 0x19, 0x38, 0xc2, 0x37, 0x40, 0xe1, 0x8b, 0xa0, 0xf9, + 0xb3, 0xbb, 0x76, 0x1c, 0x89, 0xc8, 0x4d, 0x72, 0xf2, 0xbc, 0x3f, 0x3b, 0xef, 0xf7, 0xde, 0xfc, + 0xde, 0x9b, 0x31, 0x34, 0xfb, 0x23, 0x9e, 0x86, 0xe9, 0xfe, 0x66, 0xca, 0x99, 0x60, 0xb8, 0x6e, + 0xc4, 0x7b, 0xee, 0x80, 0x8a, 0x20, 0x57, 0xdf, 0x6b, 0x52, 0xce, 0x19, 0x2f, 0xc4, 0xa5, 0x23, + 0x76, 0xc4, 0xd4, 0xf2, 0xa1, 0x5c, 0x69, 0xad, 0xff, 0x1d, 0x34, 0x49, 0x70, 0xf2, 0x84, 0x0a, + 0x42, 0x5f, 0x0f, 0x69, 0x26, 0xf0, 0x06, 0xd4, 0x43, 0x96, 0x08, 0x7a, 0x2a, 0x3c, 0xd4, 0x41, + 0xeb, 0x4e, 0xb7, 0xbd, 0x99, 0x47, 0xdb, 0xd2, 0x7a, 0x92, 0x3b, 0xe0, 0x36, 0x54, 0xfa, 0xf4, + 0xcc, 0xb3, 0x3a, 0x68, 0xdd, 0x25, 0x72, 0x89, 0x5b, 0x60, 0x85, 0x87, 0x5e, 0xa5, 0x83, 0xd6, + 0x1b, 0xc4, 0x0a, 0x0f, 0xfd, 0x9f, 0x11, 0xb4, 0xf2, 0xfd, 0xb3, 0x94, 0x25, 0x19, 0xc5, 0x9f, + 0x82, 0xcb, 0xe9, 0x51, 0xc4, 0x92, 0x3d, 
0x85, 0xcf, 0x44, 0x69, 0x6d, 0xe6, 0x68, 0xb7, 0xe5, + 0x2f, 0x71, 0xb4, 0x8f, 0x12, 0xf0, 0x12, 0xcc, 0x6b, 0x5f, 0x4b, 0x6d, 0xac, 0x05, 0xa9, 0x1d, + 0x05, 0xf1, 0x90, 0xaa, 0x70, 0x2e, 0xd1, 0x02, 0x5e, 0x81, 0x46, 0xc2, 0xc4, 0xde, 0x21, 0x1b, + 0x26, 0x07, 0x5e, 0xb5, 0x83, 0xd6, 0x6d, 0x62, 0x27, 0x4c, 0x7c, 0x29, 0x65, 0x3f, 0x53, 0xd9, + 0xee, 0x0e, 0xaf, 0x29, 0xdb, 0xcb, 0x11, 0xe8, 0x1a, 0x54, 0x8b, 0x1a, 0xbc, 0x52, 0x25, 0x50, + 0x41, 0xaf, 0xb9, 0x04, 0xfe, 0xf7, 0xd0, 0x26, 0xc1, 0xc9, 0x63, 0x1a, 0x53, 0x41, 0x6f, 0xe6, + 0x00, 0xbf, 0x85, 0xc5, 0xb1, 0x08, 0xd7, 0x8d, 0xff, 0x47, 0x55, 0x9a, 0x97, 0x61, 0x90, 0xcc, + 0x82, 0x7e, 0x05, 0x1a, 0x99, 0x08, 0xb8, 0xd8, 0x2b, 0x73, 0xb0, 0x95, 0xe2, 0x99, 0x3e, 0x9b, + 0x38, 0x1a, 0x44, 0x42, 0xe5, 0xd2, 0x24, 0x5a, 0x98, 0x3a, 0x9b, 0x37, 0xb0, 0x50, 0x00, 0xb8, + 0x6e, 0x7e, 0xae, 0x41, 0xa5, 0x3f, 0xca, 0xbc, 0x4a, 0xa7, 0xb2, 0xee, 0x74, 0x17, 0x8a, 0x34, + 0x9e, 0x8d, 0x76, 0x83, 0x88, 0x13, 0x69, 0xf3, 0x0f, 0x00, 0xae, 0xad, 0xf5, 0x3c, 0xa8, 0x8f, + 0x28, 0xcf, 0x22, 0x96, 0xa8, 0x94, 0xab, 0x24, 0x17, 0xfd, 0xb7, 0x08, 0x9c, 0x77, 0xec, 0xc0, + 0x07, 0xe3, 0x19, 0x3a, 0xdd, 0xc5, 0x32, 0x1b, 0x7a, 0xa6, 0xdd, 0x67, 0x6f, 0xca, 0xbf, 0x11, + 0x2c, 0xec, 0x72, 0x7a, 0xc2, 0xa3, 0xd9, 0x48, 0xfc, 0x10, 0x1a, 0x83, 0xa1, 0x08, 0x44, 0xc4, + 0x92, 0xcc, 0xb3, 0x54, 0xb5, 0x4b, 0x7c, 0x5f, 0x19, 0x0b, 0x29, 0x7d, 0xf0, 0x1a, 0xb8, 0x29, + 0x8f, 0x06, 0x01, 0x3f, 0xdb, 0x8b, 0x59, 0xd8, 0x37, 0x50, 0x1d, 0xa3, 0x7b, 0xce, 0xc2, 0x3e, + 0xfe, 0x18, 0x9a, 0x9a, 0x5a, 0x79, 0x49, 0xab, 0xaa, 0xa4, 0xae, 0x52, 0x7e, 0xad, 0x75, 0xf8, + 0x7d, 0xb0, 0xe5, 0xf7, 0x7b, 0x42, 0xc4, 0xde, 0xbc, 0x2e, 0xb9, 0x94, 0x7b, 0x22, 0xf6, 0x53, + 0x68, 0x97, 0x29, 0xcd, 0x5e, 0xf6, 0x4f, 0xa0, 0xa6, 0xac, 0xd3, 0x79, 0x15, 0x75, 0x37, 0x0e, + 0xfe, 0x6f, 0x08, 0x9a, 0x5b, 0x6c, 0x30, 0x88, 0x66, 0xa2, 0xd3, 0x54, 0xbe, 0xd6, 0x25, 0xf9, + 0x62, 0xa8, 0xf6, 0xe9, 0x99, 0x66, 0xb4, 0x4b, 0xd4, 0x1a, 0xdf, 0x87, 0x56, 0xa8, 0xa2, 0x5e, + 0xa8, 0x54, 0x53, 0x6b, 0xcd, 0xa7, 0x7e, 0x0c, 0xad, 0x1c, 0xdc, 0xcd, 0x93, 0xd0, 0xff, 0x09, + 0x81, 0x73, 0x8b, 0x43, 0x65, 0xac, 0xf3, 0xaa, 0x93, 0x9d, 0x77, 0x0c, 0xee, 0xbb, 0xce, 0x96, + 0xfb, 0x30, 0x9f, 0x06, 0x51, 0xc1, 0x80, 0xa9, 0x39, 0xa2, 0xad, 0xfe, 0x0f, 0xb0, 0xf4, 0x28, + 0x10, 0xe1, 0x31, 0x61, 0x71, 0xbc, 0x1f, 0x84, 0xfd, 0xdb, 0x24, 0x81, 0x9f, 0xc1, 0xf2, 0x85, + 0xe0, 0xb7, 0x70, 0xc8, 0x6f, 0x11, 0x2c, 0x6f, 0x1d, 0xd3, 0xb0, 0xdf, 0x3b, 0x4d, 0x5e, 0x8a, + 0x40, 0x0c, 0xb3, 0x59, 0x72, 0xfe, 0x08, 0xf2, 0xbe, 0x1f, 0x3b, 0x70, 0x30, 0x2a, 0x79, 0xe4, + 0xef, 0x41, 0x5d, 0x37, 0x79, 0x66, 0xc6, 0x6a, 0x4d, 0xf5, 0x78, 0x86, 0x3f, 0x04, 0x08, 0x87, + 0x9c, 0xd3, 0x44, 0x48, 0x9b, 0x3e, 0xf8, 0x86, 0xd1, 0xf4, 0x32, 0xff, 0x0f, 0x04, 0x77, 0x2f, + 0xc2, 0x9b, 0xbd, 0x2a, 0xe3, 0xa3, 0xc6, 0x9a, 0x18, 0x35, 0x97, 0x74, 0x60, 0xe5, 0x92, 0x0e, + 0xc4, 0x0f, 0xa0, 0x16, 0x84, 0x22, 0xe7, 0x68, 0x6b, 0x8c, 0x48, 0x5f, 0x28, 0x35, 0x31, 0x66, + 0xf9, 0x64, 0xc3, 0x84, 0x66, 0x2c, 0x1e, 0x51, 0x39, 0x0a, 0x6f, 0x8c, 0x48, 0x57, 0xc3, 0xed, + 0xbf, 0x86, 0x3b, 0x13, 0x68, 0x6e, 0x81, 0x59, 0xaf, 0xa0, 0xa6, 0x9b, 0xab, 0xfc, 0x04, 0xfd, + 0xcf, 0xb5, 0x77, 0xc5, 0xb7, 0xa1, 0xbf, 0x03, 0x76, 0x7e, 0x23, 0xe1, 0x15, 0xb0, 0x58, 0xaa, + 0x76, 0x6e, 0x75, 0x9d, 0x62, 0xe7, 0x9d, 0x94, 0x58, 0x2c, 0xbd, 0xf2, 0x86, 0xbf, 0x23, 0xb0, + 0x73, 0x30, 0xf2, 0xba, 0x90, 0xac, 0xa0, 0x07, 0x53, 0x78, 0x65, 0xed, 0x9e, 0x26, 0x87, 0x8c, + 0x18, 0x07, 0xfc, 0x01, 0x34, 0x38, 0x15, 0xfc, 0x2c, 0xd8, 0x8f, 
0xa9, 0x79, 0xb6, 0x94, 0x0a, + 0x19, 0x2b, 0xd8, 0x67, 0x5c, 0x98, 0x87, 0xa0, 0x16, 0x70, 0x17, 0xec, 0x90, 0x25, 0x87, 0x71, + 0x14, 0x0a, 0x45, 0x22, 0xa7, 0x7b, 0xb7, 0x08, 0xf0, 0x8d, 0xbc, 0xea, 0xb6, 0x8c, 0x95, 0x14, + 0x7e, 0xfe, 0x1b, 0xb0, 0xf3, 0xd8, 0x53, 0xf7, 0x2e, 0x9a, 0xbe, 0x77, 0xd7, 0xc0, 0x55, 0x3c, + 0x9f, 0x24, 0x8e, 0x23, 0x75, 0x39, 0x6f, 0x4c, 0x65, 0x2a, 0x65, 0x65, 0xc6, 0x9b, 0xa3, 0x3a, + 0x79, 0x0f, 0x9f, 0x40, 0x73, 0x02, 0x99, 0xf4, 0xd5, 0xd4, 0x14, 0x99, 0x8a, 0x5f, 0x25, 0x75, + 0x25, 0xf7, 0x32, 0x39, 0x0a, 0x72, 0xd8, 0xd2, 0xaa, 0x43, 0x43, 0xae, 0xea, 0x65, 0x97, 0x44, + 0xf6, 0xa0, 0x6e, 0xd0, 0xab, 0xc0, 0x2e, 0xc9, 0x45, 0xff, 0x17, 0x04, 0xf5, 0xad, 0xf2, 0x4a, + 0x31, 0x5c, 0x8d, 0x0e, 0x4c, 0x50, 0x5b, 0x2b, 0x9e, 0x1e, 0xe0, 0xcf, 0x4b, 0x22, 0xa7, 0x2c, + 0x3c, 0x36, 0xe4, 0xbc, 0xb3, 0x69, 0xfe, 0xca, 0x11, 0x4d, 0x60, 0x69, 0x2a, 0xd8, 0x2c, 0x05, + 0xdc, 0x81, 0x6a, 0x4a, 0x29, 0x57, 0x68, 0x9c, 0xae, 0x9b, 0xfb, 0xef, 0x52, 0xca, 0x89, 0xb2, + 0xc8, 0x49, 0x2d, 0x28, 0x1f, 0x98, 0xa7, 0x89, 0x5a, 0x6f, 0x6c, 0x82, 0xb5, 0x93, 0xe2, 0x3a, + 0x54, 0x76, 0x87, 0xa2, 0x3d, 0x27, 0x17, 0x8f, 0x69, 0xdc, 0x46, 0xd8, 0x05, 0x3b, 0x1f, 0xde, + 0x6d, 0x0b, 0xdb, 0x50, 0x95, 0xa7, 0xd1, 0xae, 0x6c, 0x3c, 0x81, 0x9a, 0x1e, 0x0f, 0xd2, 0xe3, + 0x05, 0xd3, 0xeb, 0xf6, 0x1c, 0x5e, 0x86, 0xc5, 0x5e, 0xef, 0xf9, 0xf6, 0x69, 0x1a, 0x71, 0x5a, + 0x7c, 0x88, 0xb0, 0x07, 0x4b, 0xf2, 0xc3, 0x17, 0x4c, 0x6c, 0x9f, 0x46, 0x99, 0x28, 0xb7, 0x7c, + 0xd4, 0xfe, 0xf3, 0x7c, 0x15, 0xfd, 0x75, 0xbe, 0x8a, 0xfe, 0x39, 0x5f, 0x45, 0xbf, 0xfe, 0xbb, + 0x3a, 0xb7, 0x5f, 0x53, 0x7f, 0x40, 0x3f, 0xfb, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x59, 0xd6, + 0x79, 0xcd, 0x0e, 0x00, 0x00, +} diff --git a/proto/pkg/metapb/metapb.pb.go b/proto/pkg/metapb/metapb.pb.go new file mode 100644 index 00000000..fd4e14b3 --- /dev/null +++ b/proto/pkg/metapb/metapb.pb.go @@ -0,0 +1,1395 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: metapb.proto + +package metapb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StoreState int32 + +const ( + StoreState_Up StoreState = 0 + StoreState_Offline StoreState = 1 + StoreState_Tombstone StoreState = 2 +) + +var StoreState_name = map[int32]string{ + 0: "Up", + 1: "Offline", + 2: "Tombstone", +} +var StoreState_value = map[string]int32{ + "Up": 0, + "Offline": 1, + "Tombstone": 2, +} + +func (x StoreState) String() string { + return proto.EnumName(StoreState_name, int32(x)) +} +func (StoreState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metapb_33de520265e54ab4, []int{0} +} + +type Cluster struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // max peer count for a region. + // scheduler will do the auto-balance if region peer count mismatches. 
+ MaxPeerCount uint32 `protobuf:"varint,2,opt,name=max_peer_count,json=maxPeerCount,proto3" json:"max_peer_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_33de520265e54ab4, []int{0} +} +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(dst, src) +} +func (m *Cluster) XXX_Size() int { + return m.Size() +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Cluster) GetMaxPeerCount() uint32 { + if m != nil { + return m.MaxPeerCount + } + return 0 +} + +type Store struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Address to handle client requests (kv, cop, etc.) + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + State StoreState `protobuf:"varint,3,opt,name=state,proto3,enum=metapb.StoreState" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Store) Reset() { *m = Store{} } +func (m *Store) String() string { return proto.CompactTextString(m) } +func (*Store) ProtoMessage() {} +func (*Store) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_33de520265e54ab4, []int{1} +} +func (m *Store) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Store) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Store.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Store) XXX_Merge(src proto.Message) { + xxx_messageInfo_Store.Merge(dst, src) +} +func (m *Store) XXX_Size() int { + return m.Size() +} +func (m *Store) XXX_DiscardUnknown() { + xxx_messageInfo_Store.DiscardUnknown(m) +} + +var xxx_messageInfo_Store proto.InternalMessageInfo + +func (m *Store) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Store) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Store) GetState() StoreState { + if m != nil { + return m.State + } + return StoreState_Up +} + +type RegionEpoch struct { + // Conf change version, auto increment when add or remove peer + ConfVer uint64 `protobuf:"varint,1,opt,name=conf_ver,json=confVer,proto3" json:"conf_ver,omitempty"` + // Region version, auto increment when split or merge + Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionEpoch) Reset() { *m = RegionEpoch{} } +func (m *RegionEpoch) String() 
string { return proto.CompactTextString(m) } +func (*RegionEpoch) ProtoMessage() {} +func (*RegionEpoch) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_33de520265e54ab4, []int{2} +} +func (m *RegionEpoch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionEpoch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionEpoch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionEpoch) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionEpoch.Merge(dst, src) +} +func (m *RegionEpoch) XXX_Size() int { + return m.Size() +} +func (m *RegionEpoch) XXX_DiscardUnknown() { + xxx_messageInfo_RegionEpoch.DiscardUnknown(m) +} + +var xxx_messageInfo_RegionEpoch proto.InternalMessageInfo + +func (m *RegionEpoch) GetConfVer() uint64 { + if m != nil { + return m.ConfVer + } + return 0 +} + +func (m *RegionEpoch) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + +type Region struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Region key range [start_key, end_key). + StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + RegionEpoch *RegionEpoch `protobuf:"bytes,4,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` + Peers []*Peer `protobuf:"bytes,5,rep,name=peers" json:"peers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Region) Reset() { *m = Region{} } +func (m *Region) String() string { return proto.CompactTextString(m) } +func (*Region) ProtoMessage() {} +func (*Region) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_33de520265e54ab4, []int{3} +} +func (m *Region) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Region) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Region.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Region) XXX_Merge(src proto.Message) { + xxx_messageInfo_Region.Merge(dst, src) +} +func (m *Region) XXX_Size() int { + return m.Size() +} +func (m *Region) XXX_DiscardUnknown() { + xxx_messageInfo_Region.DiscardUnknown(m) +} + +var xxx_messageInfo_Region proto.InternalMessageInfo + +func (m *Region) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Region) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *Region) GetEndKey() []byte { + if m != nil { + return m.EndKey + } + return nil +} + +func (m *Region) GetRegionEpoch() *RegionEpoch { + if m != nil { + return m.RegionEpoch + } + return nil +} + +func (m *Region) GetPeers() []*Peer { + if m != nil { + return m.Peers + } + return nil +} + +type Peer struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Peer) Reset() { *m = Peer{} } +func (m *Peer) String() string { return 
proto.CompactTextString(m) } +func (*Peer) ProtoMessage() {} +func (*Peer) Descriptor() ([]byte, []int) { + return fileDescriptor_metapb_33de520265e54ab4, []int{4} +} +func (m *Peer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Peer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Peer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Peer.Merge(dst, src) +} +func (m *Peer) XXX_Size() int { + return m.Size() +} +func (m *Peer) XXX_DiscardUnknown() { + xxx_messageInfo_Peer.DiscardUnknown(m) +} + +var xxx_messageInfo_Peer proto.InternalMessageInfo + +func (m *Peer) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Peer) GetStoreId() uint64 { + if m != nil { + return m.StoreId + } + return 0 +} + +func init() { + proto.RegisterType((*Cluster)(nil), "metapb.Cluster") + proto.RegisterType((*Store)(nil), "metapb.Store") + proto.RegisterType((*RegionEpoch)(nil), "metapb.RegionEpoch") + proto.RegisterType((*Region)(nil), "metapb.Region") + proto.RegisterType((*Peer)(nil), "metapb.Peer") + proto.RegisterEnum("metapb.StoreState", StoreState_name, StoreState_value) +} +func (m *Cluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) + } + if m.MaxPeerCount != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.MaxPeerCount)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Store) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Store) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) + } + if len(m.Address) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) + } + if m.State != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.State)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RegionEpoch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegionEpoch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ConfVer != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.ConfVer)) + } + if m.Version != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.Version)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Region) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Region) MarshalTo(dAtA []byte) (int, error) { + var i 
int + _ = i + var l int + _ = l + if m.Id != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) + } + if len(m.StartKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(len(m.StartKey))) + i += copy(dAtA[i:], m.StartKey) + } + if len(m.EndKey) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMetapb(dAtA, i, uint64(len(m.EndKey))) + i += copy(dAtA[i:], m.EndKey) + } + if m.RegionEpoch != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.RegionEpoch.Size())) + n1, err := m.RegionEpoch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Peers) > 0 { + for _, msg := range m.Peers { + dAtA[i] = 0x2a + i++ + i = encodeVarintMetapb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Peer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Peer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.Id)) + } + if m.StoreId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetapb(dAtA, i, uint64(m.StoreId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintMetapb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Cluster) Size() (n int) { + var l int + _ = l + if m.Id != 0 { + n += 1 + sovMetapb(uint64(m.Id)) + } + if m.MaxPeerCount != 0 { + n += 1 + sovMetapb(uint64(m.MaxPeerCount)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Store) Size() (n int) { + var l int + _ = l + if m.Id != 0 { + n += 1 + sovMetapb(uint64(m.Id)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + sovMetapb(uint64(l)) + } + if m.State != 0 { + n += 1 + sovMetapb(uint64(m.State)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RegionEpoch) Size() (n int) { + var l int + _ = l + if m.ConfVer != 0 { + n += 1 + sovMetapb(uint64(m.ConfVer)) + } + if m.Version != 0 { + n += 1 + sovMetapb(uint64(m.Version)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Region) Size() (n int) { + var l int + _ = l + if m.Id != 0 { + n += 1 + sovMetapb(uint64(m.Id)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovMetapb(uint64(l)) + } + l = len(m.EndKey) + if l > 0 { + n += 1 + l + sovMetapb(uint64(l)) + } + if m.RegionEpoch != nil { + l = m.RegionEpoch.Size() + n += 1 + l + sovMetapb(uint64(l)) + } + if len(m.Peers) > 0 { + for _, e := range m.Peers { + l = e.Size() + n += 1 + l + sovMetapb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Peer) Size() (n int) { + var l int + _ = l + if m.Id != 0 { + n += 1 + sovMetapb(uint64(m.Id)) + } + if m.StoreId != 0 { + n += 1 + sovMetapb(uint64(m.StoreId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetapb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMetapb(x uint64) (n 
int) { + return sovMetapb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPeerCount", wireType) + } + m.MaxPeerCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPeerCount |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetapb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetapb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
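+ // Note: preserving the skipped bytes in XXX_unrecognized lets a later Marshal
+ // round-trip fields added by newer revisions of metapb.proto.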
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Store) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Store: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Store: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetapb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (StoreState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetapb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetapb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegionEpoch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegionEpoch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegionEpoch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfVer", wireType) + } + m.ConfVer = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConfVer |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetapb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetapb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Region) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Region: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetapb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) 
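+ // Note: the append above copies the key bytes while reusing StartKey's existing
+ // backing array; the nil check below turns an empty payload into a non-nil,
+ // zero-length slice.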
+ if m.StartKey == nil { + m.StartKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetapb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) + if m.EndKey == nil { + m.EndKey = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetapb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionEpoch == nil { + m.RegionEpoch = &RegionEpoch{} + } + if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetapb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Peers = append(m.Peers, &Peer{}) + if err := m.Peers[len(m.Peers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetapb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetapb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType) + } + m.StoreId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetapb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetapb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetapb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetapb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetapb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetapb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetapb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMetapb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetapb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMetapb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMetapb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetapb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("metapb.proto", fileDescriptor_metapb_33de520265e54ab4) } + +var fileDescriptor_metapb_33de520265e54ab4 = []byte{ + // 390 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xdd, 0x8a, 0xd3, 0x40, + 0x18, 0xdd, 0x49, 0xf3, 0xd3, 0x7e, 0xc9, 0x96, 0x30, 0x0a, 0x66, 0x15, 0x42, 0x08, 0x5e, 0x04, + 0x2f, 0x56, 0xad, 0xe0, 0xad, 0xb0, 0x8b, 0x17, 0xe2, 0x85, 0x32, 0xab, 0xde, 0x78, 0x11, 0xd2, + 0xce, 0xd7, 0x1a, 0xdc, 0xcc, 0x84, 0x99, 0xd9, 0xa5, 0x7d, 0x13, 0x9f, 0xc1, 0x27, 0xf1, 0xd2, + 0x47, 0x90, 0xfa, 0x22, 0x32, 0x93, 0x06, 0x0b, 0xbd, 0xcb, 0xf9, 0x4e, 0xce, 0x77, 0xce, 0x77, + 0x18, 0x48, 0x3a, 0x34, 0x4d, 0xbf, 0xbc, 0xec, 0x95, 0x34, 0x92, 0x86, 0x03, 0x7a, 0xfc, 0x70, + 0x23, 0x37, 0xd2, 0x8d, 0x9e, 0xdb, 0xaf, 0x81, 0x2d, 0xdf, 0x40, 0x74, 0x7d, 0x7b, 0xa7, 0x0d, + 0x2a, 0x3a, 0x07, 0xaf, 0xe5, 0x19, 0x29, 0x48, 0xe5, 0x33, 0xaf, 0xe5, 0xf4, 0x29, 0xcc, 0xbb, + 0x66, 0x5b, 0xf7, 0x88, 0xaa, 0x5e, 0xc9, 0x3b, 0x61, 0x32, 0xaf, 0x20, 0xd5, 0x39, 0x4b, 0xba, + 0x66, 0xfb, 0x11, 0x51, 0x5d, 0xdb, 0x59, 0xf9, 0x15, 0x82, 0x1b, 0x23, 0x15, 0x9e, 0xc8, 0x33, + 0x88, 0x1a, 0xce, 0x15, 0x6a, 0xed, 0x74, 0x33, 0x36, 0x42, 0x5a, 0x41, 0xa0, 0x4d, 0x63, 0x30, + 0x9b, 0x14, 0xa4, 0x9a, 0x2f, 0xe8, 0xe5, 0x21, 0xaf, 0xdb, 0x73, 0x63, 0x19, 0x36, 0xfc, 0x50, + 0x5e, 0x41, 0xcc, 0x70, 0xd3, 0x4a, 0xf1, 0xb6, 0x97, 0xab, 0x6f, 0xf4, 0x02, 0xa6, 0x2b, 0x29, + 0xd6, 0xf5, 0x3d, 0xaa, 0x83, 0x51, 0x64, 0xf1, 0x17, 0x54, 
0xd6, 0xed, 0x1e, 0x95, 0x6e, 0xa5, + 0x70, 0x6e, 0x3e, 0x1b, 0x61, 0xf9, 0x93, 0x40, 0x38, 0x2c, 0x39, 0x89, 0xf8, 0x04, 0x66, 0xda, + 0x34, 0xca, 0xd4, 0xdf, 0x71, 0xe7, 0x64, 0x09, 0x9b, 0xba, 0xc1, 0x7b, 0xdc, 0xd1, 0x47, 0x10, + 0xa1, 0xe0, 0x8e, 0x9a, 0x38, 0x2a, 0x44, 0xc1, 0x2d, 0xf1, 0x1a, 0x12, 0xe5, 0xf6, 0xd5, 0x68, + 0x53, 0x65, 0x7e, 0x41, 0xaa, 0x78, 0xf1, 0x60, 0xbc, 0xe2, 0x28, 0x30, 0x8b, 0xd5, 0x51, 0xfa, + 0x12, 0x02, 0xdb, 0xa5, 0xce, 0x82, 0x62, 0x52, 0xc5, 0x8b, 0x64, 0x14, 0xd8, 0x2e, 0xd9, 0x40, + 0x95, 0x2f, 0xc1, 0xb7, 0xf0, 0x24, 0xe9, 0x05, 0x4c, 0xb5, 0x6d, 0xa7, 0x6e, 0xf9, 0x78, 0x9f, + 0xc3, 0xef, 0xf8, 0xb3, 0x17, 0x00, 0xff, 0x8b, 0xa3, 0x21, 0x78, 0x9f, 0xfb, 0xf4, 0x8c, 0xc6, + 0x10, 0x7d, 0x58, 0xaf, 0x6f, 0x5b, 0x81, 0x29, 0xa1, 0xe7, 0x30, 0xfb, 0x24, 0xbb, 0xa5, 0x36, + 0x52, 0x60, 0xea, 0x5d, 0xa5, 0xbf, 0xf6, 0x39, 0xf9, 0xbd, 0xcf, 0xc9, 0x9f, 0x7d, 0x4e, 0x7e, + 0xfc, 0xcd, 0xcf, 0x96, 0xa1, 0x7b, 0x0c, 0xaf, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0x77, 0xf7, + 0x52, 0x80, 0x3a, 0x02, 0x00, 0x00, +} diff --git a/proto/pkg/raft_cmdpb/raft_cmdpb.pb.go b/proto/pkg/raft_cmdpb/raft_cmdpb.pb.go new file mode 100644 index 00000000..68321e44 --- /dev/null +++ b/proto/pkg/raft_cmdpb/raft_cmdpb.pb.go @@ -0,0 +1,5829 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft_cmdpb.proto + +package raft_cmdpb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + eraftpb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + errorpb "github.com/pingcap-incubator/tinykv/proto/pkg/errorpb" + + metapb "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
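// ---------------------------------------------------------------------------
// Illustrative sketch (editor's example, not emitted by protoc-gen-gogo and
// not part of this commit): one way a caller could assemble and encode a read
// command using the generated types defined below. The helper names and the
// "default" column-family string are assumptions for illustration only.
func exampleGetCmd(regionID uint64, peer *metapb.Peer, key []byte) ([]byte, error) {
	cmd := &RaftCmdRequest{
		Header: &RaftRequestHeader{RegionId: regionID, Peer: peer},
		Requests: []*Request{{
			CmdType: CmdType_Get,
			Get:     &GetRequest{Cf: "default", Key: key},
		}},
	}
	// Marshal is the gogo-generated encoder declared later in this file.
	return cmd.Marshal()
}

// Decoding a reply uses the matching generated Unmarshal method.
func exampleDecodeCmdResponse(data []byte) (*RaftCmdResponse, error) {
	resp := &RaftCmdResponse{}
	if err := resp.Unmarshal(data); err != nil {
		return nil, err
	}
	return resp, nil
}
// ---------------------------------------------------------------------------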
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CmdType int32 + +const ( + CmdType_Invalid CmdType = 0 + CmdType_Get CmdType = 1 + CmdType_Put CmdType = 3 + CmdType_Delete CmdType = 4 + CmdType_Snap CmdType = 5 +) + +var CmdType_name = map[int32]string{ + 0: "Invalid", + 1: "Get", + 3: "Put", + 4: "Delete", + 5: "Snap", +} +var CmdType_value = map[string]int32{ + "Invalid": 0, + "Get": 1, + "Put": 3, + "Delete": 4, + "Snap": 5, +} + +func (x CmdType) String() string { + return proto.EnumName(CmdType_name, int32(x)) +} +func (CmdType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{0} +} + +type AdminCmdType int32 + +const ( + AdminCmdType_InvalidAdmin AdminCmdType = 0 + AdminCmdType_ChangePeer AdminCmdType = 1 + AdminCmdType_CompactLog AdminCmdType = 3 + AdminCmdType_TransferLeader AdminCmdType = 4 + AdminCmdType_Split AdminCmdType = 10 +) + +var AdminCmdType_name = map[int32]string{ + 0: "InvalidAdmin", + 1: "ChangePeer", + 3: "CompactLog", + 4: "TransferLeader", + 10: "Split", +} +var AdminCmdType_value = map[string]int32{ + "InvalidAdmin": 0, + "ChangePeer": 1, + "CompactLog": 3, + "TransferLeader": 4, + "Split": 10, +} + +func (x AdminCmdType) String() string { + return proto.EnumName(AdminCmdType_name, int32(x)) +} +func (AdminCmdType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{1} +} + +type GetRequest struct { + Cf string `protobuf:"bytes,1,opt,name=cf,proto3" json:"cf,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{0} +} +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(dst, src) +} +func (m *GetRequest) XXX_Size() int { + return m.Size() +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +func (m *GetRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +func (m *GetRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type GetResponse struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{1} +} +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(dst, src) +} +func (m *GetResponse) XXX_Size() int { + return m.Size() +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type PutRequest struct { + Cf string `protobuf:"bytes,1,opt,name=cf,proto3" json:"cf,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{2} +} +func (m *PutRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutRequest.Merge(dst, src) +} +func (m *PutRequest) XXX_Size() int { + return m.Size() +} +func (m *PutRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutRequest proto.InternalMessageInfo + +func (m *PutRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type PutResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{3} +} +func (m *PutResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutResponse.Merge(dst, src) +} +func (m *PutResponse) XXX_Size() int { + return m.Size() +} +func (m *PutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutResponse proto.InternalMessageInfo + +type DeleteRequest struct { + Cf string `protobuf:"bytes,1,opt,name=cf,proto3" json:"cf,omitempty"` + Key []byte 
`protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{4} +} +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(dst, src) +} +func (m *DeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +func (m *DeleteRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type DeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{5} +} +func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *DeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResponse.Merge(dst, src) +} +func (m *DeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo + +type SnapRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapRequest) Reset() { *m = SnapRequest{} } +func (m *SnapRequest) String() string { return proto.CompactTextString(m) } +func (*SnapRequest) ProtoMessage() {} +func (*SnapRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{6} +} +func (m *SnapRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapRequest.Merge(dst, src) +} +func (m *SnapRequest) XXX_Size() int { + return m.Size() +} +func (m 
*SnapRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SnapRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapRequest proto.InternalMessageInfo + +type SnapResponse struct { + Region *metapb.Region `protobuf:"bytes,1,opt,name=region" json:"region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapResponse) Reset() { *m = SnapResponse{} } +func (m *SnapResponse) String() string { return proto.CompactTextString(m) } +func (*SnapResponse) ProtoMessage() {} +func (*SnapResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{7} +} +func (m *SnapResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapResponse.Merge(dst, src) +} +func (m *SnapResponse) XXX_Size() int { + return m.Size() +} +func (m *SnapResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SnapResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapResponse proto.InternalMessageInfo + +func (m *SnapResponse) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +type Request struct { + CmdType CmdType `protobuf:"varint,1,opt,name=cmd_type,json=cmdType,proto3,enum=raft_cmdpb.CmdType" json:"cmd_type,omitempty"` + Get *GetRequest `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + Delete *DeleteRequest `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Snap *SnapRequest `protobuf:"bytes,6,opt,name=snap" json:"snap,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{8} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetCmdType() CmdType { + if m != nil { + return m.CmdType + } + return CmdType_Invalid +} + +func (m *Request) GetGet() *GetRequest { + if m != nil { + return m.Get + } + return nil +} + +func (m *Request) GetPut() *PutRequest { + if m != nil { + return m.Put + } + return nil +} + +func (m *Request) GetDelete() *DeleteRequest { + if m != nil { + return m.Delete + } + return nil +} + +func (m *Request) GetSnap() *SnapRequest { + if m != nil { + return m.Snap + } + return nil +} + +type Response struct { + CmdType CmdType 
`protobuf:"varint,1,opt,name=cmd_type,json=cmdType,proto3,enum=raft_cmdpb.CmdType" json:"cmd_type,omitempty"` + Get *GetResponse `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *PutResponse `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + Delete *DeleteResponse `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Snap *SnapResponse `protobuf:"bytes,6,opt,name=snap" json:"snap,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{9} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(dst, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetCmdType() CmdType { + if m != nil { + return m.CmdType + } + return CmdType_Invalid +} + +func (m *Response) GetGet() *GetResponse { + if m != nil { + return m.Get + } + return nil +} + +func (m *Response) GetPut() *PutResponse { + if m != nil { + return m.Put + } + return nil +} + +func (m *Response) GetDelete() *DeleteResponse { + if m != nil { + return m.Delete + } + return nil +} + +func (m *Response) GetSnap() *SnapResponse { + if m != nil { + return m.Snap + } + return nil +} + +type ChangePeerRequest struct { + // This can be only called in internal RaftStore now. 
+ ChangeType eraftpb.ConfChangeType `protobuf:"varint,1,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` + Peer *metapb.Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangePeerRequest) Reset() { *m = ChangePeerRequest{} } +func (m *ChangePeerRequest) String() string { return proto.CompactTextString(m) } +func (*ChangePeerRequest) ProtoMessage() {} +func (*ChangePeerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{10} +} +func (m *ChangePeerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangePeerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangePeerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ChangePeerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangePeerRequest.Merge(dst, src) +} +func (m *ChangePeerRequest) XXX_Size() int { + return m.Size() +} +func (m *ChangePeerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ChangePeerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangePeerRequest proto.InternalMessageInfo + +func (m *ChangePeerRequest) GetChangeType() eraftpb.ConfChangeType { + if m != nil { + return m.ChangeType + } + return eraftpb.ConfChangeType_AddNode +} + +func (m *ChangePeerRequest) GetPeer() *metapb.Peer { + if m != nil { + return m.Peer + } + return nil +} + +type ChangePeerResponse struct { + Region *metapb.Region `protobuf:"bytes,1,opt,name=region" json:"region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangePeerResponse) Reset() { *m = ChangePeerResponse{} } +func (m *ChangePeerResponse) String() string { return proto.CompactTextString(m) } +func (*ChangePeerResponse) ProtoMessage() {} +func (*ChangePeerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{11} +} +func (m *ChangePeerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangePeerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangePeerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ChangePeerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangePeerResponse.Merge(dst, src) +} +func (m *ChangePeerResponse) XXX_Size() int { + return m.Size() +} +func (m *ChangePeerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ChangePeerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangePeerResponse proto.InternalMessageInfo + +func (m *ChangePeerResponse) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +type SplitRequest struct { + // This can be only called in internal RaftStore now. + // The split_key has to exist in the splitting region. + SplitKey []byte `protobuf:"bytes,1,opt,name=split_key,json=splitKey,proto3" json:"split_key,omitempty"` + // We split the region into two. The first uses the origin + // parent region id, and the second uses the new_region_id. 
+ // We must guarantee that the new_region_id is global unique. + NewRegionId uint64 `protobuf:"varint,2,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"` + // The peer ids for the new split region. + NewPeerIds []uint64 `protobuf:"varint,3,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SplitRequest) Reset() { *m = SplitRequest{} } +func (m *SplitRequest) String() string { return proto.CompactTextString(m) } +func (*SplitRequest) ProtoMessage() {} +func (*SplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{12} +} +func (m *SplitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SplitRequest.Merge(dst, src) +} +func (m *SplitRequest) XXX_Size() int { + return m.Size() +} +func (m *SplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SplitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SplitRequest proto.InternalMessageInfo + +func (m *SplitRequest) GetSplitKey() []byte { + if m != nil { + return m.SplitKey + } + return nil +} + +func (m *SplitRequest) GetNewRegionId() uint64 { + if m != nil { + return m.NewRegionId + } + return 0 +} + +func (m *SplitRequest) GetNewPeerIds() []uint64 { + if m != nil { + return m.NewPeerIds + } + return nil +} + +type SplitResponse struct { + // SplitResponse contains the region where specific keys have split into. 
+ Regions []*metapb.Region `protobuf:"bytes,1,rep,name=regions" json:"regions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SplitResponse) Reset() { *m = SplitResponse{} } +func (m *SplitResponse) String() string { return proto.CompactTextString(m) } +func (*SplitResponse) ProtoMessage() {} +func (*SplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{13} +} +func (m *SplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SplitResponse.Merge(dst, src) +} +func (m *SplitResponse) XXX_Size() int { + return m.Size() +} +func (m *SplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SplitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SplitResponse proto.InternalMessageInfo + +func (m *SplitResponse) GetRegions() []*metapb.Region { + if m != nil { + return m.Regions + } + return nil +} + +type CompactLogRequest struct { + CompactIndex uint64 `protobuf:"varint,1,opt,name=compact_index,json=compactIndex,proto3" json:"compact_index,omitempty"` + CompactTerm uint64 `protobuf:"varint,2,opt,name=compact_term,json=compactTerm,proto3" json:"compact_term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactLogRequest) Reset() { *m = CompactLogRequest{} } +func (m *CompactLogRequest) String() string { return proto.CompactTextString(m) } +func (*CompactLogRequest) ProtoMessage() {} +func (*CompactLogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{14} +} +func (m *CompactLogRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompactLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompactLogRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CompactLogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompactLogRequest.Merge(dst, src) +} +func (m *CompactLogRequest) XXX_Size() int { + return m.Size() +} +func (m *CompactLogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompactLogRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompactLogRequest proto.InternalMessageInfo + +func (m *CompactLogRequest) GetCompactIndex() uint64 { + if m != nil { + return m.CompactIndex + } + return 0 +} + +func (m *CompactLogRequest) GetCompactTerm() uint64 { + if m != nil { + return m.CompactTerm + } + return 0 +} + +type CompactLogResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactLogResponse) Reset() { *m = CompactLogResponse{} } +func (m *CompactLogResponse) String() string { return proto.CompactTextString(m) } +func (*CompactLogResponse) ProtoMessage() {} +func (*CompactLogResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{15} +} +func (m *CompactLogResponse) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *CompactLogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompactLogResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CompactLogResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompactLogResponse.Merge(dst, src) +} +func (m *CompactLogResponse) XXX_Size() int { + return m.Size() +} +func (m *CompactLogResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CompactLogResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CompactLogResponse proto.InternalMessageInfo + +type TransferLeaderRequest struct { + Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferLeaderRequest) Reset() { *m = TransferLeaderRequest{} } +func (m *TransferLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*TransferLeaderRequest) ProtoMessage() {} +func (*TransferLeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{16} +} +func (m *TransferLeaderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferLeaderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TransferLeaderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferLeaderRequest.Merge(dst, src) +} +func (m *TransferLeaderRequest) XXX_Size() int { + return m.Size() +} +func (m *TransferLeaderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TransferLeaderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferLeaderRequest proto.InternalMessageInfo + +func (m *TransferLeaderRequest) GetPeer() *metapb.Peer { + if m != nil { + return m.Peer + } + return nil +} + +type TransferLeaderResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferLeaderResponse) Reset() { *m = TransferLeaderResponse{} } +func (m *TransferLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*TransferLeaderResponse) ProtoMessage() {} +func (*TransferLeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{17} +} +func (m *TransferLeaderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferLeaderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TransferLeaderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferLeaderResponse.Merge(dst, src) +} +func (m *TransferLeaderResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferLeaderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TransferLeaderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferLeaderResponse proto.InternalMessageInfo + +type AdminRequest struct { + CmdType AdminCmdType 
`protobuf:"varint,1,opt,name=cmd_type,json=cmdType,proto3,enum=raft_cmdpb.AdminCmdType" json:"cmd_type,omitempty"` + ChangePeer *ChangePeerRequest `protobuf:"bytes,2,opt,name=change_peer,json=changePeer" json:"change_peer,omitempty"` + CompactLog *CompactLogRequest `protobuf:"bytes,4,opt,name=compact_log,json=compactLog" json:"compact_log,omitempty"` + TransferLeader *TransferLeaderRequest `protobuf:"bytes,5,opt,name=transfer_leader,json=transferLeader" json:"transfer_leader,omitempty"` + Split *SplitRequest `protobuf:"bytes,10,opt,name=split" json:"split,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AdminRequest) Reset() { *m = AdminRequest{} } +func (m *AdminRequest) String() string { return proto.CompactTextString(m) } +func (*AdminRequest) ProtoMessage() {} +func (*AdminRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{18} +} +func (m *AdminRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdminRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AdminRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AdminRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdminRequest.Merge(dst, src) +} +func (m *AdminRequest) XXX_Size() int { + return m.Size() +} +func (m *AdminRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdminRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdminRequest proto.InternalMessageInfo + +func (m *AdminRequest) GetCmdType() AdminCmdType { + if m != nil { + return m.CmdType + } + return AdminCmdType_InvalidAdmin +} + +func (m *AdminRequest) GetChangePeer() *ChangePeerRequest { + if m != nil { + return m.ChangePeer + } + return nil +} + +func (m *AdminRequest) GetCompactLog() *CompactLogRequest { + if m != nil { + return m.CompactLog + } + return nil +} + +func (m *AdminRequest) GetTransferLeader() *TransferLeaderRequest { + if m != nil { + return m.TransferLeader + } + return nil +} + +func (m *AdminRequest) GetSplit() *SplitRequest { + if m != nil { + return m.Split + } + return nil +} + +type AdminResponse struct { + CmdType AdminCmdType `protobuf:"varint,1,opt,name=cmd_type,json=cmdType,proto3,enum=raft_cmdpb.AdminCmdType" json:"cmd_type,omitempty"` + ChangePeer *ChangePeerResponse `protobuf:"bytes,2,opt,name=change_peer,json=changePeer" json:"change_peer,omitempty"` + CompactLog *CompactLogResponse `protobuf:"bytes,4,opt,name=compact_log,json=compactLog" json:"compact_log,omitempty"` + TransferLeader *TransferLeaderResponse `protobuf:"bytes,5,opt,name=transfer_leader,json=transferLeader" json:"transfer_leader,omitempty"` + Split *SplitResponse `protobuf:"bytes,10,opt,name=split" json:"split,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AdminResponse) Reset() { *m = AdminResponse{} } +func (m *AdminResponse) String() string { return proto.CompactTextString(m) } +func (*AdminResponse) ProtoMessage() {} +func (*AdminResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{19} +} +func (m *AdminResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdminResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_AdminResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AdminResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdminResponse.Merge(dst, src) +} +func (m *AdminResponse) XXX_Size() int { + return m.Size() +} +func (m *AdminResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdminResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdminResponse proto.InternalMessageInfo + +func (m *AdminResponse) GetCmdType() AdminCmdType { + if m != nil { + return m.CmdType + } + return AdminCmdType_InvalidAdmin +} + +func (m *AdminResponse) GetChangePeer() *ChangePeerResponse { + if m != nil { + return m.ChangePeer + } + return nil +} + +func (m *AdminResponse) GetCompactLog() *CompactLogResponse { + if m != nil { + return m.CompactLog + } + return nil +} + +func (m *AdminResponse) GetTransferLeader() *TransferLeaderResponse { + if m != nil { + return m.TransferLeader + } + return nil +} + +func (m *AdminResponse) GetSplit() *SplitResponse { + if m != nil { + return m.Split + } + return nil +} + +type RaftRequestHeader struct { + RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + Peer *metapb.Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"` + RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,4,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` + Term uint64 `protobuf:"varint,5,opt,name=term,proto3" json:"term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftRequestHeader) Reset() { *m = RaftRequestHeader{} } +func (m *RaftRequestHeader) String() string { return proto.CompactTextString(m) } +func (*RaftRequestHeader) ProtoMessage() {} +func (*RaftRequestHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{20} +} +func (m *RaftRequestHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftRequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftRequestHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftRequestHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftRequestHeader.Merge(dst, src) +} +func (m *RaftRequestHeader) XXX_Size() int { + return m.Size() +} +func (m *RaftRequestHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RaftRequestHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftRequestHeader proto.InternalMessageInfo + +func (m *RaftRequestHeader) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *RaftRequestHeader) GetPeer() *metapb.Peer { + if m != nil { + return m.Peer + } + return nil +} + +func (m *RaftRequestHeader) GetRegionEpoch() *metapb.RegionEpoch { + if m != nil { + return m.RegionEpoch + } + return nil +} + +func (m *RaftRequestHeader) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +type RaftResponseHeader struct { + Error *errorpb.Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + Uuid []byte `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + CurrentTerm uint64 `protobuf:"varint,3,opt,name=current_term,json=currentTerm,proto3" json:"current_term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftResponseHeader) Reset() { *m = RaftResponseHeader{} } +func (m *RaftResponseHeader) String() string { return proto.CompactTextString(m) } +func (*RaftResponseHeader) ProtoMessage() {} +func (*RaftResponseHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{21} +} +func (m *RaftResponseHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftResponseHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftResponseHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftResponseHeader.Merge(dst, src) +} +func (m *RaftResponseHeader) XXX_Size() int { + return m.Size() +} +func (m *RaftResponseHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RaftResponseHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftResponseHeader proto.InternalMessageInfo + +func (m *RaftResponseHeader) GetError() *errorpb.Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *RaftResponseHeader) GetUuid() []byte { + if m != nil { + return m.Uuid + } + return nil +} + +func (m *RaftResponseHeader) GetCurrentTerm() uint64 { + if m != nil { + return m.CurrentTerm + } + return 0 +} + +type RaftCmdRequest struct { + Header *RaftRequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // We can't enclose normal requests and administrator request + // at same time. + Requests []*Request `protobuf:"bytes,2,rep,name=requests" json:"requests,omitempty"` + AdminRequest *AdminRequest `protobuf:"bytes,3,opt,name=admin_request,json=adminRequest" json:"admin_request,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftCmdRequest) Reset() { *m = RaftCmdRequest{} } +func (m *RaftCmdRequest) String() string { return proto.CompactTextString(m) } +func (*RaftCmdRequest) ProtoMessage() {} +func (*RaftCmdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{22} +} +func (m *RaftCmdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftCmdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftCmdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftCmdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftCmdRequest.Merge(dst, src) +} +func (m *RaftCmdRequest) XXX_Size() int { + return m.Size() +} +func (m *RaftCmdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RaftCmdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftCmdRequest proto.InternalMessageInfo + +func (m *RaftCmdRequest) GetHeader() *RaftRequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RaftCmdRequest) GetRequests() []*Request { + if m != nil { + return m.Requests + } + return nil +} + +func (m *RaftCmdRequest) GetAdminRequest() *AdminRequest { + if m != nil { + return m.AdminRequest + } + return nil +} + +type RaftCmdResponse struct { + Header *RaftResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Responses []*Response 
`protobuf:"bytes,2,rep,name=responses" json:"responses,omitempty"` + AdminResponse *AdminResponse `protobuf:"bytes,3,opt,name=admin_response,json=adminResponse" json:"admin_response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftCmdResponse) Reset() { *m = RaftCmdResponse{} } +func (m *RaftCmdResponse) String() string { return proto.CompactTextString(m) } +func (*RaftCmdResponse) ProtoMessage() {} +func (*RaftCmdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_cmdpb_7d27a800501f9188, []int{23} +} +func (m *RaftCmdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftCmdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftCmdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftCmdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftCmdResponse.Merge(dst, src) +} +func (m *RaftCmdResponse) XXX_Size() int { + return m.Size() +} +func (m *RaftCmdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RaftCmdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftCmdResponse proto.InternalMessageInfo + +func (m *RaftCmdResponse) GetHeader() *RaftResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RaftCmdResponse) GetResponses() []*Response { + if m != nil { + return m.Responses + } + return nil +} + +func (m *RaftCmdResponse) GetAdminResponse() *AdminResponse { + if m != nil { + return m.AdminResponse + } + return nil +} + +func init() { + proto.RegisterType((*GetRequest)(nil), "raft_cmdpb.GetRequest") + proto.RegisterType((*GetResponse)(nil), "raft_cmdpb.GetResponse") + proto.RegisterType((*PutRequest)(nil), "raft_cmdpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "raft_cmdpb.PutResponse") + proto.RegisterType((*DeleteRequest)(nil), "raft_cmdpb.DeleteRequest") + proto.RegisterType((*DeleteResponse)(nil), "raft_cmdpb.DeleteResponse") + proto.RegisterType((*SnapRequest)(nil), "raft_cmdpb.SnapRequest") + proto.RegisterType((*SnapResponse)(nil), "raft_cmdpb.SnapResponse") + proto.RegisterType((*Request)(nil), "raft_cmdpb.Request") + proto.RegisterType((*Response)(nil), "raft_cmdpb.Response") + proto.RegisterType((*ChangePeerRequest)(nil), "raft_cmdpb.ChangePeerRequest") + proto.RegisterType((*ChangePeerResponse)(nil), "raft_cmdpb.ChangePeerResponse") + proto.RegisterType((*SplitRequest)(nil), "raft_cmdpb.SplitRequest") + proto.RegisterType((*SplitResponse)(nil), "raft_cmdpb.SplitResponse") + proto.RegisterType((*CompactLogRequest)(nil), "raft_cmdpb.CompactLogRequest") + proto.RegisterType((*CompactLogResponse)(nil), "raft_cmdpb.CompactLogResponse") + proto.RegisterType((*TransferLeaderRequest)(nil), "raft_cmdpb.TransferLeaderRequest") + proto.RegisterType((*TransferLeaderResponse)(nil), "raft_cmdpb.TransferLeaderResponse") + proto.RegisterType((*AdminRequest)(nil), "raft_cmdpb.AdminRequest") + proto.RegisterType((*AdminResponse)(nil), "raft_cmdpb.AdminResponse") + proto.RegisterType((*RaftRequestHeader)(nil), "raft_cmdpb.RaftRequestHeader") + proto.RegisterType((*RaftResponseHeader)(nil), "raft_cmdpb.RaftResponseHeader") + proto.RegisterType((*RaftCmdRequest)(nil), "raft_cmdpb.RaftCmdRequest") + proto.RegisterType((*RaftCmdResponse)(nil), "raft_cmdpb.RaftCmdResponse") + proto.RegisterEnum("raft_cmdpb.CmdType", 
CmdType_name, CmdType_value) + proto.RegisterEnum("raft_cmdpb.AdminCmdType", AdminCmdType_name, AdminCmdType_value) +} +func (m *GetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Cf) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Cf) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Cf) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Region != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Region.Size())) + n1, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CmdType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CmdType)) + } + if m.Get != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Get.Size())) + n2, err := m.Get.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Delete != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Delete.Size())) + n4, err := m.Delete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Snap != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Snap.Size())) + n5, err := m.Snap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CmdType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CmdType)) + } + if m.Get != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Get.Size())) + n6, err := m.Get.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Put.Size())) + n7, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Delete != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Delete.Size())) + n8, err := m.Delete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Snap != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Snap.Size())) + n9, err := m.Snap.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n9 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ChangePeerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangePeerRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ChangeType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.ChangeType)) + } + if m.Peer != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Peer.Size())) + n10, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ChangePeerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangePeerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Region != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Region.Size())) + n11, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SplitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SplitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SplitKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.SplitKey))) + i += copy(dAtA[i:], m.SplitKey) + } + if m.NewRegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.NewRegionId)) + } + if len(m.NewPeerIds) > 0 { + dAtA13 := make([]byte, len(m.NewPeerIds)*10) + var j12 int + for _, num := range m.NewPeerIds { + for num >= 1<<7 { + dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j12++ + } + dAtA13[j12] = uint8(num) + j12++ + } + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(j12)) + i += copy(dAtA[i:], dAtA13[:j12]) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SplitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SplitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Regions) > 0 { + for _, msg := range m.Regions { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CompactLogRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactLogRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CompactIndex != 0 { + dAtA[i] = 0x8 + i++ + i = 
encodeVarintRaftCmdpb(dAtA, i, uint64(m.CompactIndex)) + } + if m.CompactTerm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CompactTerm)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CompactLogResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactLogResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TransferLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Peer != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Peer.Size())) + n14, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TransferLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AdminRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdminRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CmdType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CmdType)) + } + if m.ChangePeer != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.ChangePeer.Size())) + n15, err := m.ChangePeer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.CompactLog != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CompactLog.Size())) + n16, err := m.CompactLog.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.TransferLeader != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.TransferLeader.Size())) + n17, err := m.TransferLeader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Split != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Split.Size())) + n18, err := m.Split.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AdminResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdminResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CmdType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CmdType)) + } + if m.ChangePeer != nil 
{ + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.ChangePeer.Size())) + n19, err := m.ChangePeer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.CompactLog != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CompactLog.Size())) + n20, err := m.CompactLog.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.TransferLeader != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.TransferLeader.Size())) + n21, err := m.TransferLeader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Split != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Split.Size())) + n22, err := m.Split.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftRequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftRequestHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.RegionId)) + } + if m.Peer != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Peer.Size())) + n23, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.RegionEpoch != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.RegionEpoch.Size())) + n24, err := m.RegionEpoch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.Term != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftResponseHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Error != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Error.Size())) + n25, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if len(m.Uuid) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(len(m.Uuid))) + i += copy(dAtA[i:], m.Uuid) + } + if m.CurrentTerm != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.CurrentTerm)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftCmdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftCmdRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Header.Size())) + n26, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if len(m.Requests) > 0 { + for _, msg := range m.Requests { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n + } + } + if m.AdminRequest != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.AdminRequest.Size())) + n27, err := m.AdminRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftCmdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftCmdResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AdminResponse != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftCmdpb(dAtA, i, uint64(m.AdminResponse.Size())) + n29, err := m.AdminResponse.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaftCmdpb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *GetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetResponse) Size() (n int) { + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutRequest) Size() (n int) { + var l int + _ = l + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutResponse) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteResponse) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapRequest) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapResponse) Size() (n int) { + var l int + _ = l + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Request) Size() (n int) { + var l int + _ = l + if m.CmdType != 0 { + n += 
1 + sovRaftCmdpb(uint64(m.CmdType)) + } + if m.Get != nil { + l = m.Get.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Put != nil { + l = m.Put.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Delete != nil { + l = m.Delete.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Snap != nil { + l = m.Snap.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Response) Size() (n int) { + var l int + _ = l + if m.CmdType != 0 { + n += 1 + sovRaftCmdpb(uint64(m.CmdType)) + } + if m.Get != nil { + l = m.Get.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Put != nil { + l = m.Put.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Delete != nil { + l = m.Delete.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Snap != nil { + l = m.Snap.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChangePeerRequest) Size() (n int) { + var l int + _ = l + if m.ChangeType != 0 { + n += 1 + sovRaftCmdpb(uint64(m.ChangeType)) + } + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChangePeerResponse) Size() (n int) { + var l int + _ = l + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SplitRequest) Size() (n int) { + var l int + _ = l + l = len(m.SplitKey) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.NewRegionId != 0 { + n += 1 + sovRaftCmdpb(uint64(m.NewRegionId)) + } + if len(m.NewPeerIds) > 0 { + l = 0 + for _, e := range m.NewPeerIds { + l += sovRaftCmdpb(uint64(e)) + } + n += 1 + sovRaftCmdpb(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SplitResponse) Size() (n int) { + var l int + _ = l + if len(m.Regions) > 0 { + for _, e := range m.Regions { + l = e.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CompactLogRequest) Size() (n int) { + var l int + _ = l + if m.CompactIndex != 0 { + n += 1 + sovRaftCmdpb(uint64(m.CompactIndex)) + } + if m.CompactTerm != 0 { + n += 1 + sovRaftCmdpb(uint64(m.CompactTerm)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CompactLogResponse) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TransferLeaderRequest) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TransferLeaderResponse) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AdminRequest) Size() (n int) { + var l int + _ = l + if m.CmdType != 0 { + n += 1 + sovRaftCmdpb(uint64(m.CmdType)) + } + if m.ChangePeer != nil { + l = m.ChangePeer.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.CompactLog != nil { + l = m.CompactLog.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.TransferLeader != nil { + l = m.TransferLeader.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if 
m.Split != nil { + l = m.Split.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AdminResponse) Size() (n int) { + var l int + _ = l + if m.CmdType != 0 { + n += 1 + sovRaftCmdpb(uint64(m.CmdType)) + } + if m.ChangePeer != nil { + l = m.ChangePeer.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.CompactLog != nil { + l = m.CompactLog.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.TransferLeader != nil { + l = m.TransferLeader.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Split != nil { + l = m.Split.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftRequestHeader) Size() (n int) { + var l int + _ = l + if m.RegionId != 0 { + n += 1 + sovRaftCmdpb(uint64(m.RegionId)) + } + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.RegionEpoch != nil { + l = m.RegionEpoch.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.Term != 0 { + n += 1 + sovRaftCmdpb(uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftResponseHeader) Size() (n int) { + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.CurrentTerm != 0 { + n += 1 + sovRaftCmdpb(uint64(m.CurrentTerm)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftCmdRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + } + if m.AdminRequest != nil { + l = m.AdminRequest.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftCmdResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + } + if m.AdminResponse != nil { + l = m.AdminResponse.Size() + n += 1 + l + sovRaftCmdpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaftCmdpb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaftCmdpb(x uint64) (n int) { + return sovRaftCmdpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
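+ // Unknown field numbers are not dropped: skipRaftCmdpb measures the field's
+ // encoded length and its raw bytes were appended to XXX_unrecognized above,
+ // so a later Marshal re-emits them unchanged (forward compatibility).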
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
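+ // SnapRequest declares no fields of its own, so every tag in the input takes
+ // this skip path and is preserved verbatim in XXX_unrecognized.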
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CmdType", wireType) + } + m.CmdType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CmdType |= (CmdType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Get", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Get == nil { + m.Get = &GetRequest{} + } + if err := m.Get.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutRequest{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delete == nil { + m.Delete = &DeleteRequest{} + } + if err := m.Delete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Snap == nil { + m.Snap = &SnapRequest{} + } + if err := m.Snap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
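+ // Request bundles CmdType (tag 1) with the Get/Put/Delete/Snap payloads
+ // (tags 2, 4, 5, 6 above); any other tag is skipped and retained here.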
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CmdType", wireType) + } + m.CmdType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CmdType |= (CmdType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Get", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Get == nil { + m.Get = &GetResponse{} + } + if err := m.Get.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutResponse{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delete == nil { + m.Delete = &DeleteResponse{} + } + if err := m.Delete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Snap == nil { + m.Snap = &SnapResponse{} + } + if err := m.Snap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangePeerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangePeerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangePeerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangeType", wireType) + } + m.ChangeType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChangeType |= (eraftpb.ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &metapb.Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangePeerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangePeerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangePeerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SplitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SplitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SplitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SplitKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SplitKey = append(m.SplitKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.SplitKey == nil { + m.SplitKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NewRegionId", wireType) + } + m.NewRegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NewRegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SplitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SplitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SplitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Regions = append(m.Regions, &metapb.Region{}) + if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactLogRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactIndex", wireType) + } + m.CompactIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactTerm", wireType) + } + m.CompactTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + 
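+ // The skipped field claims more bytes than remain in the buffer, i.e. the
+ // input was truncated mid-field.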
return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactLogResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactLogResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactLogResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &metapb.Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdminRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdminRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdminRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CmdType", wireType) + } + m.CmdType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CmdType |= (AdminCmdType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangePeer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ChangePeer == nil { + m.ChangePeer = &ChangePeerRequest{} + } + if err := m.ChangePeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactLog", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompactLog == nil { + m.CompactLog = 
&CompactLogRequest{} + } + if err := m.CompactLog.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransferLeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransferLeader == nil { + m.TransferLeader = &TransferLeaderRequest{} + } + if err := m.TransferLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Split", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Split == nil { + m.Split = &SplitRequest{} + } + if err := m.Split.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdminResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdminResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdminResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CmdType", wireType) + } + m.CmdType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CmdType |= (AdminCmdType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangePeer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ChangePeer == nil { + m.ChangePeer = &ChangePeerResponse{} + } + if err := m.ChangePeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactLog", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompactLog == nil { + m.CompactLog = &CompactLogResponse{} + } + if err := m.CompactLog.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransferLeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransferLeader == nil { + m.TransferLeader = &TransferLeaderResponse{} + } + if err := m.TransferLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Split", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
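+ // Varint decode: each byte contributes its low 7 bits and a set high bit
+ // means another byte follows; the shift >= 64 check above rejects overlong
+ // encodings.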
b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Split == nil { + m.Split = &SplitResponse{} + } + if err := m.Split.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftRequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftRequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftRequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &metapb.Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionEpoch == nil { + m.RegionEpoch = &metapb.RegionEpoch{} + } + if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &errorpb.Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = append(m.Uuid[:0], dAtA[iNdEx:postIndex]...) + if m.Uuid == nil { + m.Uuid = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentTerm", wireType) + } + m.CurrentTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftCmdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftCmdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftCmdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RaftRequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdminRequest == nil { + m.AdminRequest = &AdminRequest{} + } + if err := m.AdminRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftCmdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftCmdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftCmdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RaftResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &Response{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftCmdpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdminResponse == nil { + m.AdminResponse = &AdminResponse{} + } + if err := m.AdminResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftCmdpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftCmdpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftCmdpb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaftCmdpb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftCmdpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaftCmdpb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaftCmdpb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftCmdpb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft_cmdpb.proto", fileDescriptor_raft_cmdpb_7d27a800501f9188) } + +var fileDescriptor_raft_cmdpb_7d27a800501f9188 = []byte{ + // 1068 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x0e, 0x45, 0xea, 0xc7, 0x43, 0x52, 0xa1, 0x37, 0x6e, 0x4c, 0x3b, 0xa8, 0xa0, 0x30, 0x45, + 0xe1, 0xa4, 0x85, 0x82, 0x38, 0xa8, 0xd1, 0x00, 0x6d, 0xd2, 0xd6, 0x09, 0x52, 0x37, 0x39, 0x18, + 0x1b, 0xdf, 0x7a, 0x20, 0x18, 0x72, 0x25, 0x0b, 0x95, 0x48, 0x9a, 0xa4, 0xe2, 0xfa, 0x4d, 0x7a, + 0xea, 0x6b, 0xf4, 0x98, 0x4b, 0x0f, 0x3d, 0xf6, 0x11, 0x0a, 0xf7, 0xdc, 0x4b, 0x9f, 0xa0, 0xd8, + 0xdd, 0x59, 0xfe, 0x48, 0x72, 0x9b, 0xf4, 0x24, 0xee, 0xec, 0xcc, 0xc7, 0x6f, 0xbe, 0x9d, 0x6f, + 0x45, 0x70, 0xb2, 0x60, 0x5c, 0xf8, 0xe1, 0x3c, 0x4a, 0x5f, 0x8f, 0xd2, 0x2c, 0x29, 0x12, 0x02, + 0x55, 0x64, 0xd7, 0x9a, 0xb3, 0x22, 0x50, 0x3b, 0xbb, 0x36, 0xcb, 0xb2, 0x24, 0xab, 0x2f, 0x83, + 0x71, 0xa1, 0x96, 0xde, 0x08, 0xe0, 0x39, 0x2b, 0x28, 0x3b, 0x5b, 0xb0, 0xbc, 0x20, 0x7d, 0x68, + 0x85, 0x63, 0x57, 0x1b, 0x6a, 0x7b, 0x1b, 0xb4, 0x15, 0x8e, 0x89, 0x03, 0xfa, 0x0f, 0xec, 0xc2, + 0x6d, 0x0d, 0xb5, 0x3d, 0x8b, 0xf2, 0x47, 0xef, 0x0e, 0x98, 0x22, 0x3f, 0x4f, 0x93, 0x38, 0x67, + 0x64, 0x0b, 0xda, 0x6f, 0x82, 0xd9, 0x82, 0x89, 0x1a, 0x8b, 0xca, 0x85, 0xf7, 0x14, 0xe0, 0x78, + 0xf1, 0xee, 0xa0, 0x15, 0x8a, 0x5e, 0x47, 0xb1, 0xc1, 0x14, 0x28, 0xf2, 0x55, 0xde, 0x03, 0xb0, + 0x9f, 0xb2, 0x19, 
0x2b, 0xd8, 0xbb, 0x93, 0x75, 0xa0, 0xaf, 0x4a, 0x10, 0xc4, 0x06, 0xf3, 0x55, + 0x1c, 0xa4, 0x08, 0xe1, 0x1d, 0x80, 0x25, 0x97, 0xd8, 0xce, 0xc7, 0xd0, 0xc9, 0xd8, 0x64, 0x9a, + 0xc4, 0x02, 0xd6, 0xdc, 0xef, 0x8f, 0x50, 0x4a, 0x2a, 0xa2, 0x14, 0x77, 0xbd, 0xbf, 0x34, 0xe8, + 0x2a, 0x1a, 0x23, 0xe8, 0x85, 0xf3, 0xc8, 0x2f, 0x2e, 0x52, 0xa9, 0x42, 0x7f, 0xff, 0xc6, 0xa8, + 0x76, 0x3c, 0x87, 0xf3, 0xe8, 0xe4, 0x22, 0x65, 0xb4, 0x1b, 0xca, 0x07, 0xb2, 0x07, 0xfa, 0x84, + 0x15, 0x82, 0xa6, 0xb9, 0x7f, 0xb3, 0x9e, 0x5a, 0x1d, 0x04, 0xe5, 0x29, 0x3c, 0x33, 0x5d, 0x14, + 0xae, 0xb1, 0x9a, 0x59, 0xa9, 0x4b, 0x79, 0x0a, 0x79, 0x00, 0x9d, 0x48, 0x34, 0xea, 0xb6, 0x45, + 0xf2, 0x4e, 0x3d, 0xb9, 0xa1, 0x1a, 0xc5, 0x44, 0xf2, 0x09, 0x18, 0x79, 0x1c, 0xa4, 0x6e, 0x47, + 0x14, 0x6c, 0xd7, 0x0b, 0x6a, 0x0a, 0x51, 0x91, 0xe4, 0xfd, 0xad, 0x41, 0xaf, 0x14, 0xe9, 0x7d, + 0x1b, 0xbe, 0x5b, 0x6f, 0x78, 0x7b, 0xa5, 0x61, 0x89, 0x2a, 0x3b, 0xbe, 0x5b, 0xef, 0x78, 0x7b, + 0xa5, 0x63, 0x95, 0xca, 0x5b, 0xde, 0x5f, 0x6a, 0x79, 0x77, 0x5d, 0xcb, 0x58, 0xa0, 0x7a, 0xfe, + 0xb4, 0xd1, 0xb3, 0xbb, 0xda, 0x33, 0xe6, 0xcb, 0xa6, 0x13, 0xd8, 0x3c, 0x3c, 0x0d, 0xe2, 0x09, + 0x3b, 0x66, 0x2c, 0x53, 0xa7, 0xfd, 0x39, 0x98, 0xa1, 0x08, 0xd6, 0xfb, 0xdf, 0x1e, 0x29, 0x53, + 0x1d, 0x26, 0xf1, 0x58, 0x16, 0x09, 0x0d, 0x20, 0x2c, 0x9f, 0xc9, 0x10, 0x8c, 0x94, 0xb1, 0x0c, + 0x75, 0xb0, 0xd4, 0x64, 0x09, 0x70, 0xb1, 0xe3, 0x7d, 0x01, 0xa4, 0xfe, 0xc2, 0xf7, 0x9c, 0xc9, + 0x33, 0xb0, 0x5e, 0xa5, 0xb3, 0x69, 0x69, 0xbb, 0x5b, 0xb0, 0x91, 0xf3, 0xb5, 0xcf, 0x4d, 0x21, + 0xed, 0xd9, 0x13, 0x81, 0x17, 0xec, 0x82, 0x78, 0x60, 0xc7, 0xec, 0xdc, 0x97, 0xa5, 0xfe, 0x34, + 0x12, 0xac, 0x0c, 0x6a, 0xc6, 0xec, 0x5c, 0xc2, 0x1e, 0x45, 0x64, 0x08, 0x16, 0xcf, 0xe1, 0xd4, + 0xfc, 0x69, 0x94, 0xbb, 0xfa, 0x50, 0xdf, 0x33, 0x28, 0xc4, 0xec, 0x9c, 0xf3, 0x3b, 0x8a, 0x72, + 0xef, 0x11, 0xd8, 0xf8, 0x4a, 0xe4, 0xba, 0x07, 0x5d, 0x09, 0x99, 0xbb, 0xda, 0x50, 0x5f, 0x43, + 0x56, 0x6d, 0x7b, 0xdf, 0xc3, 0xe6, 0x61, 0x32, 0x4f, 0x83, 0xb0, 0x78, 0x99, 0x4c, 0x14, 0xe5, + 0x3b, 0x60, 0x87, 0x32, 0xe8, 0x4f, 0xe3, 0x88, 0xfd, 0x28, 0x68, 0x1b, 0xd4, 0xc2, 0xe0, 0x11, + 0x8f, 0x91, 0xdb, 0xa0, 0xd6, 0x7e, 0xc1, 0xb2, 0xb9, 0x62, 0x8e, 0xb1, 0x13, 0x96, 0xcd, 0xbd, + 0x2d, 0x20, 0x75, 0x70, 0xf4, 0xfe, 0x23, 0xf8, 0xe0, 0x24, 0x0b, 0xe2, 0x7c, 0xcc, 0xb2, 0x97, + 0x2c, 0x88, 0xaa, 0x33, 0x55, 0x27, 0xa3, 0x5d, 0x79, 0x32, 0x2e, 0xdc, 0x5c, 0x2e, 0x45, 0xd0, + 0xb7, 0x2d, 0xb0, 0xbe, 0x8e, 0xe6, 0xd3, 0x58, 0x81, 0x3d, 0x5c, 0x71, 0x47, 0x63, 0xce, 0x44, + 0xee, 0x8a, 0x45, 0x1e, 0x97, 0x53, 0x55, 0x1b, 0x91, 0x0f, 0x1b, 0xae, 0x5a, 0x9e, 0x44, 0x35, + 0x5b, 0x3c, 0x24, 0xea, 0x51, 0x93, 0x59, 0x32, 0x41, 0xff, 0x34, 0xeb, 0x97, 0xc5, 0xa6, 0x10, + 0x96, 0x21, 0xf2, 0x1d, 0x5c, 0x2f, 0xb0, 0x3f, 0x7f, 0x26, 0x1a, 0x44, 0x57, 0xdd, 0xae, 0x63, + 0xac, 0x55, 0x8f, 0xf6, 0x8b, 0x46, 0x98, 0x8c, 0xa0, 0x2d, 0xc6, 0xcc, 0x85, 0x35, 0x2e, 0xab, + 0x0d, 0x28, 0x95, 0x69, 0xde, 0xaf, 0x2d, 0xb0, 0x51, 0x41, 0x9c, 0xa2, 0xff, 0x25, 0xe1, 0x93, + 0x75, 0x12, 0x0e, 0xae, 0x92, 0x10, 0x8d, 0x5e, 0xd7, 0xf0, 0xc9, 0x3a, 0x0d, 0x07, 0x57, 0x69, + 0x58, 0x02, 0x54, 0x22, 0xbe, 0xb8, 0x4a, 0x44, 0xef, 0xdf, 0x44, 0x44, 0xa0, 0x65, 0x15, 0xef, + 0x37, 0x55, 0xdc, 0x59, 0xa3, 0x22, 0x56, 0xa2, 0x8c, 0x3f, 0x6b, 0xb0, 0x49, 0x83, 0xb1, 0x52, + 0xf7, 0x5b, 0x09, 0x73, 0x0b, 0x36, 0x2a, 0x8f, 0x4b, 0x37, 0xf5, 0xb2, 0xca, 0xe0, 0xff, 0x71, + 0x23, 0x91, 0x03, 0xb0, 0xb0, 0x9c, 0xa5, 0x49, 0x78, 0x8a, 0xa2, 0xdc, 0x68, 0x9a, 0xfa, 0x19, + 0xdf, 0xa2, 0x66, 0x56, 0x2d, 0x08, 0x01, 
0x43, 0x78, 0xb3, 0x2d, 0xde, 0x28, 0x9e, 0xbd, 0x33, + 0x20, 0x92, 0x9f, 0xe4, 0x8d, 0x04, 0x3f, 0x82, 0xb6, 0xf8, 0x3e, 0x29, 0x2f, 0x37, 0xf5, 0xb5, + 0xf2, 0x8c, 0xff, 0x52, 0xb9, 0xc9, 0xf1, 0x16, 0x0b, 0xbc, 0xa5, 0x2c, 0x2a, 0x9e, 0xc5, 0x3d, + 0xb0, 0xc8, 0x32, 0x16, 0xe3, 0x3d, 0xa0, 0xe3, 0x3d, 0x20, 0x63, 0xe2, 0x1e, 0xf8, 0x45, 0x83, + 0x3e, 0x7f, 0xe7, 0xe1, 0x3c, 0x52, 0xf6, 0xfc, 0x0c, 0x3a, 0xa7, 0xf2, 0x6c, 0xb4, 0x55, 0x93, + 0xac, 0xe8, 0x47, 0x31, 0x99, 0xdc, 0x87, 0x5e, 0x26, 0x37, 0x72, 0xb7, 0x25, 0x6e, 0xb6, 0xc6, + 0x7f, 0x9e, 0x1a, 0xe9, 0x32, 0x89, 0x7c, 0x09, 0x76, 0xc0, 0xe7, 0xd4, 0xc7, 0x88, 0xa0, 0x67, + 0xae, 0x19, 0x64, 0x55, 0x6a, 0x05, 0xb5, 0x95, 0xf7, 0x56, 0x83, 0xeb, 0x25, 0x73, 0xb4, 0xc5, + 0xc1, 0x12, 0xf5, 0xc1, 0x2a, 0xf5, 0xba, 0xb4, 0x25, 0xf7, 0x7d, 0x3e, 0x03, 0x72, 0x47, 0x91, + 0xdf, 0x6a, 0x92, 0xc7, 0x49, 0xaa, 0xd2, 0xc8, 0x57, 0xd0, 0x57, 0xf4, 0x65, 0x08, 0xf9, 0xef, + 0xac, 0xe1, 0x8f, 0xd5, 0x76, 0x50, 0x5f, 0xde, 0x7b, 0x0c, 0x5d, 0xf4, 0x28, 0x31, 0xa1, 0x7b, + 0x14, 0xbf, 0x09, 0x66, 0xd3, 0xc8, 0xb9, 0x46, 0xba, 0xa0, 0x3f, 0x67, 0x85, 0xa3, 0xf1, 0x87, + 0xe3, 0x45, 0xe1, 0xe8, 0x04, 0xa0, 0x23, 0xff, 0xaf, 0x1d, 0x83, 0xf4, 0xc0, 0xe0, 0xff, 0xc4, + 0x4e, 0xfb, 0x9e, 0x8f, 0xf7, 0xaa, 0x02, 0x71, 0xc0, 0x42, 0x10, 0x11, 0x76, 0xae, 0x91, 0x3e, + 0x40, 0x65, 0x69, 0x47, 0x13, 0xeb, 0xd2, 0x8d, 0x8e, 0x4e, 0x08, 0xf4, 0x9b, 0x66, 0x73, 0x0c, + 0xb2, 0x01, 0x6d, 0xe1, 0x1e, 0x07, 0xbe, 0x71, 0x7e, 0xbb, 0x1c, 0x68, 0xbf, 0x5f, 0x0e, 0xb4, + 0x3f, 0x2e, 0x07, 0xda, 0x4f, 0x7f, 0x0e, 0xae, 0xbd, 0xee, 0x88, 0x4f, 0xe2, 0x87, 0xff, 0x04, + 0x00, 0x00, 0xff, 0xff, 0x26, 0xb1, 0x93, 0xfc, 0x5e, 0x0b, 0x00, 0x00, +} diff --git a/proto/pkg/raft_serverpb/raft_serverpb.pb.go b/proto/pkg/raft_serverpb/raft_serverpb.pb.go new file mode 100644 index 00000000..f2141222 --- /dev/null +++ b/proto/pkg/raft_serverpb/raft_serverpb.pb.go @@ -0,0 +1,3078 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft_serverpb.proto + +package raft_serverpb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + eraftpb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + metapb "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Normal indicates that this Peer is normal; +// Tombstone shows that this Peer has been removed from Region and cannot join in Raft Group. +type PeerState int32 + +const ( + PeerState_Normal PeerState = 0 + PeerState_Tombstone PeerState = 2 +) + +var PeerState_name = map[int32]string{ + 0: "Normal", + 2: "Tombstone", +} +var PeerState_value = map[string]int32{ + "Normal": 0, + "Tombstone": 2, +} + +func (x PeerState) String() string { + return proto.EnumName(PeerState_name, int32(x)) +} +func (PeerState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{0} +} + +// The message sent between Raft peer, it wraps the raft meessage with some meta information. 
+type RaftMessage struct { + RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + FromPeer *metapb.Peer `protobuf:"bytes,2,opt,name=from_peer,json=fromPeer" json:"from_peer,omitempty"` + ToPeer *metapb.Peer `protobuf:"bytes,3,opt,name=to_peer,json=toPeer" json:"to_peer,omitempty"` + Message *eraftpb.Message `protobuf:"bytes,4,opt,name=message" json:"message,omitempty"` + RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,5,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` + // true means to_peer is a tombstone peer and it should remove itself. + IsTombstone bool `protobuf:"varint,6,opt,name=is_tombstone,json=isTombstone,proto3" json:"is_tombstone,omitempty"` + // Region key range [start_key, end_key). + StartKey []byte `protobuf:"bytes,7,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,8,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftMessage) Reset() { *m = RaftMessage{} } +func (m *RaftMessage) String() string { return proto.CompactTextString(m) } +func (*RaftMessage) ProtoMessage() {} +func (*RaftMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{0} +} +func (m *RaftMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftMessage.Merge(dst, src) +} +func (m *RaftMessage) XXX_Size() int { + return m.Size() +} +func (m *RaftMessage) XXX_DiscardUnknown() { + xxx_messageInfo_RaftMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftMessage proto.InternalMessageInfo + +func (m *RaftMessage) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *RaftMessage) GetFromPeer() *metapb.Peer { + if m != nil { + return m.FromPeer + } + return nil +} + +func (m *RaftMessage) GetToPeer() *metapb.Peer { + if m != nil { + return m.ToPeer + } + return nil +} + +func (m *RaftMessage) GetMessage() *eraftpb.Message { + if m != nil { + return m.Message + } + return nil +} + +func (m *RaftMessage) GetRegionEpoch() *metapb.RegionEpoch { + if m != nil { + return m.RegionEpoch + } + return nil +} + +func (m *RaftMessage) GetIsTombstone() bool { + if m != nil { + return m.IsTombstone + } + return false +} + +func (m *RaftMessage) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *RaftMessage) GetEndKey() []byte { + if m != nil { + return m.EndKey + } + return nil +} + +// Used to store the persistent state for Raft, including the hard state for raft and the last index of the raft log. 
+type RaftLocalState struct { + HardState *eraftpb.HardState `protobuf:"bytes,1,opt,name=hard_state,json=hardState" json:"hard_state,omitempty"` + LastIndex uint64 `protobuf:"varint,2,opt,name=last_index,json=lastIndex,proto3" json:"last_index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftLocalState) Reset() { *m = RaftLocalState{} } +func (m *RaftLocalState) String() string { return proto.CompactTextString(m) } +func (*RaftLocalState) ProtoMessage() {} +func (*RaftLocalState) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{1} +} +func (m *RaftLocalState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftLocalState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftLocalState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftLocalState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftLocalState.Merge(dst, src) +} +func (m *RaftLocalState) XXX_Size() int { + return m.Size() +} +func (m *RaftLocalState) XXX_DiscardUnknown() { + xxx_messageInfo_RaftLocalState.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftLocalState proto.InternalMessageInfo + +func (m *RaftLocalState) GetHardState() *eraftpb.HardState { + if m != nil { + return m.HardState + } + return nil +} + +func (m *RaftLocalState) GetLastIndex() uint64 { + if m != nil { + return m.LastIndex + } + return 0 +} + +// Used to store the persistent state for Raft state machine. +type RaftApplyState struct { + AppliedIndex uint64 `protobuf:"varint,1,opt,name=applied_index,json=appliedIndex,proto3" json:"applied_index,omitempty"` + TruncatedState *RaftTruncatedState `protobuf:"bytes,2,opt,name=truncated_state,json=truncatedState" json:"truncated_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftApplyState) Reset() { *m = RaftApplyState{} } +func (m *RaftApplyState) String() string { return proto.CompactTextString(m) } +func (*RaftApplyState) ProtoMessage() {} +func (*RaftApplyState) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{2} +} +func (m *RaftApplyState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftApplyState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftApplyState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftApplyState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftApplyState.Merge(dst, src) +} +func (m *RaftApplyState) XXX_Size() int { + return m.Size() +} +func (m *RaftApplyState) XXX_DiscardUnknown() { + xxx_messageInfo_RaftApplyState.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftApplyState proto.InternalMessageInfo + +func (m *RaftApplyState) GetAppliedIndex() uint64 { + if m != nil { + return m.AppliedIndex + } + return 0 +} + +func (m *RaftApplyState) GetTruncatedState() *RaftTruncatedState { + if m != nil { + return m.TruncatedState + } + return nil +} + +// The truncated state for Raft log compaction. 
+type RaftTruncatedState struct { + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Term uint64 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftTruncatedState) Reset() { *m = RaftTruncatedState{} } +func (m *RaftTruncatedState) String() string { return proto.CompactTextString(m) } +func (*RaftTruncatedState) ProtoMessage() {} +func (*RaftTruncatedState) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{3} +} +func (m *RaftTruncatedState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftTruncatedState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftTruncatedState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftTruncatedState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftTruncatedState.Merge(dst, src) +} +func (m *RaftTruncatedState) XXX_Size() int { + return m.Size() +} +func (m *RaftTruncatedState) XXX_DiscardUnknown() { + xxx_messageInfo_RaftTruncatedState.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftTruncatedState proto.InternalMessageInfo + +func (m *RaftTruncatedState) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *RaftTruncatedState) GetTerm() uint64 { + if m != nil { + return m.Term + } + return 0 +} + +// Used to store Region information and the corresponding Peer state on this Store. +type RegionLocalState struct { + State PeerState `protobuf:"varint,1,opt,name=state,proto3,enum=raft_serverpb.PeerState" json:"state,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionLocalState) Reset() { *m = RegionLocalState{} } +func (m *RegionLocalState) String() string { return proto.CompactTextString(m) } +func (*RegionLocalState) ProtoMessage() {} +func (*RegionLocalState) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{4} +} +func (m *RegionLocalState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionLocalState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionLocalState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionLocalState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionLocalState.Merge(dst, src) +} +func (m *RegionLocalState) XXX_Size() int { + return m.Size() +} +func (m *RegionLocalState) XXX_DiscardUnknown() { + xxx_messageInfo_RegionLocalState.DiscardUnknown(m) +} + +var xxx_messageInfo_RegionLocalState proto.InternalMessageInfo + +func (m *RegionLocalState) GetState() PeerState { + if m != nil { + return m.State + } + return PeerState_Normal +} + +func (m *RegionLocalState) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +// The persistent identification for Store. +// It used to recover the store id after restart. 
+type StoreIdent struct { + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreIdent) Reset() { *m = StoreIdent{} } +func (m *StoreIdent) String() string { return proto.CompactTextString(m) } +func (*StoreIdent) ProtoMessage() {} +func (*StoreIdent) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{5} +} +func (m *StoreIdent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreIdent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreIdent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreIdent) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreIdent.Merge(dst, src) +} +func (m *StoreIdent) XXX_Size() int { + return m.Size() +} +func (m *StoreIdent) XXX_DiscardUnknown() { + xxx_messageInfo_StoreIdent.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreIdent proto.InternalMessageInfo + +func (m *StoreIdent) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *StoreIdent) GetStoreId() uint64 { + if m != nil { + return m.StoreId + } + return 0 +} + +// Snapshot sending and reciveing related messages. +// Not included in the course scope. +type KeyValue struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{6} +} +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(dst, src) +} +func (m *KeyValue) XXX_Size() int { + return m.Size() +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +func (m *KeyValue) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type RaftSnapshotData struct { + Region *metapb.Region `protobuf:"bytes,1,opt,name=region" json:"region,omitempty"` + FileSize uint64 `protobuf:"varint,2,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + Data []*KeyValue `protobuf:"bytes,3,rep,name=data" json:"data,omitempty"` + Meta *SnapshotMeta `protobuf:"bytes,5,opt,name=meta" json:"meta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + 
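The `RaftLocalState`, `RaftApplyState`, `RaftTruncatedState` and `RegionLocalState` messages above are the records the store persists for Raft, and the generated API for each of them is the same gogo/protobuf surface: `Marshal`/`Unmarshal` plus `Get*` accessors. A minimal round-trip sketch (editor-added, not part of the generated file; it assumes the `github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb` import path, matching the prefix this file already uses for `eraftpb` and `metapb`):

```
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb"
)

func main() {
	// RaftApplyState records how far the state machine has applied
	// and where the log has been truncated (see the fields above).
	apply := &raft_serverpb.RaftApplyState{
		AppliedIndex: 42,
		TruncatedState: &raft_serverpb.RaftTruncatedState{
			Index: 40,
			Term:  3,
		},
	}

	// Marshal produces the bytes that would be written to storage.
	data, err := apply.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal restores an equal message from those bytes.
	var restored raft_serverpb.RaftApplyState
	if err := restored.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(restored.GetAppliedIndex(), restored.GetTruncatedState().GetTerm()) // 42 3
}
```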
+func (m *RaftSnapshotData) Reset() { *m = RaftSnapshotData{} } +func (m *RaftSnapshotData) String() string { return proto.CompactTextString(m) } +func (*RaftSnapshotData) ProtoMessage() {} +func (*RaftSnapshotData) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{7} +} +func (m *RaftSnapshotData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftSnapshotData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftSnapshotData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RaftSnapshotData) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftSnapshotData.Merge(dst, src) +} +func (m *RaftSnapshotData) XXX_Size() int { + return m.Size() +} +func (m *RaftSnapshotData) XXX_DiscardUnknown() { + xxx_messageInfo_RaftSnapshotData.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftSnapshotData proto.InternalMessageInfo + +func (m *RaftSnapshotData) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +func (m *RaftSnapshotData) GetFileSize() uint64 { + if m != nil { + return m.FileSize + } + return 0 +} + +func (m *RaftSnapshotData) GetData() []*KeyValue { + if m != nil { + return m.Data + } + return nil +} + +func (m *RaftSnapshotData) GetMeta() *SnapshotMeta { + if m != nil { + return m.Meta + } + return nil +} + +type SnapshotCFFile struct { + Cf string `protobuf:"bytes,1,opt,name=cf,proto3" json:"cf,omitempty"` + Size_ uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Checksum uint32 `protobuf:"varint,3,opt,name=checksum,proto3" json:"checksum,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotCFFile) Reset() { *m = SnapshotCFFile{} } +func (m *SnapshotCFFile) String() string { return proto.CompactTextString(m) } +func (*SnapshotCFFile) ProtoMessage() {} +func (*SnapshotCFFile) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{8} +} +func (m *SnapshotCFFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotCFFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotCFFile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapshotCFFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotCFFile.Merge(dst, src) +} +func (m *SnapshotCFFile) XXX_Size() int { + return m.Size() +} +func (m *SnapshotCFFile) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotCFFile.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotCFFile proto.InternalMessageInfo + +func (m *SnapshotCFFile) GetCf() string { + if m != nil { + return m.Cf + } + return "" +} + +func (m *SnapshotCFFile) GetSize_() uint64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *SnapshotCFFile) GetChecksum() uint32 { + if m != nil { + return m.Checksum + } + return 0 +} + +type SnapshotMeta struct { + CfFiles []*SnapshotCFFile `protobuf:"bytes,1,rep,name=cf_files,json=cfFiles" json:"cf_files,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotMeta) Reset() { *m = SnapshotMeta{} } +func (m *SnapshotMeta) 
String() string { return proto.CompactTextString(m) } +func (*SnapshotMeta) ProtoMessage() {} +func (*SnapshotMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{9} +} +func (m *SnapshotMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapshotMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotMeta.Merge(dst, src) +} +func (m *SnapshotMeta) XXX_Size() int { + return m.Size() +} +func (m *SnapshotMeta) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotMeta proto.InternalMessageInfo + +func (m *SnapshotMeta) GetCfFiles() []*SnapshotCFFile { + if m != nil { + return m.CfFiles + } + return nil +} + +type SnapshotChunk struct { + Message *RaftMessage `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotChunk) Reset() { *m = SnapshotChunk{} } +func (m *SnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*SnapshotChunk) ProtoMessage() {} +func (*SnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{10} +} +func (m *SnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotChunk.Merge(dst, src) +} +func (m *SnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *SnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotChunk proto.InternalMessageInfo + +func (m *SnapshotChunk) GetMessage() *RaftMessage { + if m != nil { + return m.Message + } + return nil +} + +func (m *SnapshotChunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type Done struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Done) Reset() { *m = Done{} } +func (m *Done) String() string { return proto.CompactTextString(m) } +func (*Done) ProtoMessage() {} +func (*Done) Descriptor() ([]byte, []int) { + return fileDescriptor_raft_serverpb_af91ddafe2dcf6b5, []int{11} +} +func (m *Done) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Done) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Done.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Done) XXX_Merge(src proto.Message) { + xxx_messageInfo_Done.Merge(dst, src) +} +func (m *Done) XXX_Size() int { + return m.Size() +} +func (m *Done) XXX_DiscardUnknown() { + xxx_messageInfo_Done.DiscardUnknown(m) +} + 
+var xxx_messageInfo_Done proto.InternalMessageInfo + +func init() { + proto.RegisterType((*RaftMessage)(nil), "raft_serverpb.RaftMessage") + proto.RegisterType((*RaftLocalState)(nil), "raft_serverpb.RaftLocalState") + proto.RegisterType((*RaftApplyState)(nil), "raft_serverpb.RaftApplyState") + proto.RegisterType((*RaftTruncatedState)(nil), "raft_serverpb.RaftTruncatedState") + proto.RegisterType((*RegionLocalState)(nil), "raft_serverpb.RegionLocalState") + proto.RegisterType((*StoreIdent)(nil), "raft_serverpb.StoreIdent") + proto.RegisterType((*KeyValue)(nil), "raft_serverpb.KeyValue") + proto.RegisterType((*RaftSnapshotData)(nil), "raft_serverpb.RaftSnapshotData") + proto.RegisterType((*SnapshotCFFile)(nil), "raft_serverpb.SnapshotCFFile") + proto.RegisterType((*SnapshotMeta)(nil), "raft_serverpb.SnapshotMeta") + proto.RegisterType((*SnapshotChunk)(nil), "raft_serverpb.SnapshotChunk") + proto.RegisterType((*Done)(nil), "raft_serverpb.Done") + proto.RegisterEnum("raft_serverpb.PeerState", PeerState_name, PeerState_value) +} +func (m *RaftMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RegionId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.RegionId)) + } + if m.FromPeer != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.FromPeer.Size())) + n1, err := m.FromPeer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.ToPeer != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.ToPeer.Size())) + n2, err := m.ToPeer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Message != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Message.Size())) + n3, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.RegionEpoch != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.RegionEpoch.Size())) + n4, err := m.RegionEpoch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.IsTombstone { + dAtA[i] = 0x30 + i++ + if m.IsTombstone { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.StartKey) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(len(m.StartKey))) + i += copy(dAtA[i:], m.StartKey) + } + if len(m.EndKey) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(len(m.EndKey))) + i += copy(dAtA[i:], m.EndKey) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftLocalState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftLocalState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HardState != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.HardState.Size())) + n5, err := m.HardState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.LastIndex != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.LastIndex)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + 
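Each generated `MarshalTo` above emits a field as a protobuf key (a single byte for these small field numbers) followed by either a varint value or a length-prefixed payload, and the shift loops in the `Unmarshal` functions reverse that; `encodeVarintRaftServerpb`/`sovRaftServerpb` further down implement the base-128 varint itself. A standalone sketch of just that varint scheme (editor-added; `putUvarint`/`getUvarint` are hypothetical names, not helpers from this file):

```
package main

import "fmt"

// putUvarint encodes v the same way encodeVarintRaftServerpb does:
// seven payload bits per byte, least-significant group first, with the
// high bit set on every byte except the last.
func putUvarint(dst []byte, v uint64) int {
	i := 0
	for v >= 1<<7 {
		dst[i] = uint8(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	dst[i] = uint8(v)
	return i + 1
}

// getUvarint mirrors the decode loops in the Unmarshal functions above.
func getUvarint(src []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := src[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
	v, _ := getUvarint(buf[:n])
	fmt.Println(v) // 300
}
```

The literal field keys in the surrounding code follow from the same scheme: the key is `(field_number << 3) | wire_type`, so `dAtA[i] = 0x8` is field 1 with wire type 0 (varint) and `dAtA[i] = 0x12` is field 2 with wire type 2 (length-delimited).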
+func (m *RaftApplyState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftApplyState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AppliedIndex != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.AppliedIndex)) + } + if m.TruncatedState != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.TruncatedState.Size())) + n6, err := m.TruncatedState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftTruncatedState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftTruncatedState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Index != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Index)) + } + if m.Term != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RegionLocalState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegionLocalState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.State)) + } + if m.Region != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Region.Size())) + n7, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreIdent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreIdent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.ClusterId)) + } + if m.StoreId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.StoreId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RaftSnapshotData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftSnapshotData) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Region != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Region.Size())) + n8, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.FileSize != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.FileSize)) + } + if len(m.Data) > 0 { + for _, msg := range m.Data { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Meta != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Meta.Size())) + n9, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotCFFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotCFFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Cf) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(len(m.Cf))) + i += copy(dAtA[i:], m.Cf) + } + if m.Size_ != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Size_)) + } + if m.Checksum != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Checksum)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMeta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CfFiles) > 0 { + for _, msg := range m.CfFiles { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(m.Message.Size())) + n10, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftServerpb(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Done) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Done) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return 
i, nil +} + +func encodeVarintRaftServerpb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RaftMessage) Size() (n int) { + var l int + _ = l + if m.RegionId != 0 { + n += 1 + sovRaftServerpb(uint64(m.RegionId)) + } + if m.FromPeer != nil { + l = m.FromPeer.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.ToPeer != nil { + l = m.ToPeer.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.RegionEpoch != nil { + l = m.RegionEpoch.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.IsTombstone { + n += 2 + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovRaftServerpb(uint64(l)) + } + l = len(m.EndKey) + if l > 0 { + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftLocalState) Size() (n int) { + var l int + _ = l + if m.HardState != nil { + l = m.HardState.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.LastIndex != 0 { + n += 1 + sovRaftServerpb(uint64(m.LastIndex)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftApplyState) Size() (n int) { + var l int + _ = l + if m.AppliedIndex != 0 { + n += 1 + sovRaftServerpb(uint64(m.AppliedIndex)) + } + if m.TruncatedState != nil { + l = m.TruncatedState.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftTruncatedState) Size() (n int) { + var l int + _ = l + if m.Index != 0 { + n += 1 + sovRaftServerpb(uint64(m.Index)) + } + if m.Term != 0 { + n += 1 + sovRaftServerpb(uint64(m.Term)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RegionLocalState) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovRaftServerpb(uint64(m.State)) + } + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreIdent) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovRaftServerpb(uint64(m.ClusterId)) + } + if m.StoreId != 0 { + n += 1 + sovRaftServerpb(uint64(m.StoreId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *KeyValue) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRaftServerpb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RaftSnapshotData) Size() (n int) { + var l int + _ = l + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.FileSize != 0 { + n += 1 + sovRaftServerpb(uint64(m.FileSize)) + } + if len(m.Data) > 0 { + for _, e := range m.Data { + l = e.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + } + if m.Meta != nil { + l = m.Meta.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotCFFile) Size() (n int) { + var l int + _ = l + l = len(m.Cf) + if l > 0 { + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + 
sovRaftServerpb(uint64(m.Size_)) + } + if m.Checksum != 0 { + n += 1 + sovRaftServerpb(uint64(m.Checksum)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMeta) Size() (n int) { + var l int + _ = l + if len(m.CfFiles) > 0 { + for _, e := range m.CfFiles { + l = e.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotChunk) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaftServerpb(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovRaftServerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Done) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaftServerpb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaftServerpb(x uint64) (n int) { + return sovRaftServerpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RaftMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromPeer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FromPeer == nil { + m.FromPeer = &metapb.Peer{} + } + if err := m.FromPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToPeer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ToPeer == nil { + m.ToPeer = &metapb.Peer{} + } + if err := m.ToPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &eraftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionEpoch == nil { + m.RegionEpoch = &metapb.RegionEpoch{} + } + if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsTombstone", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsTombstone = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) + if m.StartKey == nil { + m.StartKey = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) + if m.EndKey == nil { + m.EndKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftLocalState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftLocalState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftLocalState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HardState == nil { + m.HardState = &eraftpb.HardState{} + } + if err := m.HardState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastIndex", wireType) + } + m.LastIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftApplyState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftApplyState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftApplyState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedIndex", wireType) + } + m.AppliedIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppliedIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TruncatedState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TruncatedState == nil { + m.TruncatedState = &RaftTruncatedState{} + } + if err := m.TruncatedState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftTruncatedState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftTruncatedState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftTruncatedState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegionLocalState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegionLocalState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegionLocalState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (PeerState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreIdent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreIdent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreIdent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType) + } + m.StoreId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftSnapshotData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftSnapshotData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftSnapshotData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FileSize", wireType) + } + m.FileSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FileSize |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data, &KeyValue{}) + if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = &SnapshotMeta{} + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotCFFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotCFFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotCFFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cf = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + } + m.Checksum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Checksum |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CfFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CfFiles = append(m.CfFiles, &SnapshotCFFile{}) + if err := m.CfFiles[len(m.CfFiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &RaftMessage{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaftServerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Done) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Done: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Done: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftServerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftServerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftServerpb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaftServerpb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftServerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaftServerpb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaftServerpb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftServerpb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft_serverpb.proto", fileDescriptor_raft_serverpb_af91ddafe2dcf6b5) } + +var fileDescriptor_raft_serverpb_af91ddafe2dcf6b5 = []byte{ + // 736 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdd, 0x6e, 0xf3, 0x44, + 0x10, 0xfd, 0x9c, 0xa4, 0x89, 0x3d, 0x71, 0x42, 0xb4, 0x1f, 0x52, 0x4d, 0xaa, 0x46, 0xa9, 0x11, + 0x55, 0x28, 0x52, 0x10, 0x01, 0x21, 0xae, 0x90, 0x80, 0x52, 0x35, 0x94, 0xa2, 0x6a, 0x53, 0x21, + 0x71, 0x65, 0x6d, 0xec, 0x71, 0x63, 0xea, 0xd8, 0xd6, 0xee, 0xa6, 0x22, 0xbd, 0xe3, 0x2d, 0x78, + 0x11, 0x2e, 0x78, 0x03, 0x2e, 0x79, 0x04, 0x54, 0x5e, 0x04, 0xed, 0xae, 0x9d, 0x9f, 0xb6, 0x70, + 0xe5, 0xf9, 0x39, 0xb3, 0x7b, 0xe6, 0xcc, 0x78, 0xe1, 0x2d, 0x67, 0xb1, 0x0c, 0x04, 0xf2, 0x07, + 0xe4, 0xc5, 0x7c, 0x5c, 0xf0, 0x5c, 0xe6, 0xa4, 0xb3, 0x17, 0xec, 0x77, 0x50, 0xf9, 0x55, 0xb6, + 0xef, 0x2e, 0x51, 0xb2, 0xca, 0xf3, 0xff, 0xa8, 0x41, 0x9b, 0xb2, 0x58, 0x5e, 0xa3, 0x10, 0xec, + 0x0e, 0xc9, 0x11, 0x38, 0x1c, 0xef, 0x92, 0x3c, 0x0b, 0x92, 0xc8, 0xb3, 0x86, 0xd6, 0xa8, 0x41, + 0x6d, 0x13, 0x98, 0x46, 0xe4, 0x43, 0x70, 0x62, 0x9e, 0x2f, 0x83, 0x02, 0x91, 0x7b, 0xb5, 0xa1, + 0x35, 0x6a, 0x4f, 0xdc, 0x71, 0x79, 0xdc, 0x0d, 0x22, 0xa7, 0xb6, 0x4a, 0x2b, 0x8b, 0x7c, 0x00, + 0x2d, 0x99, 0x1b, 0x60, 0xfd, 0x15, 0x60, 0x53, 0xe6, 0x1a, 0x76, 0x06, 0xad, 0xa5, 0xb9, 0xd9, + 0x6b, 0x68, 0x58, 0x6f, 0x5c, 0xb1, 0x2d, 0x19, 0xd1, 0x0a, 0x40, 0x3e, 0x07, 
0xb7, 0xa4, 0x86, + 0x45, 0x1e, 0x2e, 0xbc, 0x03, 0x5d, 0xf0, 0xb6, 0x3a, 0x97, 0xea, 0xdc, 0xb7, 0x2a, 0x45, 0xdb, + 0x7c, 0xeb, 0x90, 0x13, 0x70, 0x13, 0x11, 0xc8, 0x7c, 0x39, 0x17, 0x32, 0xcf, 0xd0, 0x6b, 0x0e, + 0xad, 0x91, 0x4d, 0xdb, 0x89, 0xb8, 0xad, 0x42, 0xaa, 0x6b, 0x21, 0x19, 0x97, 0xc1, 0x3d, 0xae, + 0xbd, 0xd6, 0xd0, 0x1a, 0xb9, 0xd4, 0xd6, 0x81, 0x2b, 0x5c, 0x93, 0x43, 0x68, 0x61, 0x16, 0xe9, + 0x94, 0xad, 0x53, 0x4d, 0xcc, 0xa2, 0x2b, 0x5c, 0xfb, 0x73, 0xe8, 0x2a, 0xe9, 0xbe, 0xcf, 0x43, + 0x96, 0xce, 0x24, 0x93, 0x48, 0x3e, 0x01, 0x58, 0x30, 0x1e, 0x05, 0x42, 0x79, 0x5a, 0xbe, 0xf6, + 0x84, 0x6c, 0x3a, 0xba, 0x64, 0x3c, 0xd2, 0x38, 0xea, 0x2c, 0x2a, 0x93, 0x1c, 0x03, 0xa4, 0x4c, + 0xc8, 0x20, 0xc9, 0x22, 0xfc, 0x45, 0x8b, 0xda, 0xa0, 0x8e, 0x8a, 0x4c, 0x55, 0xc0, 0xff, 0xd5, + 0x32, 0x97, 0x7c, 0x55, 0x14, 0xe9, 0xda, 0x54, 0xbc, 0x0f, 0x1d, 0x56, 0x14, 0x69, 0x82, 0x51, + 0x59, 0x64, 0xc6, 0xe4, 0x96, 0x41, 0x5d, 0x47, 0xbe, 0x83, 0x77, 0x24, 0x5f, 0x65, 0x21, 0x93, + 0x58, 0xd1, 0x31, 0x03, 0x3b, 0x19, 0xef, 0xaf, 0x8c, 0x3a, 0xfc, 0xb6, 0x42, 0x1a, 0x76, 0x5d, + 0xb9, 0xe7, 0xfb, 0x5f, 0x02, 0x79, 0x89, 0x22, 0xef, 0xc2, 0xc1, 0xee, 0xf5, 0xc6, 0x21, 0x04, + 0x1a, 0x12, 0xf9, 0xb2, 0x6c, 0x44, 0xdb, 0xfe, 0xcf, 0xd0, 0x33, 0xc3, 0xd9, 0x51, 0x6a, 0x0c, + 0x07, 0x5b, 0x91, 0xba, 0x13, 0xef, 0x19, 0x2b, 0xb5, 0x1c, 0x86, 0x8c, 0x81, 0x91, 0x53, 0x68, + 0x9a, 0x99, 0x96, 0x6d, 0x74, 0xf7, 0xc7, 0x4e, 0xcb, 0xac, 0x7f, 0x01, 0x30, 0x93, 0x39, 0xc7, + 0x69, 0x84, 0x99, 0x54, 0xe2, 0x86, 0xe9, 0x4a, 0x48, 0xe4, 0xdb, 0x75, 0x76, 0xca, 0xc8, 0x34, + 0x22, 0xef, 0x81, 0x2d, 0x14, 0x58, 0x25, 0x0d, 0xe1, 0x96, 0x30, 0xc5, 0xfe, 0x04, 0xec, 0x2b, + 0x5c, 0xff, 0xc8, 0xd2, 0x15, 0x92, 0x1e, 0xd4, 0xd5, 0xf0, 0x2d, 0x3d, 0x7c, 0x65, 0xaa, 0xde, + 0x1f, 0x54, 0x4a, 0x57, 0xb9, 0xd4, 0x38, 0xfe, 0xef, 0x16, 0xf4, 0x94, 0x50, 0xb3, 0x8c, 0x15, + 0x62, 0x91, 0xcb, 0x73, 0x26, 0xd9, 0x0e, 0x71, 0xeb, 0xff, 0x88, 0xab, 0x15, 0x8c, 0x93, 0x14, + 0x03, 0x91, 0x3c, 0x62, 0x49, 0xc6, 0x56, 0x81, 0x59, 0xf2, 0x88, 0xe4, 0x23, 0x68, 0x44, 0x4c, + 0x32, 0xaf, 0x3e, 0xac, 0x8f, 0xda, 0x93, 0xc3, 0x67, 0x62, 0x55, 0x44, 0xa9, 0x06, 0x91, 0x8f, + 0xa1, 0xa1, 0xae, 0x28, 0xff, 0x8f, 0xa3, 0x67, 0xe0, 0x8a, 0xdc, 0x35, 0x4a, 0x46, 0x35, 0xd0, + 0xbf, 0x81, 0x6e, 0x15, 0xfd, 0xe6, 0xe2, 0x22, 0x49, 0x91, 0x74, 0xa1, 0x16, 0xc6, 0x9a, 0xb0, + 0x43, 0x6b, 0x61, 0xac, 0xa6, 0xba, 0xc3, 0x4b, 0xdb, 0xa4, 0x0f, 0x76, 0xb8, 0xc0, 0xf0, 0x5e, + 0xac, 0x96, 0xfa, 0x17, 0xef, 0xd0, 0x8d, 0xef, 0x5f, 0x82, 0xbb, 0x7b, 0x0f, 0xf9, 0x02, 0xec, + 0x30, 0x0e, 0x54, 0x3b, 0xc2, 0xb3, 0x74, 0x0f, 0xc7, 0xff, 0x41, 0xcb, 0x10, 0xa0, 0xad, 0x30, + 0x56, 0x5f, 0xe1, 0xff, 0x04, 0x9d, 0x4d, 0x6a, 0xb1, 0xca, 0xee, 0xc9, 0x67, 0xdb, 0x17, 0xc3, + 0x08, 0xda, 0x7f, 0x65, 0xa1, 0x5f, 0xbc, 0x1d, 0xa4, 0x14, 0xd0, 0xcc, 0x4b, 0xdb, 0x7e, 0x13, + 0x1a, 0xe7, 0x79, 0x86, 0x67, 0xa7, 0xe0, 0x6c, 0xd6, 0x8d, 0x00, 0x34, 0x7f, 0xc8, 0xf9, 0x92, + 0xa5, 0xbd, 0x37, 0xa4, 0x03, 0xce, 0xe6, 0x89, 0xe8, 0xd5, 0xbe, 0xee, 0xfd, 0xf9, 0x34, 0xb0, + 0xfe, 0x7a, 0x1a, 0x58, 0x7f, 0x3f, 0x0d, 0xac, 0xdf, 0xfe, 0x19, 0xbc, 0x99, 0x37, 0xf5, 0x1b, + 0xfa, 0xe9, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xbd, 0x6c, 0x53, 0x86, 0x05, 0x00, 0x00, +} diff --git a/proto/pkg/schedulerpb/schedulerpb.pb.go b/proto/pkg/schedulerpb/schedulerpb.pb.go new file mode 100644 index 00000000..3e3facfd --- /dev/null +++ b/proto/pkg/schedulerpb/schedulerpb.pb.go @@ -0,0 +1,13955 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: schedulerpb.proto + +package schedulerpb + +import ( + "fmt" + "io" + "math" + + proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" + + eraftpb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + + metapb "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ErrorType int32 + +const ( + ErrorType_OK ErrorType = 0 + ErrorType_UNKNOWN ErrorType = 1 + ErrorType_NOT_BOOTSTRAPPED ErrorType = 2 + ErrorType_STORE_TOMBSTONE ErrorType = 3 + ErrorType_ALREADY_BOOTSTRAPPED ErrorType = 4 + ErrorType_INCOMPATIBLE_VERSION ErrorType = 5 + ErrorType_REGION_NOT_FOUND ErrorType = 6 +) + +var ErrorType_name = map[int32]string{ + 0: "OK", + 1: "UNKNOWN", + 2: "NOT_BOOTSTRAPPED", + 3: "STORE_TOMBSTONE", + 4: "ALREADY_BOOTSTRAPPED", + 5: "INCOMPATIBLE_VERSION", + 6: "REGION_NOT_FOUND", +} +var ErrorType_value = map[string]int32{ + "OK": 0, + "UNKNOWN": 1, + "NOT_BOOTSTRAPPED": 2, + "STORE_TOMBSTONE": 3, + "ALREADY_BOOTSTRAPPED": 4, + "INCOMPATIBLE_VERSION": 5, + "REGION_NOT_FOUND": 6, +} + +func (x ErrorType) String() string { + return proto.EnumName(ErrorType_name, int32(x)) +} +func (ErrorType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{0} +} + +type OperatorStatus int32 + +const ( + OperatorStatus_SUCCESS OperatorStatus = 0 + OperatorStatus_TIMEOUT OperatorStatus = 1 + OperatorStatus_CANCEL OperatorStatus = 2 + OperatorStatus_REPLACE OperatorStatus = 3 + OperatorStatus_RUNNING OperatorStatus = 4 +) + +var OperatorStatus_name = map[int32]string{ + 0: "SUCCESS", + 1: "TIMEOUT", + 2: "CANCEL", + 3: "REPLACE", + 4: "RUNNING", +} +var OperatorStatus_value = map[string]int32{ + "SUCCESS": 0, + "TIMEOUT": 1, + "CANCEL": 2, + "REPLACE": 3, + "RUNNING": 4, +} + +func (x OperatorStatus) String() string { + return proto.EnumName(OperatorStatus_name, int32(x)) +} +func (OperatorStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{1} +} + +type RequestHeader struct { + // cluster_id is the ID of the cluster which be sent to. 
+ ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{0} +} +func (m *RequestHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RequestHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestHeader.Merge(dst, src) +} +func (m *RequestHeader) XXX_Size() int { + return m.Size() +} +func (m *RequestHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RequestHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestHeader proto.InternalMessageInfo + +func (m *RequestHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +type ResponseHeader struct { + // cluster_id is the ID of the cluster which sent the response. + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{1} +} +func (m *ResponseHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseHeader.Merge(dst, src) +} +func (m *ResponseHeader) XXX_Size() int { + return m.Size() +} +func (m *ResponseHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +type Error struct { + Type ErrorType `protobuf:"varint,1,opt,name=type,proto3,enum=schedulerpb.ErrorType" json:"type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return 
fileDescriptor_schedulerpb_4e333137f5959f12, []int{2} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(dst, src) +} +func (m *Error) XXX_Size() int { + return m.Size() +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetType() ErrorType { + if m != nil { + return m.Type + } + return ErrorType_OK +} + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type TsoRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TsoRequest) Reset() { *m = TsoRequest{} } +func (m *TsoRequest) String() string { return proto.CompactTextString(m) } +func (*TsoRequest) ProtoMessage() {} +func (*TsoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{3} +} +func (m *TsoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TsoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TsoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TsoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TsoRequest.Merge(dst, src) +} +func (m *TsoRequest) XXX_Size() int { + return m.Size() +} +func (m *TsoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TsoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TsoRequest proto.InternalMessageInfo + +func (m *TsoRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TsoRequest) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +type Timestamp struct { + Physical int64 `protobuf:"varint,1,opt,name=physical,proto3" json:"physical,omitempty"` + Logical int64 `protobuf:"varint,2,opt,name=logical,proto3" json:"logical,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{4} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() 
{ + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetPhysical() int64 { + if m != nil { + return m.Physical + } + return 0 +} + +func (m *Timestamp) GetLogical() int64 { + if m != nil { + return m.Logical + } + return 0 +} + +type TsoResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Timestamp *Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TsoResponse) Reset() { *m = TsoResponse{} } +func (m *TsoResponse) String() string { return proto.CompactTextString(m) } +func (*TsoResponse) ProtoMessage() {} +func (*TsoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{5} +} +func (m *TsoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TsoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TsoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TsoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TsoResponse.Merge(dst, src) +} +func (m *TsoResponse) XXX_Size() int { + return m.Size() +} +func (m *TsoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TsoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TsoResponse proto.InternalMessageInfo + +func (m *TsoResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TsoResponse) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *TsoResponse) GetTimestamp() *Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type BootstrapRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BootstrapRequest) Reset() { *m = BootstrapRequest{} } +func (m *BootstrapRequest) String() string { return proto.CompactTextString(m) } +func (*BootstrapRequest) ProtoMessage() {} +func (*BootstrapRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{6} +} +func (m *BootstrapRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BootstrapRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BootstrapRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BootstrapRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BootstrapRequest.Merge(dst, src) +} +func (m *BootstrapRequest) XXX_Size() int { + return m.Size() +} +func (m *BootstrapRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BootstrapRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BootstrapRequest proto.InternalMessageInfo + +func (m *BootstrapRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BootstrapRequest) GetStore() *metapb.Store 
{ + if m != nil { + return m.Store + } + return nil +} + +type BootstrapResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BootstrapResponse) Reset() { *m = BootstrapResponse{} } +func (m *BootstrapResponse) String() string { return proto.CompactTextString(m) } +func (*BootstrapResponse) ProtoMessage() {} +func (*BootstrapResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{7} +} +func (m *BootstrapResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BootstrapResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BootstrapResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BootstrapResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BootstrapResponse.Merge(dst, src) +} +func (m *BootstrapResponse) XXX_Size() int { + return m.Size() +} +func (m *BootstrapResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BootstrapResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BootstrapResponse proto.InternalMessageInfo + +func (m *BootstrapResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type IsBootstrappedRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IsBootstrappedRequest) Reset() { *m = IsBootstrappedRequest{} } +func (m *IsBootstrappedRequest) String() string { return proto.CompactTextString(m) } +func (*IsBootstrappedRequest) ProtoMessage() {} +func (*IsBootstrappedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{8} +} +func (m *IsBootstrappedRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsBootstrappedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsBootstrappedRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *IsBootstrappedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsBootstrappedRequest.Merge(dst, src) +} +func (m *IsBootstrappedRequest) XXX_Size() int { + return m.Size() +} +func (m *IsBootstrappedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IsBootstrappedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IsBootstrappedRequest proto.InternalMessageInfo + +func (m *IsBootstrappedRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type IsBootstrappedResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Bootstrapped bool `protobuf:"varint,2,opt,name=bootstrapped,proto3" json:"bootstrapped,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IsBootstrappedResponse) Reset() { *m = IsBootstrappedResponse{} } +func (m *IsBootstrappedResponse) String() string { return proto.CompactTextString(m) } +func (*IsBootstrappedResponse) ProtoMessage() {} +func 
(*IsBootstrappedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{9} +} +func (m *IsBootstrappedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsBootstrappedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IsBootstrappedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *IsBootstrappedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsBootstrappedResponse.Merge(dst, src) +} +func (m *IsBootstrappedResponse) XXX_Size() int { + return m.Size() +} +func (m *IsBootstrappedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IsBootstrappedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IsBootstrappedResponse proto.InternalMessageInfo + +func (m *IsBootstrappedResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *IsBootstrappedResponse) GetBootstrapped() bool { + if m != nil { + return m.Bootstrapped + } + return false +} + +type AllocIDRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocIDRequest) Reset() { *m = AllocIDRequest{} } +func (m *AllocIDRequest) String() string { return proto.CompactTextString(m) } +func (*AllocIDRequest) ProtoMessage() {} +func (*AllocIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{10} +} +func (m *AllocIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AllocIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AllocIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocIDRequest.Merge(dst, src) +} +func (m *AllocIDRequest) XXX_Size() int { + return m.Size() +} +func (m *AllocIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AllocIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocIDRequest proto.InternalMessageInfo + +func (m *AllocIDRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type AllocIDResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocIDResponse) Reset() { *m = AllocIDResponse{} } +func (m *AllocIDResponse) String() string { return proto.CompactTextString(m) } +func (*AllocIDResponse) ProtoMessage() {} +func (*AllocIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{11} +} +func (m *AllocIDResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AllocIDResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst 
*AllocIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocIDResponse.Merge(dst, src) +} +func (m *AllocIDResponse) XXX_Size() int { + return m.Size() +} +func (m *AllocIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AllocIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocIDResponse proto.InternalMessageInfo + +func (m *AllocIDResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocIDResponse) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +type GetStoreRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + StoreId uint64 `protobuf:"varint,2,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStoreRequest) Reset() { *m = GetStoreRequest{} } +func (m *GetStoreRequest) String() string { return proto.CompactTextString(m) } +func (*GetStoreRequest) ProtoMessage() {} +func (*GetStoreRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{12} +} +func (m *GetStoreRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetStoreRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetStoreRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStoreRequest.Merge(dst, src) +} +func (m *GetStoreRequest) XXX_Size() int { + return m.Size() +} +func (m *GetStoreRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetStoreRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStoreRequest proto.InternalMessageInfo + +func (m *GetStoreRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetStoreRequest) GetStoreId() uint64 { + if m != nil { + return m.StoreId + } + return 0 +} + +type GetStoreResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + Stats *StoreStats `protobuf:"bytes,3,opt,name=stats" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStoreResponse) Reset() { *m = GetStoreResponse{} } +func (m *GetStoreResponse) String() string { return proto.CompactTextString(m) } +func (*GetStoreResponse) ProtoMessage() {} +func (*GetStoreResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{13} +} +func (m *GetStoreResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetStoreResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetStoreResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStoreResponse.Merge(dst, src) +} +func (m *GetStoreResponse) XXX_Size() int { + return m.Size() +} +func (m *GetStoreResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetStoreResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_GetStoreResponse proto.InternalMessageInfo + +func (m *GetStoreResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetStoreResponse) GetStore() *metapb.Store { + if m != nil { + return m.Store + } + return nil +} + +func (m *GetStoreResponse) GetStats() *StoreStats { + if m != nil { + return m.Stats + } + return nil +} + +type PutStoreRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Store *metapb.Store `protobuf:"bytes,2,opt,name=store" json:"store,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutStoreRequest) Reset() { *m = PutStoreRequest{} } +func (m *PutStoreRequest) String() string { return proto.CompactTextString(m) } +func (*PutStoreRequest) ProtoMessage() {} +func (*PutStoreRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{14} +} +func (m *PutStoreRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutStoreRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutStoreRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutStoreRequest.Merge(dst, src) +} +func (m *PutStoreRequest) XXX_Size() int { + return m.Size() +} +func (m *PutStoreRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutStoreRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutStoreRequest proto.InternalMessageInfo + +func (m *PutStoreRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutStoreRequest) GetStore() *metapb.Store { + if m != nil { + return m.Store + } + return nil +} + +type PutStoreResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutStoreResponse) Reset() { *m = PutStoreResponse{} } +func (m *PutStoreResponse) String() string { return proto.CompactTextString(m) } +func (*PutStoreResponse) ProtoMessage() {} +func (*PutStoreResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{15} +} +func (m *PutStoreResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutStoreResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutStoreResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutStoreResponse.Merge(dst, src) +} +func (m *PutStoreResponse) XXX_Size() int { + return m.Size() +} +func (m *PutStoreResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutStoreResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutStoreResponse proto.InternalMessageInfo + +func (m *PutStoreResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type GetAllStoresRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // Do NOT return tombstone stores if set to true. 
+ ExcludeTombstoneStores bool `protobuf:"varint,2,opt,name=exclude_tombstone_stores,json=excludeTombstoneStores,proto3" json:"exclude_tombstone_stores,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAllStoresRequest) Reset() { *m = GetAllStoresRequest{} } +func (m *GetAllStoresRequest) String() string { return proto.CompactTextString(m) } +func (*GetAllStoresRequest) ProtoMessage() {} +func (*GetAllStoresRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{16} +} +func (m *GetAllStoresRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetAllStoresRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetAllStoresRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetAllStoresRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllStoresRequest.Merge(dst, src) +} +func (m *GetAllStoresRequest) XXX_Size() int { + return m.Size() +} +func (m *GetAllStoresRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllStoresRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAllStoresRequest proto.InternalMessageInfo + +func (m *GetAllStoresRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetAllStoresRequest) GetExcludeTombstoneStores() bool { + if m != nil { + return m.ExcludeTombstoneStores + } + return false +} + +type GetAllStoresResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Stores []*metapb.Store `protobuf:"bytes,2,rep,name=stores" json:"stores,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAllStoresResponse) Reset() { *m = GetAllStoresResponse{} } +func (m *GetAllStoresResponse) String() string { return proto.CompactTextString(m) } +func (*GetAllStoresResponse) ProtoMessage() {} +func (*GetAllStoresResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{17} +} +func (m *GetAllStoresResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetAllStoresResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetAllStoresResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetAllStoresResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllStoresResponse.Merge(dst, src) +} +func (m *GetAllStoresResponse) XXX_Size() int { + return m.Size() +} +func (m *GetAllStoresResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllStoresResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAllStoresResponse proto.InternalMessageInfo + +func (m *GetAllStoresResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetAllStoresResponse) GetStores() []*metapb.Store { + if m != nil { + return m.Stores + } + return nil +} + +type GetRegionRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionKey []byte `protobuf:"bytes,2,opt,name=region_key,json=regionKey,proto3" json:"region_key,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRegionRequest) Reset() { *m = GetRegionRequest{} } +func (m *GetRegionRequest) String() string { return proto.CompactTextString(m) } +func (*GetRegionRequest) ProtoMessage() {} +func (*GetRegionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{18} +} +func (m *GetRegionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRegionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRegionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRegionRequest.Merge(dst, src) +} +func (m *GetRegionRequest) XXX_Size() int { + return m.Size() +} +func (m *GetRegionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRegionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRegionRequest proto.InternalMessageInfo + +func (m *GetRegionRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRegionRequest) GetRegionKey() []byte { + if m != nil { + return m.RegionKey + } + return nil +} + +type GetRegionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + Leader *metapb.Peer `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + Slaves []*metapb.Peer `protobuf:"bytes,4,rep,name=slaves" json:"slaves,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRegionResponse) Reset() { *m = GetRegionResponse{} } +func (m *GetRegionResponse) String() string { return proto.CompactTextString(m) } +func (*GetRegionResponse) ProtoMessage() {} +func (*GetRegionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{19} +} +func (m *GetRegionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRegionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRegionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRegionResponse.Merge(dst, src) +} +func (m *GetRegionResponse) XXX_Size() int { + return m.Size() +} +func (m *GetRegionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetRegionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRegionResponse proto.InternalMessageInfo + +func (m *GetRegionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRegionResponse) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +func (m *GetRegionResponse) GetLeader() *metapb.Peer { + if m != nil { + return m.Leader + } + return nil +} + +func (m *GetRegionResponse) GetSlaves() []*metapb.Peer { + if m != nil { + return m.Slaves + } + return nil +} + +type GetRegionByIDRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionId 
uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRegionByIDRequest) Reset() { *m = GetRegionByIDRequest{} } +func (m *GetRegionByIDRequest) String() string { return proto.CompactTextString(m) } +func (*GetRegionByIDRequest) ProtoMessage() {} +func (*GetRegionByIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{20} +} +func (m *GetRegionByIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetRegionByIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetRegionByIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetRegionByIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRegionByIDRequest.Merge(dst, src) +} +func (m *GetRegionByIDRequest) XXX_Size() int { + return m.Size() +} +func (m *GetRegionByIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRegionByIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRegionByIDRequest proto.InternalMessageInfo + +func (m *GetRegionByIDRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRegionByIDRequest) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +type ScanRegionsRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + Limit int32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + EndKey []byte `protobuf:"bytes,4,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScanRegionsRequest) Reset() { *m = ScanRegionsRequest{} } +func (m *ScanRegionsRequest) String() string { return proto.CompactTextString(m) } +func (*ScanRegionsRequest) ProtoMessage() {} +func (*ScanRegionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{21} +} +func (m *ScanRegionsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScanRegionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScanRegionsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScanRegionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScanRegionsRequest.Merge(dst, src) +} +func (m *ScanRegionsRequest) XXX_Size() int { + return m.Size() +} +func (m *ScanRegionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ScanRegionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ScanRegionsRequest proto.InternalMessageInfo + +func (m *ScanRegionsRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ScanRegionsRequest) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *ScanRegionsRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ScanRegionsRequest) GetEndKey() []byte { + if m != 
nil { + return m.EndKey + } + return nil +} + +type ScanRegionsResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Regions []*metapb.Region `protobuf:"bytes,2,rep,name=regions" json:"regions,omitempty"` + Leaders []*metapb.Peer `protobuf:"bytes,3,rep,name=leaders" json:"leaders,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScanRegionsResponse) Reset() { *m = ScanRegionsResponse{} } +func (m *ScanRegionsResponse) String() string { return proto.CompactTextString(m) } +func (*ScanRegionsResponse) ProtoMessage() {} +func (*ScanRegionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{22} +} +func (m *ScanRegionsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScanRegionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScanRegionsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScanRegionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScanRegionsResponse.Merge(dst, src) +} +func (m *ScanRegionsResponse) XXX_Size() int { + return m.Size() +} +func (m *ScanRegionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ScanRegionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ScanRegionsResponse proto.InternalMessageInfo + +func (m *ScanRegionsResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ScanRegionsResponse) GetRegions() []*metapb.Region { + if m != nil { + return m.Regions + } + return nil +} + +func (m *ScanRegionsResponse) GetLeaders() []*metapb.Peer { + if m != nil { + return m.Leaders + } + return nil +} + +type GetClusterConfigRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterConfigRequest) Reset() { *m = GetClusterConfigRequest{} } +func (m *GetClusterConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterConfigRequest) ProtoMessage() {} +func (*GetClusterConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{23} +} +func (m *GetClusterConfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetClusterConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetClusterConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetClusterConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterConfigRequest.Merge(dst, src) +} +func (m *GetClusterConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *GetClusterConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterConfigRequest proto.InternalMessageInfo + +func (m *GetClusterConfigRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type GetClusterConfigResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" 
json:"header,omitempty"` + Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterConfigResponse) Reset() { *m = GetClusterConfigResponse{} } +func (m *GetClusterConfigResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterConfigResponse) ProtoMessage() {} +func (*GetClusterConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{24} +} +func (m *GetClusterConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetClusterConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetClusterConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetClusterConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterConfigResponse.Merge(dst, src) +} +func (m *GetClusterConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *GetClusterConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterConfigResponse proto.InternalMessageInfo + +func (m *GetClusterConfigResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetClusterConfigResponse) GetCluster() *metapb.Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +type PutClusterConfigRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Cluster *metapb.Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutClusterConfigRequest) Reset() { *m = PutClusterConfigRequest{} } +func (m *PutClusterConfigRequest) String() string { return proto.CompactTextString(m) } +func (*PutClusterConfigRequest) ProtoMessage() {} +func (*PutClusterConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{25} +} +func (m *PutClusterConfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutClusterConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutClusterConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutClusterConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutClusterConfigRequest.Merge(dst, src) +} +func (m *PutClusterConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *PutClusterConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutClusterConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutClusterConfigRequest proto.InternalMessageInfo + +func (m *PutClusterConfigRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutClusterConfigRequest) GetCluster() *metapb.Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +type PutClusterConfigResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutClusterConfigResponse) Reset() { *m = PutClusterConfigResponse{} } +func (m *PutClusterConfigResponse) String() string { return proto.CompactTextString(m) } +func (*PutClusterConfigResponse) ProtoMessage() {} +func (*PutClusterConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{26} +} +func (m *PutClusterConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PutClusterConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PutClusterConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PutClusterConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutClusterConfigResponse.Merge(dst, src) +} +func (m *PutClusterConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *PutClusterConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutClusterConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutClusterConfigResponse proto.InternalMessageInfo + +func (m *PutClusterConfigResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type Member struct { + // name is the name of the Scheduler member. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // member_id is the unique id of the Scheduler member. + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + PeerUrls []string `protobuf:"bytes,3,rep,name=peer_urls,json=peerUrls" json:"peer_urls,omitempty"` + ClientUrls []string `protobuf:"bytes,4,rep,name=client_urls,json=clientUrls" json:"client_urls,omitempty"` + LeaderPriority int32 `protobuf:"varint,5,opt,name=leader_priority,json=leaderPriority,proto3" json:"leader_priority,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{27} +} +func (m *Member) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Member.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Member) XXX_Merge(src proto.Message) { + xxx_messageInfo_Member.Merge(dst, src) +} +func (m *Member) XXX_Size() int { + return m.Size() +} +func (m *Member) XXX_DiscardUnknown() { + xxx_messageInfo_Member.DiscardUnknown(m) +} + +var xxx_messageInfo_Member proto.InternalMessageInfo + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *Member) GetPeerUrls() []string { + if m != nil { + return m.PeerUrls + } + return nil +} + +func (m *Member) GetClientUrls() []string { + if m != nil { + return m.ClientUrls + } + return nil +} + +func (m *Member) GetLeaderPriority() int32 { + if m != nil { + return m.LeaderPriority + } + return 0 +} + 
+type GetMembersRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMembersRequest) Reset() { *m = GetMembersRequest{} } +func (m *GetMembersRequest) String() string { return proto.CompactTextString(m) } +func (*GetMembersRequest) ProtoMessage() {} +func (*GetMembersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{28} +} +func (m *GetMembersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetMembersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetMembersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMembersRequest.Merge(dst, src) +} +func (m *GetMembersRequest) XXX_Size() int { + return m.Size() +} +func (m *GetMembersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMembersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMembersRequest proto.InternalMessageInfo + +func (m *GetMembersRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type GetMembersResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` + Leader *Member `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + EtcdLeader *Member `protobuf:"bytes,4,opt,name=etcd_leader,json=etcdLeader" json:"etcd_leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMembersResponse) Reset() { *m = GetMembersResponse{} } +func (m *GetMembersResponse) String() string { return proto.CompactTextString(m) } +func (*GetMembersResponse) ProtoMessage() {} +func (*GetMembersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{29} +} +func (m *GetMembersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetMembersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetMembersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMembersResponse.Merge(dst, src) +} +func (m *GetMembersResponse) XXX_Size() int { + return m.Size() +} +func (m *GetMembersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetMembersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMembersResponse proto.InternalMessageInfo + +func (m *GetMembersResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetMembersResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +func (m *GetMembersResponse) GetLeader() *Member { + if m != nil { + return m.Leader + } + return nil +} + +func (m *GetMembersResponse) GetEtcdLeader() *Member { + if m != nil { + return m.EtcdLeader + } + return nil +} + +type RegionHeartbeatRequest struct { + Header *RequestHeader 
`protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + // Leader Peer sending the heartbeat. + Leader *metapb.Peer `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + // Pending peers are the peers that the leader can't consider as + // working followers. + PendingPeers []*metapb.Peer `protobuf:"bytes,5,rep,name=pending_peers,json=pendingPeers" json:"pending_peers,omitempty"` + // Approximate region size. + ApproximateSize uint64 `protobuf:"varint,10,opt,name=approximate_size,json=approximateSize,proto3" json:"approximate_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionHeartbeatRequest) Reset() { *m = RegionHeartbeatRequest{} } +func (m *RegionHeartbeatRequest) String() string { return proto.CompactTextString(m) } +func (*RegionHeartbeatRequest) ProtoMessage() {} +func (*RegionHeartbeatRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{30} +} +func (m *RegionHeartbeatRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionHeartbeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionHeartbeatRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionHeartbeatRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionHeartbeatRequest.Merge(dst, src) +} +func (m *RegionHeartbeatRequest) XXX_Size() int { + return m.Size() +} +func (m *RegionHeartbeatRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegionHeartbeatRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegionHeartbeatRequest proto.InternalMessageInfo + +func (m *RegionHeartbeatRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RegionHeartbeatRequest) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +func (m *RegionHeartbeatRequest) GetLeader() *metapb.Peer { + if m != nil { + return m.Leader + } + return nil +} + +func (m *RegionHeartbeatRequest) GetPendingPeers() []*metapb.Peer { + if m != nil { + return m.PendingPeers + } + return nil +} + +func (m *RegionHeartbeatRequest) GetApproximateSize() uint64 { + if m != nil { + return m.ApproximateSize + } + return 0 +} + +type ChangePeer struct { + Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + ChangeType eraftpb.ConfChangeType `protobuf:"varint,2,opt,name=change_type,json=changeType,proto3,enum=eraftpb.ConfChangeType" json:"change_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChangePeer) Reset() { *m = ChangePeer{} } +func (m *ChangePeer) String() string { return proto.CompactTextString(m) } +func (*ChangePeer) ProtoMessage() {} +func (*ChangePeer) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{31} +} +func (m *ChangePeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangePeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangePeer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } 
+} +func (dst *ChangePeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangePeer.Merge(dst, src) +} +func (m *ChangePeer) XXX_Size() int { + return m.Size() +} +func (m *ChangePeer) XXX_DiscardUnknown() { + xxx_messageInfo_ChangePeer.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangePeer proto.InternalMessageInfo + +func (m *ChangePeer) GetPeer() *metapb.Peer { + if m != nil { + return m.Peer + } + return nil +} + +func (m *ChangePeer) GetChangeType() eraftpb.ConfChangeType { + if m != nil { + return m.ChangeType + } + return eraftpb.ConfChangeType_AddNode +} + +type TransferLeader struct { + Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferLeader) Reset() { *m = TransferLeader{} } +func (m *TransferLeader) String() string { return proto.CompactTextString(m) } +func (*TransferLeader) ProtoMessage() {} +func (*TransferLeader) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{32} +} +func (m *TransferLeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferLeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferLeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TransferLeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferLeader.Merge(dst, src) +} +func (m *TransferLeader) XXX_Size() int { + return m.Size() +} +func (m *TransferLeader) XXX_DiscardUnknown() { + xxx_messageInfo_TransferLeader.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferLeader proto.InternalMessageInfo + +func (m *TransferLeader) GetPeer() *metapb.Peer { + if m != nil { + return m.Peer + } + return nil +} + +type RegionHeartbeatResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // Notice, Scheduler only allows handling reported epoch >= current scheduler's. + // Leader peer reports region status with RegionHeartbeatRequest + // to the scheduler regularly; the scheduler will determine whether this region + // should do ChangePeer or not. + // E.g., max peer number is 3; region A at first has only peer 1. + // 1. Scheduler region state -> Peers (1), ConfVer (1). + // 2. Leader peer 1 reports region state to the scheduler; the scheduler finds the + // peer number is < 3, so it first changes its current region + // state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2. + // 3. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2), + // scheduler updates its state -> Peers (1, 2), ConfVer (2). + // 4. Leader may report old Peers (1), ConfVer (1) to scheduler before ConfChange + // finished; the scheduler still responds with ChangePeer Adding 2, and of course we must + // guarantee that the second ChangePeer can't be applied in TiKV. + ChangePeer *ChangePeer `protobuf:"bytes,2,opt,name=change_peer,json=changePeer" json:"change_peer,omitempty"` + // Scheduler can return transfer_leader to let TiKV do the leader transfer itself. 
+ TransferLeader *TransferLeader `protobuf:"bytes,3,opt,name=transfer_leader,json=transferLeader" json:"transfer_leader,omitempty"` + // ID of the region + RegionId uint64 `protobuf:"varint,4,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,5,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` + // Leader of the region at the moment of the corresponding request was made. + TargetPeer *metapb.Peer `protobuf:"bytes,6,opt,name=target_peer,json=targetPeer" json:"target_peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RegionHeartbeatResponse) Reset() { *m = RegionHeartbeatResponse{} } +func (m *RegionHeartbeatResponse) String() string { return proto.CompactTextString(m) } +func (*RegionHeartbeatResponse) ProtoMessage() {} +func (*RegionHeartbeatResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{33} +} +func (m *RegionHeartbeatResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegionHeartbeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegionHeartbeatResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RegionHeartbeatResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegionHeartbeatResponse.Merge(dst, src) +} +func (m *RegionHeartbeatResponse) XXX_Size() int { + return m.Size() +} +func (m *RegionHeartbeatResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegionHeartbeatResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RegionHeartbeatResponse proto.InternalMessageInfo + +func (m *RegionHeartbeatResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RegionHeartbeatResponse) GetChangePeer() *ChangePeer { + if m != nil { + return m.ChangePeer + } + return nil +} + +func (m *RegionHeartbeatResponse) GetTransferLeader() *TransferLeader { + if m != nil { + return m.TransferLeader + } + return nil +} + +func (m *RegionHeartbeatResponse) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *RegionHeartbeatResponse) GetRegionEpoch() *metapb.RegionEpoch { + if m != nil { + return m.RegionEpoch + } + return nil +} + +func (m *RegionHeartbeatResponse) GetTargetPeer() *metapb.Peer { + if m != nil { + return m.TargetPeer + } + return nil +} + +type AskSplitRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Region *metapb.Region `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AskSplitRequest) Reset() { *m = AskSplitRequest{} } +func (m *AskSplitRequest) String() string { return proto.CompactTextString(m) } +func (*AskSplitRequest) ProtoMessage() {} +func (*AskSplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{34} +} +func (m *AskSplitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AskSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AskSplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AskSplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AskSplitRequest.Merge(dst, src) +} +func (m *AskSplitRequest) XXX_Size() int { + return m.Size() +} +func (m *AskSplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AskSplitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AskSplitRequest proto.InternalMessageInfo + +func (m *AskSplitRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AskSplitRequest) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +type AskSplitResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // We split the region into two: the first uses the original + // parent region id, and the second uses the new_region_id. + // We must guarantee that the new_region_id is globally unique. + NewRegionId uint64 `protobuf:"varint,2,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"` + // The peer ids for the new split region. + NewPeerIds []uint64 `protobuf:"varint,3,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AskSplitResponse) Reset() { *m = AskSplitResponse{} } +func (m *AskSplitResponse) String() string { return proto.CompactTextString(m) } +func (*AskSplitResponse) ProtoMessage() {} +func (*AskSplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{35} +} +func (m *AskSplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AskSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AskSplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AskSplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AskSplitResponse.Merge(dst, src) +} +func (m *AskSplitResponse) XXX_Size() int { + return m.Size() +} +func (m *AskSplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AskSplitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AskSplitResponse proto.InternalMessageInfo + +func (m *AskSplitResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AskSplitResponse) GetNewRegionId() uint64 { + if m != nil { + return m.NewRegionId + } + return 0 +} + +func (m *AskSplitResponse) GetNewPeerIds() []uint64 { + if m != nil { + return m.NewPeerIds + } + return nil +} + +type ReportSplitRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Left *metapb.Region `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"` + Right *metapb.Region `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReportSplitRequest) Reset() { *m = ReportSplitRequest{} } +func (m *ReportSplitRequest) String() string { return proto.CompactTextString(m) } +func (*ReportSplitRequest) ProtoMessage() {} +func (*ReportSplitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{36} +} +func (m *ReportSplitRequest) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ReportSplitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReportSplitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReportSplitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReportSplitRequest.Merge(dst, src) +} +func (m *ReportSplitRequest) XXX_Size() int { + return m.Size() +} +func (m *ReportSplitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReportSplitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReportSplitRequest proto.InternalMessageInfo + +func (m *ReportSplitRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ReportSplitRequest) GetLeft() *metapb.Region { + if m != nil { + return m.Left + } + return nil +} + +func (m *ReportSplitRequest) GetRight() *metapb.Region { + if m != nil { + return m.Right + } + return nil +} + +type ReportSplitResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReportSplitResponse) Reset() { *m = ReportSplitResponse{} } +func (m *ReportSplitResponse) String() string { return proto.CompactTextString(m) } +func (*ReportSplitResponse) ProtoMessage() {} +func (*ReportSplitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{37} +} +func (m *ReportSplitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReportSplitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReportSplitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReportSplitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReportSplitResponse.Merge(dst, src) +} +func (m *ReportSplitResponse) XXX_Size() int { + return m.Size() +} +func (m *ReportSplitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReportSplitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReportSplitResponse proto.InternalMessageInfo + +func (m *ReportSplitResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type SplitID struct { + NewRegionId uint64 `protobuf:"varint,1,opt,name=new_region_id,json=newRegionId,proto3" json:"new_region_id,omitempty"` + NewPeerIds []uint64 `protobuf:"varint,2,rep,packed,name=new_peer_ids,json=newPeerIds" json:"new_peer_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SplitID) Reset() { *m = SplitID{} } +func (m *SplitID) String() string { return proto.CompactTextString(m) } +func (*SplitID) ProtoMessage() {} +func (*SplitID) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{38} +} +func (m *SplitID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SplitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SplitID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SplitID) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SplitID.Merge(dst, src) +} +func (m *SplitID) XXX_Size() int { + return m.Size() +} +func (m *SplitID) XXX_DiscardUnknown() { + xxx_messageInfo_SplitID.DiscardUnknown(m) +} + +var xxx_messageInfo_SplitID proto.InternalMessageInfo + +func (m *SplitID) GetNewRegionId() uint64 { + if m != nil { + return m.NewRegionId + } + return 0 +} + +func (m *SplitID) GetNewPeerIds() []uint64 { + if m != nil { + return m.NewPeerIds + } + return nil +} + +type TimeInterval struct { + // The unix timestamp in seconds of the start of this period. + StartTimestamp uint64 `protobuf:"varint,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + // The unix timestamp in seconds of the end of this period. + EndTimestamp uint64 `protobuf:"varint,2,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeInterval) Reset() { *m = TimeInterval{} } +func (m *TimeInterval) String() string { return proto.CompactTextString(m) } +func (*TimeInterval) ProtoMessage() {} +func (*TimeInterval) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{39} +} +func (m *TimeInterval) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TimeInterval) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeInterval.Merge(dst, src) +} +func (m *TimeInterval) XXX_Size() int { + return m.Size() +} +func (m *TimeInterval) XXX_DiscardUnknown() { + xxx_messageInfo_TimeInterval.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeInterval proto.InternalMessageInfo + +func (m *TimeInterval) GetStartTimestamp() uint64 { + if m != nil { + return m.StartTimestamp + } + return 0 +} + +func (m *TimeInterval) GetEndTimestamp() uint64 { + if m != nil { + return m.EndTimestamp + } + return 0 +} + +type RecordPair struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecordPair) Reset() { *m = RecordPair{} } +func (m *RecordPair) String() string { return proto.CompactTextString(m) } +func (*RecordPair) ProtoMessage() {} +func (*RecordPair) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{40} +} +func (m *RecordPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecordPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RecordPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RecordPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecordPair.Merge(dst, src) +} +func (m *RecordPair) XXX_Size() int { + return m.Size() +} +func (m *RecordPair) XXX_DiscardUnknown() { + xxx_messageInfo_RecordPair.DiscardUnknown(m) +} + +var xxx_messageInfo_RecordPair proto.InternalMessageInfo + +func (m *RecordPair) GetKey() string { + if m != nil { + return 
m.Key + } + return "" +} + +func (m *RecordPair) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +type StoreStats struct { + StoreId uint64 `protobuf:"varint,1,opt,name=store_id,json=storeId,proto3" json:"store_id,omitempty"` + // Capacity for the store. + Capacity uint64 `protobuf:"varint,2,opt,name=capacity,proto3" json:"capacity,omitempty"` + // Available size for the store. + Available uint64 `protobuf:"varint,3,opt,name=available,proto3" json:"available,omitempty"` + // Total region count in this store. + RegionCount uint32 `protobuf:"varint,4,opt,name=region_count,json=regionCount,proto3" json:"region_count,omitempty"` + // Current sending snapshot count. + SendingSnapCount uint32 `protobuf:"varint,5,opt,name=sending_snap_count,json=sendingSnapCount,proto3" json:"sending_snap_count,omitempty"` + // Current receiving snapshot count. + ReceivingSnapCount uint32 `protobuf:"varint,6,opt,name=receiving_snap_count,json=receivingSnapCount,proto3" json:"receiving_snap_count,omitempty"` + // When the store is started (unix timestamp in seconds). + StartTime uint32 `protobuf:"varint,7,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // How many regions are applying snapshots. + ApplyingSnapCount uint32 `protobuf:"varint,8,opt,name=applying_snap_count,json=applyingSnapCount,proto3" json:"applying_snap_count,omitempty"` + // If the store is busy. + IsBusy bool `protobuf:"varint,9,opt,name=is_busy,json=isBusy,proto3" json:"is_busy,omitempty"` + // Space actually used by the db. + UsedSize uint64 `protobuf:"varint,10,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + // The actually reported time interval. + Interval *TimeInterval `protobuf:"bytes,15,opt,name=interval" json:"interval,omitempty"` + // Threads' CPU usages in the store. + CpuUsages []*RecordPair `protobuf:"bytes,16,rep,name=cpu_usages,json=cpuUsages" json:"cpu_usages,omitempty"` + // Threads' read disk I/O rates in the store. + ReadIoRates []*RecordPair `protobuf:"bytes,17,rep,name=read_io_rates,json=readIoRates" json:"read_io_rates,omitempty"` + // Threads' write disk I/O rates in the store. + WriteIoRates []*RecordPair `protobuf:"bytes,18,rep,name=write_io_rates,json=writeIoRates" json:"write_io_rates,omitempty"` + // Operations' latencies in the store. + OpLatencies []*RecordPair `protobuf:"bytes,19,rep,name=op_latencies,json=opLatencies" json:"op_latencies,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreStats) Reset() { *m = StoreStats{} } +func (m *StoreStats) String() string { return proto.CompactTextString(m) } +func (*StoreStats) ProtoMessage() {} +func (*StoreStats) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{41} +} +func (m *StoreStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreStats.Merge(dst, src) +} +func (m *StoreStats) XXX_Size() int { + return m.Size() +} +func (m *StoreStats) XXX_DiscardUnknown() { + xxx_messageInfo_StoreStats.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreStats proto.InternalMessageInfo + +func (m *StoreStats) GetStoreId() 
uint64 { + if m != nil { + return m.StoreId + } + return 0 +} + +func (m *StoreStats) GetCapacity() uint64 { + if m != nil { + return m.Capacity + } + return 0 +} + +func (m *StoreStats) GetAvailable() uint64 { + if m != nil { + return m.Available + } + return 0 +} + +func (m *StoreStats) GetRegionCount() uint32 { + if m != nil { + return m.RegionCount + } + return 0 +} + +func (m *StoreStats) GetSendingSnapCount() uint32 { + if m != nil { + return m.SendingSnapCount + } + return 0 +} + +func (m *StoreStats) GetReceivingSnapCount() uint32 { + if m != nil { + return m.ReceivingSnapCount + } + return 0 +} + +func (m *StoreStats) GetStartTime() uint32 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *StoreStats) GetApplyingSnapCount() uint32 { + if m != nil { + return m.ApplyingSnapCount + } + return 0 +} + +func (m *StoreStats) GetIsBusy() bool { + if m != nil { + return m.IsBusy + } + return false +} + +func (m *StoreStats) GetUsedSize() uint64 { + if m != nil { + return m.UsedSize + } + return 0 +} + +func (m *StoreStats) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *StoreStats) GetCpuUsages() []*RecordPair { + if m != nil { + return m.CpuUsages + } + return nil +} + +func (m *StoreStats) GetReadIoRates() []*RecordPair { + if m != nil { + return m.ReadIoRates + } + return nil +} + +func (m *StoreStats) GetWriteIoRates() []*RecordPair { + if m != nil { + return m.WriteIoRates + } + return nil +} + +func (m *StoreStats) GetOpLatencies() []*RecordPair { + if m != nil { + return m.OpLatencies + } + return nil +} + +type StoreHeartbeatRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Stats *StoreStats `protobuf:"bytes,2,opt,name=stats" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StoreHeartbeatRequest) Reset() { *m = StoreHeartbeatRequest{} } +func (m *StoreHeartbeatRequest) String() string { return proto.CompactTextString(m) } +func (*StoreHeartbeatRequest) ProtoMessage() {} +func (*StoreHeartbeatRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{42} +} +func (m *StoreHeartbeatRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreHeartbeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreHeartbeatRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreHeartbeatRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreHeartbeatRequest.Merge(dst, src) +} +func (m *StoreHeartbeatRequest) XXX_Size() int { + return m.Size() +} +func (m *StoreHeartbeatRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StoreHeartbeatRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreHeartbeatRequest proto.InternalMessageInfo + +func (m *StoreHeartbeatRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *StoreHeartbeatRequest) GetStats() *StoreStats { + if m != nil { + return m.Stats + } + return nil +} + +type StoreHeartbeatResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*StoreHeartbeatResponse) Reset() { *m = StoreHeartbeatResponse{} } +func (m *StoreHeartbeatResponse) String() string { return proto.CompactTextString(m) } +func (*StoreHeartbeatResponse) ProtoMessage() {} +func (*StoreHeartbeatResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{43} +} +func (m *StoreHeartbeatResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreHeartbeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreHeartbeatResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StoreHeartbeatResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreHeartbeatResponse.Merge(dst, src) +} +func (m *StoreHeartbeatResponse) XXX_Size() int { + return m.Size() +} +func (m *StoreHeartbeatResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StoreHeartbeatResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreHeartbeatResponse proto.InternalMessageInfo + +func (m *StoreHeartbeatResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type ScatterRegionRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + // Scheduler will use these region information if it can't find the region. + // For example, the region is just split and hasn't report to Scheduler yet. + Region *metapb.Region `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + Leader *metapb.Peer `protobuf:"bytes,4,opt,name=leader" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScatterRegionRequest) Reset() { *m = ScatterRegionRequest{} } +func (m *ScatterRegionRequest) String() string { return proto.CompactTextString(m) } +func (*ScatterRegionRequest) ProtoMessage() {} +func (*ScatterRegionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{44} +} +func (m *ScatterRegionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScatterRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScatterRegionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScatterRegionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScatterRegionRequest.Merge(dst, src) +} +func (m *ScatterRegionRequest) XXX_Size() int { + return m.Size() +} +func (m *ScatterRegionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ScatterRegionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ScatterRegionRequest proto.InternalMessageInfo + +func (m *ScatterRegionRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ScatterRegionRequest) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *ScatterRegionRequest) GetRegion() *metapb.Region { + if m != nil { + return m.Region + } + return nil +} + +func (m *ScatterRegionRequest) GetLeader() *metapb.Peer { + if m != nil { + return m.Leader + } + return nil +} + +type ScatterRegionResponse struct { + Header 
*ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ScatterRegionResponse) Reset() { *m = ScatterRegionResponse{} } +func (m *ScatterRegionResponse) String() string { return proto.CompactTextString(m) } +func (*ScatterRegionResponse) ProtoMessage() {} +func (*ScatterRegionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{45} +} +func (m *ScatterRegionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScatterRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScatterRegionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ScatterRegionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScatterRegionResponse.Merge(dst, src) +} +func (m *ScatterRegionResponse) XXX_Size() int { + return m.Size() +} +func (m *ScatterRegionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ScatterRegionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ScatterRegionResponse proto.InternalMessageInfo + +func (m *ScatterRegionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type GetGCSafePointRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGCSafePointRequest) Reset() { *m = GetGCSafePointRequest{} } +func (m *GetGCSafePointRequest) String() string { return proto.CompactTextString(m) } +func (*GetGCSafePointRequest) ProtoMessage() {} +func (*GetGCSafePointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{46} +} +func (m *GetGCSafePointRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetGCSafePointRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetGCSafePointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGCSafePointRequest.Merge(dst, src) +} +func (m *GetGCSafePointRequest) XXX_Size() int { + return m.Size() +} +func (m *GetGCSafePointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetGCSafePointRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGCSafePointRequest proto.InternalMessageInfo + +func (m *GetGCSafePointRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type GetGCSafePointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGCSafePointResponse) Reset() { *m = GetGCSafePointResponse{} } +func (m *GetGCSafePointResponse) String() string { return proto.CompactTextString(m) } +func (*GetGCSafePointResponse) ProtoMessage() {} +func (*GetGCSafePointResponse) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{47} +} +func (m *GetGCSafePointResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetGCSafePointResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetGCSafePointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGCSafePointResponse.Merge(dst, src) +} +func (m *GetGCSafePointResponse) XXX_Size() int { + return m.Size() +} +func (m *GetGCSafePointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetGCSafePointResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGCSafePointResponse proto.InternalMessageInfo + +func (m *GetGCSafePointResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetGCSafePointResponse) GetSafePoint() uint64 { + if m != nil { + return m.SafePoint + } + return 0 +} + +type UpdateGCSafePointRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGCSafePointRequest) Reset() { *m = UpdateGCSafePointRequest{} } +func (m *UpdateGCSafePointRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGCSafePointRequest) ProtoMessage() {} +func (*UpdateGCSafePointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{48} +} +func (m *UpdateGCSafePointRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateGCSafePointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateGCSafePointRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UpdateGCSafePointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGCSafePointRequest.Merge(dst, src) +} +func (m *UpdateGCSafePointRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateGCSafePointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGCSafePointRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateGCSafePointRequest proto.InternalMessageInfo + +func (m *UpdateGCSafePointRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *UpdateGCSafePointRequest) GetSafePoint() uint64 { + if m != nil { + return m.SafePoint + } + return 0 +} + +type UpdateGCSafePointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + NewSafePoint uint64 `protobuf:"varint,2,opt,name=new_safe_point,json=newSafePoint,proto3" json:"new_safe_point,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGCSafePointResponse) Reset() { *m = UpdateGCSafePointResponse{} } +func (m *UpdateGCSafePointResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateGCSafePointResponse) ProtoMessage() {} +func (*UpdateGCSafePointResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_schedulerpb_4e333137f5959f12, []int{49} +} +func (m *UpdateGCSafePointResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateGCSafePointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateGCSafePointResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UpdateGCSafePointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGCSafePointResponse.Merge(dst, src) +} +func (m *UpdateGCSafePointResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateGCSafePointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGCSafePointResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateGCSafePointResponse proto.InternalMessageInfo + +func (m *UpdateGCSafePointResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *UpdateGCSafePointResponse) GetNewSafePoint() uint64 { + if m != nil { + return m.NewSafePoint + } + return 0 +} + +type GetOperatorRequest struct { + Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetOperatorRequest) Reset() { *m = GetOperatorRequest{} } +func (m *GetOperatorRequest) String() string { return proto.CompactTextString(m) } +func (*GetOperatorRequest) ProtoMessage() {} +func (*GetOperatorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{50} +} +func (m *GetOperatorRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOperatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetOperatorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetOperatorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOperatorRequest.Merge(dst, src) +} +func (m *GetOperatorRequest) XXX_Size() int { + return m.Size() +} +func (m *GetOperatorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetOperatorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOperatorRequest proto.InternalMessageInfo + +func (m *GetOperatorRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetOperatorRequest) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +type GetOperatorResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + RegionId uint64 `protobuf:"varint,2,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` + Desc []byte `protobuf:"bytes,3,opt,name=desc,proto3" json:"desc,omitempty"` + Status OperatorStatus `protobuf:"varint,4,opt,name=status,proto3,enum=schedulerpb.OperatorStatus" json:"status,omitempty"` + Kind []byte `protobuf:"bytes,5,opt,name=kind,proto3" json:"kind,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetOperatorResponse) Reset() { *m = GetOperatorResponse{} } +func (m *GetOperatorResponse) String() string { return 
proto.CompactTextString(m) } +func (*GetOperatorResponse) ProtoMessage() {} +func (*GetOperatorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_schedulerpb_4e333137f5959f12, []int{51} +} +func (m *GetOperatorResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOperatorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetOperatorResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GetOperatorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOperatorResponse.Merge(dst, src) +} +func (m *GetOperatorResponse) XXX_Size() int { + return m.Size() +} +func (m *GetOperatorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetOperatorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOperatorResponse proto.InternalMessageInfo + +func (m *GetOperatorResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetOperatorResponse) GetRegionId() uint64 { + if m != nil { + return m.RegionId + } + return 0 +} + +func (m *GetOperatorResponse) GetDesc() []byte { + if m != nil { + return m.Desc + } + return nil +} + +func (m *GetOperatorResponse) GetStatus() OperatorStatus { + if m != nil { + return m.Status + } + return OperatorStatus_SUCCESS +} + +func (m *GetOperatorResponse) GetKind() []byte { + if m != nil { + return m.Kind + } + return nil +} + +func init() { + proto.RegisterType((*RequestHeader)(nil), "schedulerpb.RequestHeader") + proto.RegisterType((*ResponseHeader)(nil), "schedulerpb.ResponseHeader") + proto.RegisterType((*Error)(nil), "schedulerpb.Error") + proto.RegisterType((*TsoRequest)(nil), "schedulerpb.TsoRequest") + proto.RegisterType((*Timestamp)(nil), "schedulerpb.Timestamp") + proto.RegisterType((*TsoResponse)(nil), "schedulerpb.TsoResponse") + proto.RegisterType((*BootstrapRequest)(nil), "schedulerpb.BootstrapRequest") + proto.RegisterType((*BootstrapResponse)(nil), "schedulerpb.BootstrapResponse") + proto.RegisterType((*IsBootstrappedRequest)(nil), "schedulerpb.IsBootstrappedRequest") + proto.RegisterType((*IsBootstrappedResponse)(nil), "schedulerpb.IsBootstrappedResponse") + proto.RegisterType((*AllocIDRequest)(nil), "schedulerpb.AllocIDRequest") + proto.RegisterType((*AllocIDResponse)(nil), "schedulerpb.AllocIDResponse") + proto.RegisterType((*GetStoreRequest)(nil), "schedulerpb.GetStoreRequest") + proto.RegisterType((*GetStoreResponse)(nil), "schedulerpb.GetStoreResponse") + proto.RegisterType((*PutStoreRequest)(nil), "schedulerpb.PutStoreRequest") + proto.RegisterType((*PutStoreResponse)(nil), "schedulerpb.PutStoreResponse") + proto.RegisterType((*GetAllStoresRequest)(nil), "schedulerpb.GetAllStoresRequest") + proto.RegisterType((*GetAllStoresResponse)(nil), "schedulerpb.GetAllStoresResponse") + proto.RegisterType((*GetRegionRequest)(nil), "schedulerpb.GetRegionRequest") + proto.RegisterType((*GetRegionResponse)(nil), "schedulerpb.GetRegionResponse") + proto.RegisterType((*GetRegionByIDRequest)(nil), "schedulerpb.GetRegionByIDRequest") + proto.RegisterType((*ScanRegionsRequest)(nil), "schedulerpb.ScanRegionsRequest") + proto.RegisterType((*ScanRegionsResponse)(nil), "schedulerpb.ScanRegionsResponse") + proto.RegisterType((*GetClusterConfigRequest)(nil), "schedulerpb.GetClusterConfigRequest") + proto.RegisterType((*GetClusterConfigResponse)(nil), "schedulerpb.GetClusterConfigResponse") + 
proto.RegisterType((*PutClusterConfigRequest)(nil), "schedulerpb.PutClusterConfigRequest") + proto.RegisterType((*PutClusterConfigResponse)(nil), "schedulerpb.PutClusterConfigResponse") + proto.RegisterType((*Member)(nil), "schedulerpb.Member") + proto.RegisterType((*GetMembersRequest)(nil), "schedulerpb.GetMembersRequest") + proto.RegisterType((*GetMembersResponse)(nil), "schedulerpb.GetMembersResponse") + proto.RegisterType((*RegionHeartbeatRequest)(nil), "schedulerpb.RegionHeartbeatRequest") + proto.RegisterType((*ChangePeer)(nil), "schedulerpb.ChangePeer") + proto.RegisterType((*TransferLeader)(nil), "schedulerpb.TransferLeader") + proto.RegisterType((*RegionHeartbeatResponse)(nil), "schedulerpb.RegionHeartbeatResponse") + proto.RegisterType((*AskSplitRequest)(nil), "schedulerpb.AskSplitRequest") + proto.RegisterType((*AskSplitResponse)(nil), "schedulerpb.AskSplitResponse") + proto.RegisterType((*ReportSplitRequest)(nil), "schedulerpb.ReportSplitRequest") + proto.RegisterType((*ReportSplitResponse)(nil), "schedulerpb.ReportSplitResponse") + proto.RegisterType((*SplitID)(nil), "schedulerpb.SplitID") + proto.RegisterType((*TimeInterval)(nil), "schedulerpb.TimeInterval") + proto.RegisterType((*RecordPair)(nil), "schedulerpb.RecordPair") + proto.RegisterType((*StoreStats)(nil), "schedulerpb.StoreStats") + proto.RegisterType((*StoreHeartbeatRequest)(nil), "schedulerpb.StoreHeartbeatRequest") + proto.RegisterType((*StoreHeartbeatResponse)(nil), "schedulerpb.StoreHeartbeatResponse") + proto.RegisterType((*ScatterRegionRequest)(nil), "schedulerpb.ScatterRegionRequest") + proto.RegisterType((*ScatterRegionResponse)(nil), "schedulerpb.ScatterRegionResponse") + proto.RegisterType((*GetGCSafePointRequest)(nil), "schedulerpb.GetGCSafePointRequest") + proto.RegisterType((*GetGCSafePointResponse)(nil), "schedulerpb.GetGCSafePointResponse") + proto.RegisterType((*UpdateGCSafePointRequest)(nil), "schedulerpb.UpdateGCSafePointRequest") + proto.RegisterType((*UpdateGCSafePointResponse)(nil), "schedulerpb.UpdateGCSafePointResponse") + proto.RegisterType((*GetOperatorRequest)(nil), "schedulerpb.GetOperatorRequest") + proto.RegisterType((*GetOperatorResponse)(nil), "schedulerpb.GetOperatorResponse") + proto.RegisterEnum("schedulerpb.ErrorType", ErrorType_name, ErrorType_value) + proto.RegisterEnum("schedulerpb.OperatorStatus", OperatorStatus_name, OperatorStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Scheduler service + +type SchedulerClient interface { + // GetMembers get the member list of this cluster. It does not require + // the cluster_id in request matchs the id of this cluster. 
+ GetMembers(ctx context.Context, in *GetMembersRequest, opts ...grpc.CallOption) (*GetMembersResponse, error) + Tso(ctx context.Context, opts ...grpc.CallOption) (Scheduler_TsoClient, error) + Bootstrap(ctx context.Context, in *BootstrapRequest, opts ...grpc.CallOption) (*BootstrapResponse, error) + IsBootstrapped(ctx context.Context, in *IsBootstrappedRequest, opts ...grpc.CallOption) (*IsBootstrappedResponse, error) + AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error) + GetStore(ctx context.Context, in *GetStoreRequest, opts ...grpc.CallOption) (*GetStoreResponse, error) + PutStore(ctx context.Context, in *PutStoreRequest, opts ...grpc.CallOption) (*PutStoreResponse, error) + GetAllStores(ctx context.Context, in *GetAllStoresRequest, opts ...grpc.CallOption) (*GetAllStoresResponse, error) + StoreHeartbeat(ctx context.Context, in *StoreHeartbeatRequest, opts ...grpc.CallOption) (*StoreHeartbeatResponse, error) + RegionHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Scheduler_RegionHeartbeatClient, error) + GetRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) + GetPrevRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) + GetRegionByID(ctx context.Context, in *GetRegionByIDRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) + ScanRegions(ctx context.Context, in *ScanRegionsRequest, opts ...grpc.CallOption) (*ScanRegionsResponse, error) + AskSplit(ctx context.Context, in *AskSplitRequest, opts ...grpc.CallOption) (*AskSplitResponse, error) + GetClusterConfig(ctx context.Context, in *GetClusterConfigRequest, opts ...grpc.CallOption) (*GetClusterConfigResponse, error) + PutClusterConfig(ctx context.Context, in *PutClusterConfigRequest, opts ...grpc.CallOption) (*PutClusterConfigResponse, error) + ScatterRegion(ctx context.Context, in *ScatterRegionRequest, opts ...grpc.CallOption) (*ScatterRegionResponse, error) + GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest, opts ...grpc.CallOption) (*GetGCSafePointResponse, error) + UpdateGCSafePoint(ctx context.Context, in *UpdateGCSafePointRequest, opts ...grpc.CallOption) (*UpdateGCSafePointResponse, error) + GetOperator(ctx context.Context, in *GetOperatorRequest, opts ...grpc.CallOption) (*GetOperatorResponse, error) +} + +type schedulerClient struct { + cc *grpc.ClientConn +} + +func NewSchedulerClient(cc *grpc.ClientConn) SchedulerClient { + return &schedulerClient{cc} +} + +func (c *schedulerClient) GetMembers(ctx context.Context, in *GetMembersRequest, opts ...grpc.CallOption) (*GetMembersResponse, error) { + out := new(GetMembersResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) Tso(ctx context.Context, opts ...grpc.CallOption) (Scheduler_TsoClient, error) { + stream, err := c.cc.NewStream(ctx, &_Scheduler_serviceDesc.Streams[0], "/schedulerpb.Scheduler/Tso", opts...) 
+ if err != nil { + return nil, err + } + x := &schedulerTsoClient{stream} + return x, nil +} + +type Scheduler_TsoClient interface { + Send(*TsoRequest) error + Recv() (*TsoResponse, error) + grpc.ClientStream +} + +type schedulerTsoClient struct { + grpc.ClientStream +} + +func (x *schedulerTsoClient) Send(m *TsoRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerTsoClient) Recv() (*TsoResponse, error) { + m := new(TsoResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *schedulerClient) Bootstrap(ctx context.Context, in *BootstrapRequest, opts ...grpc.CallOption) (*BootstrapResponse, error) { + out := new(BootstrapResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/Bootstrap", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) IsBootstrapped(ctx context.Context, in *IsBootstrappedRequest, opts ...grpc.CallOption) (*IsBootstrappedResponse, error) { + out := new(IsBootstrappedResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/IsBootstrapped", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error) { + out := new(AllocIDResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/AllocID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetStore(ctx context.Context, in *GetStoreRequest, opts ...grpc.CallOption) (*GetStoreResponse, error) { + out := new(GetStoreResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetStore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) PutStore(ctx context.Context, in *PutStoreRequest, opts ...grpc.CallOption) (*PutStoreResponse, error) { + out := new(PutStoreResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/PutStore", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetAllStores(ctx context.Context, in *GetAllStoresRequest, opts ...grpc.CallOption) (*GetAllStoresResponse, error) { + out := new(GetAllStoresResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetAllStores", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) StoreHeartbeat(ctx context.Context, in *StoreHeartbeatRequest, opts ...grpc.CallOption) (*StoreHeartbeatResponse, error) { + out := new(StoreHeartbeatResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/StoreHeartbeat", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) RegionHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Scheduler_RegionHeartbeatClient, error) { + stream, err := c.cc.NewStream(ctx, &_Scheduler_serviceDesc.Streams[1], "/schedulerpb.Scheduler/RegionHeartbeat", opts...) 
+ if err != nil { + return nil, err + } + x := &schedulerRegionHeartbeatClient{stream} + return x, nil +} + +type Scheduler_RegionHeartbeatClient interface { + Send(*RegionHeartbeatRequest) error + Recv() (*RegionHeartbeatResponse, error) + grpc.ClientStream +} + +type schedulerRegionHeartbeatClient struct { + grpc.ClientStream +} + +func (x *schedulerRegionHeartbeatClient) Send(m *RegionHeartbeatRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerRegionHeartbeatClient) Recv() (*RegionHeartbeatResponse, error) { + m := new(RegionHeartbeatResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *schedulerClient) GetRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) { + out := new(GetRegionResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetRegion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetPrevRegion(ctx context.Context, in *GetRegionRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) { + out := new(GetRegionResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetPrevRegion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetRegionByID(ctx context.Context, in *GetRegionByIDRequest, opts ...grpc.CallOption) (*GetRegionResponse, error) { + out := new(GetRegionResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetRegionByID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) ScanRegions(ctx context.Context, in *ScanRegionsRequest, opts ...grpc.CallOption) (*ScanRegionsResponse, error) { + out := new(ScanRegionsResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/ScanRegions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) AskSplit(ctx context.Context, in *AskSplitRequest, opts ...grpc.CallOption) (*AskSplitResponse, error) { + out := new(AskSplitResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/AskSplit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetClusterConfig(ctx context.Context, in *GetClusterConfigRequest, opts ...grpc.CallOption) (*GetClusterConfigResponse, error) { + out := new(GetClusterConfigResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetClusterConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) PutClusterConfig(ctx context.Context, in *PutClusterConfigRequest, opts ...grpc.CallOption) (*PutClusterConfigResponse, error) { + out := new(PutClusterConfigResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/PutClusterConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) ScatterRegion(ctx context.Context, in *ScatterRegionRequest, opts ...grpc.CallOption) (*ScatterRegionResponse, error) { + out := new(ScatterRegionResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/ScatterRegion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetGCSafePoint(ctx context.Context, in *GetGCSafePointRequest, opts ...grpc.CallOption) (*GetGCSafePointResponse, error) { + out := new(GetGCSafePointResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetGCSafePoint", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) UpdateGCSafePoint(ctx context.Context, in *UpdateGCSafePointRequest, opts ...grpc.CallOption) (*UpdateGCSafePointResponse, error) { + out := new(UpdateGCSafePointResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/UpdateGCSafePoint", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerClient) GetOperator(ctx context.Context, in *GetOperatorRequest, opts ...grpc.CallOption) (*GetOperatorResponse, error) { + out := new(GetOperatorResponse) + err := c.cc.Invoke(ctx, "/schedulerpb.Scheduler/GetOperator", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Scheduler service + +type SchedulerServer interface { + // GetMembers get the member list of this cluster. It does not require + // the cluster_id in request matchs the id of this cluster. + GetMembers(context.Context, *GetMembersRequest) (*GetMembersResponse, error) + Tso(Scheduler_TsoServer) error + Bootstrap(context.Context, *BootstrapRequest) (*BootstrapResponse, error) + IsBootstrapped(context.Context, *IsBootstrappedRequest) (*IsBootstrappedResponse, error) + AllocID(context.Context, *AllocIDRequest) (*AllocIDResponse, error) + GetStore(context.Context, *GetStoreRequest) (*GetStoreResponse, error) + PutStore(context.Context, *PutStoreRequest) (*PutStoreResponse, error) + GetAllStores(context.Context, *GetAllStoresRequest) (*GetAllStoresResponse, error) + StoreHeartbeat(context.Context, *StoreHeartbeatRequest) (*StoreHeartbeatResponse, error) + RegionHeartbeat(Scheduler_RegionHeartbeatServer) error + GetRegion(context.Context, *GetRegionRequest) (*GetRegionResponse, error) + GetPrevRegion(context.Context, *GetRegionRequest) (*GetRegionResponse, error) + GetRegionByID(context.Context, *GetRegionByIDRequest) (*GetRegionResponse, error) + ScanRegions(context.Context, *ScanRegionsRequest) (*ScanRegionsResponse, error) + AskSplit(context.Context, *AskSplitRequest) (*AskSplitResponse, error) + GetClusterConfig(context.Context, *GetClusterConfigRequest) (*GetClusterConfigResponse, error) + PutClusterConfig(context.Context, *PutClusterConfigRequest) (*PutClusterConfigResponse, error) + ScatterRegion(context.Context, *ScatterRegionRequest) (*ScatterRegionResponse, error) + GetGCSafePoint(context.Context, *GetGCSafePointRequest) (*GetGCSafePointResponse, error) + UpdateGCSafePoint(context.Context, *UpdateGCSafePointRequest) (*UpdateGCSafePointResponse, error) + GetOperator(context.Context, *GetOperatorRequest) (*GetOperatorResponse, error) +} + +func RegisterSchedulerServer(s *grpc.Server, srv SchedulerServer) { + s.RegisterService(&_Scheduler_serviceDesc, srv) +} + +func _Scheduler_GetMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetMembers(ctx, req.(*GetMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_Tso_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerServer).Tso(&schedulerTsoServer{stream}) +} + +type 
Scheduler_TsoServer interface { + Send(*TsoResponse) error + Recv() (*TsoRequest, error) + grpc.ServerStream +} + +type schedulerTsoServer struct { + grpc.ServerStream +} + +func (x *schedulerTsoServer) Send(m *TsoResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerTsoServer) Recv() (*TsoRequest, error) { + m := new(TsoRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Scheduler_Bootstrap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BootstrapRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).Bootstrap(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/Bootstrap", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).Bootstrap(ctx, req.(*BootstrapRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_IsBootstrapped_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IsBootstrappedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).IsBootstrapped(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/IsBootstrapped", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).IsBootstrapped(ctx, req.(*IsBootstrappedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_AllocID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).AllocID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/AllocID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).AllocID(ctx, req.(*AllocIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetStore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetStoreRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetStore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetStore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetStore(ctx, req.(*GetStoreRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_PutStore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutStoreRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).PutStore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/PutStore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(SchedulerServer).PutStore(ctx, req.(*PutStoreRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetAllStores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllStoresRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetAllStores(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetAllStores", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetAllStores(ctx, req.(*GetAllStoresRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_StoreHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreHeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).StoreHeartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/StoreHeartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).StoreHeartbeat(ctx, req.(*StoreHeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_RegionHeartbeat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerServer).RegionHeartbeat(&schedulerRegionHeartbeatServer{stream}) +} + +type Scheduler_RegionHeartbeatServer interface { + Send(*RegionHeartbeatResponse) error + Recv() (*RegionHeartbeatRequest, error) + grpc.ServerStream +} + +type schedulerRegionHeartbeatServer struct { + grpc.ServerStream +} + +func (x *schedulerRegionHeartbeatServer) Send(m *RegionHeartbeatResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerRegionHeartbeatServer) Recv() (*RegionHeartbeatRequest, error) { + m := new(RegionHeartbeatRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Scheduler_GetRegion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRegionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetRegion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetRegion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetRegion(ctx, req.(*GetRegionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetPrevRegion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRegionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetPrevRegion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetPrevRegion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetPrevRegion(ctx, req.(*GetRegionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetRegionByID_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRegionByIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetRegionByID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetRegionByID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetRegionByID(ctx, req.(*GetRegionByIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_ScanRegions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ScanRegionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).ScanRegions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/ScanRegions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).ScanRegions(ctx, req.(*ScanRegionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_AskSplit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AskSplitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).AskSplit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/AskSplit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).AskSplit(ctx, req.(*AskSplitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetClusterConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetClusterConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetClusterConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetClusterConfig(ctx, req.(*GetClusterConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_PutClusterConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutClusterConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).PutClusterConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/PutClusterConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).PutClusterConfig(ctx, req.(*PutClusterConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_ScatterRegion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ScatterRegionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SchedulerServer).ScatterRegion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/ScatterRegion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).ScatterRegion(ctx, req.(*ScatterRegionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetGCSafePoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGCSafePointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetGCSafePoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetGCSafePoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetGCSafePoint(ctx, req.(*GetGCSafePointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_UpdateGCSafePoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGCSafePointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).UpdateGCSafePoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/UpdateGCSafePoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).UpdateGCSafePoint(ctx, req.(*UpdateGCSafePointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Scheduler_GetOperator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServer).GetOperator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/schedulerpb.Scheduler/GetOperator", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServer).GetOperator(ctx, req.(*GetOperatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Scheduler_serviceDesc = grpc.ServiceDesc{ + ServiceName: "schedulerpb.Scheduler", + HandlerType: (*SchedulerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMembers", + Handler: _Scheduler_GetMembers_Handler, + }, + { + MethodName: "Bootstrap", + Handler: _Scheduler_Bootstrap_Handler, + }, + { + MethodName: "IsBootstrapped", + Handler: _Scheduler_IsBootstrapped_Handler, + }, + { + MethodName: "AllocID", + Handler: _Scheduler_AllocID_Handler, + }, + { + MethodName: "GetStore", + Handler: _Scheduler_GetStore_Handler, + }, + { + MethodName: "PutStore", + Handler: _Scheduler_PutStore_Handler, + }, + { + MethodName: "GetAllStores", + Handler: _Scheduler_GetAllStores_Handler, + }, + { + MethodName: "StoreHeartbeat", + Handler: _Scheduler_StoreHeartbeat_Handler, + }, + { + MethodName: "GetRegion", + Handler: _Scheduler_GetRegion_Handler, + }, + { + MethodName: "GetPrevRegion", + Handler: _Scheduler_GetPrevRegion_Handler, + }, + { + MethodName: "GetRegionByID", + Handler: _Scheduler_GetRegionByID_Handler, + }, + { + MethodName: "ScanRegions", + Handler: _Scheduler_ScanRegions_Handler, + }, + { + MethodName: "AskSplit", + Handler: 
_Scheduler_AskSplit_Handler, + }, + { + MethodName: "GetClusterConfig", + Handler: _Scheduler_GetClusterConfig_Handler, + }, + { + MethodName: "PutClusterConfig", + Handler: _Scheduler_PutClusterConfig_Handler, + }, + { + MethodName: "ScatterRegion", + Handler: _Scheduler_ScatterRegion_Handler, + }, + { + MethodName: "GetGCSafePoint", + Handler: _Scheduler_GetGCSafePoint_Handler, + }, + { + MethodName: "UpdateGCSafePoint", + Handler: _Scheduler_UpdateGCSafePoint_Handler, + }, + { + MethodName: "GetOperator", + Handler: _Scheduler_GetOperator_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Tso", + Handler: _Scheduler_Tso_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RegionHeartbeat", + Handler: _Scheduler_RegionHeartbeat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "schedulerpb.proto", +} + +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ClusterId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ClusterId)) + } + if m.Error != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Error.Size())) + n1, err := m.Error.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Error) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Error) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Type)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TsoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TsoRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Count != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Count)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Physical != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Physical)) + } + if m.Logical != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Logical)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TsoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TsoResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n3, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Count != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Count)) + } + if m.Timestamp != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Timestamp.Size())) + n4, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *BootstrapRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BootstrapRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n5, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Store != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Store.Size())) + n6, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *BootstrapResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BootstrapResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n7, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *IsBootstrappedRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsBootstrappedRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n8, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m 
*IsBootstrappedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IsBootstrappedResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n9, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Bootstrapped { + dAtA[i] = 0x10 + i++ + if m.Bootstrapped { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AllocIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocIDRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n10, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AllocIDResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocIDResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n11, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Id != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Id)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetStoreRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetStoreRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n12, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.StoreId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.StoreId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetStoreResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetStoreResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n13, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.Store != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Store.Size())) + n14, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.Stats != nil { + 
dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Stats.Size())) + n15, err := m.Stats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PutStoreRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutStoreRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.Store != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Store.Size())) + n17, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PutStoreResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutStoreResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n18, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetAllStoresRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllStoresRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n19, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.ExcludeTombstoneStores { + dAtA[i] = 0x10 + i++ + if m.ExcludeTombstoneStores { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetAllStoresResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllStoresResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n20, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if len(m.Stores) > 0 { + for _, msg := range m.Stores { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetRegionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *GetRegionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n21, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if len(m.RegionKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.RegionKey))) + i += copy(dAtA[i:], m.RegionKey) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetRegionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRegionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n22, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.Region != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Region.Size())) + n23, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.Leader != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Leader.Size())) + n24, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if len(m.Slaves) > 0 { + for _, msg := range m.Slaves { + dAtA[i] = 0x22 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetRegionByIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRegionByIDRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n25, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.RegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ScanRegionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScanRegionsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n26, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if len(m.StartKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.StartKey))) + i += copy(dAtA[i:], m.StartKey) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Limit)) + } + if len(m.EndKey) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.EndKey))) + i += copy(dAtA[i:], m.EndKey) + } + if m.XXX_unrecognized != nil { + i += 
copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ScanRegionsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScanRegionsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Regions) > 0 { + for _, msg := range m.Regions { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Leaders) > 0 { + for _, msg := range m.Leaders { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetClusterConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetClusterConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n29, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.Cluster != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Cluster.Size())) + n30, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PutClusterConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutClusterConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.Cluster != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Cluster.Size())) + n32, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PutClusterConfigResponse) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutClusterConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n33, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.MemberId)) + } + if len(m.PeerUrls) > 0 { + for _, s := range m.PeerUrls { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ClientUrls) > 0 { + for _, s := range m.ClientUrls { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.LeaderPriority != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.LeaderPriority)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetMembersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetMembersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetMembersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetMembersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n35, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Leader != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Leader.Size())) + n36, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if m.EtcdLeader != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.EtcdLeader.Size())) + n37, err := m.EtcdLeader.MarshalTo(dAtA[i:]) + 
if err != nil { + return 0, err + } + i += n37 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RegionHeartbeatRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegionHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.Region != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Region.Size())) + n39, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if m.Leader != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Leader.Size())) + n40, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + if len(m.PendingPeers) > 0 { + for _, msg := range m.PendingPeers { + dAtA[i] = 0x2a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ApproximateSize != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ApproximateSize)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ChangePeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangePeer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Peer != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Peer.Size())) + n41, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + if m.ChangeType != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ChangeType)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TransferLeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferLeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Peer != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Peer.Size())) + n42, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RegionHeartbeatResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegionHeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if m.ChangePeer != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ChangePeer.Size())) + n44, err 
:= m.ChangePeer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.TransferLeader != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.TransferLeader.Size())) + n45, err := m.TransferLeader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.RegionId != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionId)) + } + if m.RegionEpoch != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionEpoch.Size())) + n46, err := m.RegionEpoch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if m.TargetPeer != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.TargetPeer.Size())) + n47, err := m.TargetPeer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AskSplitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AskSplitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if m.Region != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Region.Size())) + n49, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AskSplitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AskSplitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.NewRegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.NewRegionId)) + } + if len(m.NewPeerIds) > 0 { + dAtA52 := make([]byte, len(m.NewPeerIds)*10) + var j51 int + for _, num := range m.NewPeerIds { + for num >= 1<<7 { + dAtA52[j51] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j51++ + } + dAtA52[j51] = uint8(num) + j51++ + } + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(j51)) + i += copy(dAtA[i:], dAtA52[:j51]) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ReportSplitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReportSplitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n53, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + if m.Left != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Left.Size())) + n54, err := 
m.Left.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + if m.Right != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Right.Size())) + n55, err := m.Right.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ReportSplitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReportSplitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n56, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SplitID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SplitID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NewRegionId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.NewRegionId)) + } + if len(m.NewPeerIds) > 0 { + dAtA58 := make([]byte, len(m.NewPeerIds)*10) + var j57 int + for _, num := range m.NewPeerIds { + for num >= 1<<7 { + dAtA58[j57] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j57++ + } + dAtA58[j57] = uint8(num) + j57++ + } + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(j57)) + i += copy(dAtA[i:], dAtA58[:j57]) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TimeInterval) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeInterval) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StartTimestamp != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.StartTimestamp)) + } + if m.EndTimestamp != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.EndTimestamp)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RecordPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecordPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Value != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreStats) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StoreId != 0 { + dAtA[i] = 0x8 + i++ + i = 
encodeVarintSchedulerpb(dAtA, i, uint64(m.StoreId)) + } + if m.Capacity != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Capacity)) + } + if m.Available != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Available)) + } + if m.RegionCount != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionCount)) + } + if m.SendingSnapCount != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.SendingSnapCount)) + } + if m.ReceivingSnapCount != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ReceivingSnapCount)) + } + if m.StartTime != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.StartTime)) + } + if m.ApplyingSnapCount != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.ApplyingSnapCount)) + } + if m.IsBusy { + dAtA[i] = 0x48 + i++ + if m.IsBusy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.UsedSize != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.UsedSize)) + } + if m.Interval != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Interval.Size())) + n59, err := m.Interval.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + if len(m.CpuUsages) > 0 { + for _, msg := range m.CpuUsages { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ReadIoRates) > 0 { + for _, msg := range m.ReadIoRates { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.WriteIoRates) > 0 { + for _, msg := range m.WriteIoRates { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.OpLatencies) > 0 { + for _, msg := range m.OpLatencies { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreHeartbeatRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n60, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + } + if m.Stats != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Stats.Size())) + n61, err := m.Stats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreHeartbeatResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreHeartbeatResponse) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n62, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ScatterRegionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScatterRegionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n63, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + } + if m.RegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionId)) + } + if m.Region != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Region.Size())) + n64, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + } + if m.Leader != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Leader.Size())) + n65, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n65 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ScatterRegionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScatterRegionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n66, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n66 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetGCSafePointRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n67, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n67 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetGCSafePointResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n68, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n68 + } + if m.SafePoint != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.SafePoint)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m 
*UpdateGCSafePointRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateGCSafePointRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n69, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + } + if m.SafePoint != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.SafePoint)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *UpdateGCSafePointResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateGCSafePointResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n70, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + } + if m.NewSafePoint != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.NewSafePoint)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetOperatorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetOperatorRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n71, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + } + if m.RegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionId)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetOperatorResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetOperatorResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Header.Size())) + n72, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + } + if m.RegionId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.RegionId)) + } + if len(m.Desc) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.Desc))) + i += copy(dAtA[i:], m.Desc) + } + if m.Status != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(m.Status)) + } + if len(m.Kind) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintSchedulerpb(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintSchedulerpb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} 
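The generated `encodeVarintSchedulerpb` above, the `sovSchedulerpb` size helper below, and the shift loops in the `Unmarshal` methods all implement the same protobuf base-128 varint wire encoding: 7 payload bits per byte, high bit set on every byte except the last. As a minimal, standalone sketch (not part of this generated file), the helpers below show that encoding round-trip; the names `putUvarint`, `uvarintLen`, and `readUvarint` are illustrative only and do not exist in the schedulerpb package.

```go
// Standalone sketch of protobuf base-128 varint encoding/decoding.
// Mirrors what encodeVarintSchedulerpb, sovSchedulerpb, and the
// Unmarshal shift loops in the generated code do.
package main

import (
	"errors"
	"fmt"
)

// putUvarint appends v to buf as a varint: 7 payload bits per byte,
// with the continuation (high) bit set on every byte but the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintLen returns how many bytes putUvarint would emit, the same
// quantity the generated Size()/sovSchedulerpb code computes.
func uvarintLen(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// readUvarint decodes a varint from buf, returning the value and the
// number of bytes consumed, like the shift loops in Unmarshal.
func readUvarint(buf []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(buf); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		b := buf[i]
		v |= uint64(b&0x7f) << shift
		if b < 0x80 { // last byte: continuation bit clear
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of buffer")
}

func main() {
	// A field such as Timestamp.Physical (field 1, wire type 0) is
	// written as the tag byte 0x08 followed by the varint value.
	buf := putUvarint([]byte{0x08}, 300)
	fmt.Printf("encoded: % x (value needs %d bytes)\n", buf, uvarintLen(300))

	v, n, err := readUvarint(buf[1:])
	fmt.Println(v, n, err) // 300 2 <nil>
}
```

The tag bytes hard-coded throughout the generated `MarshalTo` methods (0x8, 0x10, 0x12, 0x1a, ...) are themselves varints of `(field_number << 3) | wire_type`, which is why nested messages (wire type 2) are written as tag, varint length, then the child's own `MarshalTo` output.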
+func (m *RequestHeader) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovSchedulerpb(uint64(m.ClusterId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseHeader) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovSchedulerpb(uint64(m.ClusterId)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Error) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSchedulerpb(uint64(m.Type)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TsoRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Count != 0 { + n += 1 + sovSchedulerpb(uint64(m.Count)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Timestamp) Size() (n int) { + var l int + _ = l + if m.Physical != 0 { + n += 1 + sovSchedulerpb(uint64(m.Physical)) + } + if m.Logical != 0 { + n += 1 + sovSchedulerpb(uint64(m.Logical)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TsoResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Count != 0 { + n += 1 + sovSchedulerpb(uint64(m.Count)) + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BootstrapRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Store != nil { + l = m.Store.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BootstrapResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IsBootstrappedRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IsBootstrappedResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Bootstrapped { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AllocIDRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AllocIDResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovSchedulerpb(uint64(m.Id)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetStoreRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + 
sovSchedulerpb(uint64(l)) + } + if m.StoreId != 0 { + n += 1 + sovSchedulerpb(uint64(m.StoreId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetStoreResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Store != nil { + l = m.Store.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutStoreRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Store != nil { + l = m.Store.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutStoreResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetAllStoresRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.ExcludeTombstoneStores { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetAllStoresResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if len(m.Stores) > 0 { + for _, e := range m.Stores { + l = e.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetRegionRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + l = len(m.RegionKey) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetRegionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if len(m.Slaves) > 0 { + for _, e := range m.Slaves { + l = e.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetRegionByIDRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.RegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.RegionId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ScanRegionsRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovSchedulerpb(uint64(m.Limit)) + } + l = len(m.EndKey) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ScanRegionsResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = 
m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if len(m.Regions) > 0 { + for _, e := range m.Regions { + l = e.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if len(m.Leaders) > 0 { + for _, e := range m.Leaders { + l = e.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClusterConfigRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetClusterConfigResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutClusterConfigRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PutClusterConfigResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Member) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.MemberId != 0 { + n += 1 + sovSchedulerpb(uint64(m.MemberId)) + } + if len(m.PeerUrls) > 0 { + for _, s := range m.PeerUrls { + l = len(s) + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if len(m.ClientUrls) > 0 { + for _, s := range m.ClientUrls { + l = len(s) + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if m.LeaderPriority != 0 { + n += 1 + sovSchedulerpb(uint64(m.LeaderPriority)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetMembersRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetMembersResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.EtcdLeader != nil { + l = m.EtcdLeader.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RegionHeartbeatRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if len(m.PendingPeers) > 0 { + for _, e := range m.PendingPeers { + l = e.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + } + if m.ApproximateSize != 0 { + n += 1 + sovSchedulerpb(uint64(m.ApproximateSize)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChangePeer) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.ChangeType != 0 { + n += 1 + sovSchedulerpb(uint64(m.ChangeType)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TransferLeader) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RegionHeartbeatResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.ChangePeer != nil { + l = m.ChangePeer.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.TransferLeader != nil { + l = m.TransferLeader.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.RegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.RegionId)) + } + if m.RegionEpoch != nil { + l = m.RegionEpoch.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.TargetPeer != nil { + l = m.TargetPeer.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AskSplitRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AskSplitResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.NewRegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.NewRegionId)) + } + if len(m.NewPeerIds) > 0 { + l = 0 + for _, e := range m.NewPeerIds { + l += sovSchedulerpb(uint64(e)) + } + n += 1 + sovSchedulerpb(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReportSplitRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Left != nil { + l = m.Left.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Right != nil { + l = m.Right.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReportSplitResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SplitID) Size() (n int) { + var l int + _ = l + if m.NewRegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.NewRegionId)) + } + if len(m.NewPeerIds) > 0 { + l = 0 + for _, e := range m.NewPeerIds { + l += sovSchedulerpb(uint64(e)) + } + n += 1 + sovSchedulerpb(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TimeInterval) Size() (n int) { + var l int + _ = l + if m.StartTimestamp != 0 { + n += 1 + sovSchedulerpb(uint64(m.StartTimestamp)) + } + if m.EndTimestamp != 0 { + n += 1 + sovSchedulerpb(uint64(m.EndTimestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RecordPair) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n 
+= 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovSchedulerpb(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreStats) Size() (n int) { + var l int + _ = l + if m.StoreId != 0 { + n += 1 + sovSchedulerpb(uint64(m.StoreId)) + } + if m.Capacity != 0 { + n += 1 + sovSchedulerpb(uint64(m.Capacity)) + } + if m.Available != 0 { + n += 1 + sovSchedulerpb(uint64(m.Available)) + } + if m.RegionCount != 0 { + n += 1 + sovSchedulerpb(uint64(m.RegionCount)) + } + if m.SendingSnapCount != 0 { + n += 1 + sovSchedulerpb(uint64(m.SendingSnapCount)) + } + if m.ReceivingSnapCount != 0 { + n += 1 + sovSchedulerpb(uint64(m.ReceivingSnapCount)) + } + if m.StartTime != 0 { + n += 1 + sovSchedulerpb(uint64(m.StartTime)) + } + if m.ApplyingSnapCount != 0 { + n += 1 + sovSchedulerpb(uint64(m.ApplyingSnapCount)) + } + if m.IsBusy { + n += 2 + } + if m.UsedSize != 0 { + n += 1 + sovSchedulerpb(uint64(m.UsedSize)) + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if len(m.CpuUsages) > 0 { + for _, e := range m.CpuUsages { + l = e.Size() + n += 2 + l + sovSchedulerpb(uint64(l)) + } + } + if len(m.ReadIoRates) > 0 { + for _, e := range m.ReadIoRates { + l = e.Size() + n += 2 + l + sovSchedulerpb(uint64(l)) + } + } + if len(m.WriteIoRates) > 0 { + for _, e := range m.WriteIoRates { + l = e.Size() + n += 2 + l + sovSchedulerpb(uint64(l)) + } + } + if len(m.OpLatencies) > 0 { + for _, e := range m.OpLatencies { + l = e.Size() + n += 2 + l + sovSchedulerpb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreHeartbeatRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreHeartbeatResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ScatterRegionRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.RegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.RegionId)) + } + if m.Region != nil { + l = m.Region.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ScatterRegionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetGCSafePointRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetGCSafePointResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.SafePoint != 0 { + n += 1 + sovSchedulerpb(uint64(m.SafePoint)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateGCSafePointRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.SafePoint != 0 { + n += 1 + sovSchedulerpb(uint64(m.SafePoint)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateGCSafePointResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.NewSafePoint != 0 { + n += 1 + sovSchedulerpb(uint64(m.NewSafePoint)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetOperatorRequest) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.RegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.RegionId)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetOperatorResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.RegionId != 0 { + n += 1 + sovSchedulerpb(uint64(m.RegionId)) + } + l = len(m.Desc) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.Status != 0 { + n += 1 + sovSchedulerpb(uint64(m.Status)) + } + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovSchedulerpb(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSchedulerpb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSchedulerpb(x uint64) (n int) { + return sovSchedulerpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &Error{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Error) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Error: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Error: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ErrorType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TsoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TsoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TsoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) + } + m.Physical = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Physical |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Logical", wireType) + } + m.Logical = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Logical |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TsoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TsoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TsoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BootstrapRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BootstrapRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BootstrapRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Store == nil { + m.Store = &metapb.Store{} + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BootstrapResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BootstrapResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BootstrapResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsBootstrappedRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsBootstrappedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsBootstrappedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsBootstrappedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsBootstrappedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsBootstrappedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Bootstrapped", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Bootstrapped = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocIDResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocIDResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocIDResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetStoreRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetStoreRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetStoreRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType) + } + m.StoreId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetStoreResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetStoreResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetStoreResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Store == nil { + m.Store = &metapb.Store{} + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &StoreStats{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutStoreRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutStoreRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutStoreRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Store == nil { + m.Store = &metapb.Store{} + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutStoreResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutStoreResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutStoreResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllStoresRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllStoresRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllStoresRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTombstoneStores", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExcludeTombstoneStores = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllStoresResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllStoresResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllStoresResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stores", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stores = append(m.Stores, &metapb.Store{}) + if err := m.Stores[len(m.Stores)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRegionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRegionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegionKey = append(m.RegionKey[:0], dAtA[iNdEx:postIndex]...) + if m.RegionKey == nil { + m.RegionKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRegionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRegionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &metapb.Peer{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Slaves", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Slaves = append(m.Slaves, &metapb.Peer{}) + if err := m.Slaves[len(m.Slaves)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRegionByIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRegionByIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRegionByIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScanRegionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScanRegionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScanRegionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) + if m.StartKey == nil { + m.StartKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) + if m.EndKey == nil { + m.EndKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScanRegionsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScanRegionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScanRegionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Regions = append(m.Regions, &metapb.Region{}) + if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leaders = append(m.Leaders, &metapb.Peer{}) + if err := m.Leaders[len(m.Leaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &metapb.Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutClusterConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutClusterConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutClusterConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &metapb.Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutClusterConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutClusterConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutClusterConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) + } + m.MemberId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaderPriority", wireType) + } + m.LeaderPriority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeaderPriority |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err 
:= skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetMembersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetMembersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetMembersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetMembersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetMembersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetMembersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &Member{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EtcdLeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EtcdLeader == nil { + m.EtcdLeader = &Member{} + } + if err := m.EtcdLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegionHeartbeatRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegionHeartbeatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegionHeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &metapb.Peer{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingPeers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingPeers = append(m.PendingPeers, &metapb.Peer{}) + if err := m.PendingPeers[len(m.PendingPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ApproximateSize", wireType) + } + m.ApproximateSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ApproximateSize |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangePeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangePeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangePeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &metapb.Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangeType", wireType) + } + m.ChangeType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChangeType |= (eraftpb.ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferLeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferLeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferLeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &metapb.Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegionHeartbeatResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegionHeartbeatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegionHeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangePeer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ChangePeer == nil { + m.ChangePeer = &ChangePeer{} + } + if err := m.ChangePeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransferLeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransferLeader == nil { + m.TransferLeader = &TransferLeader{} + } + if err := m.TransferLeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RegionEpoch == nil { + m.RegionEpoch = &metapb.RegionEpoch{} + } + if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPeer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetPeer == nil { + m.TargetPeer = &metapb.Peer{} + } + if err := m.TargetPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AskSplitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AskSplitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AskSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AskSplitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AskSplitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AskSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NewRegionId", wireType) + } + m.NewRegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NewRegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReportSplitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReportSplitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReportSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Left == nil { + m.Left = &metapb.Region{} + } + if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Right == nil { + m.Right = &metapb.Region{} + } + if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReportSplitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReportSplitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReportSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SplitID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SplitID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SplitID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NewRegionId", wireType) + } + m.NewRegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NewRegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NewPeerIds = append(m.NewPeerIds, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NewPeerIds", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeInterval) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeInterval: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeInterval: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) + } + m.StartTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestamp |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestamp", wireType) + } + m.EndTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestamp |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecordPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecordPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecordPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreId", wireType) + } + m.StoreId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + m.Capacity = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Capacity |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Available", wireType) + } + m.Available = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Available |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionCount", wireType) + } + m.RegionCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionCount |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendingSnapCount", wireType) + } + m.SendingSnapCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SendingSnapCount |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReceivingSnapCount", wireType) + } + m.ReceivingSnapCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReceivingSnapCount |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplyingSnapCount", wireType) + } + m.ApplyingSnapCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ApplyingSnapCount |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsBusy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsBusy = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsedSize", wireType) + } + m.UsedSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UsedSize |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &TimeInterval{} + } + if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuUsages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CpuUsages = append(m.CpuUsages, &RecordPair{}) + if err := m.CpuUsages[len(m.CpuUsages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadIoRates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadIoRates = append(m.ReadIoRates, &RecordPair{}) + if err := m.ReadIoRates[len(m.ReadIoRates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WriteIoRates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WriteIoRates = append(m.WriteIoRates, &RecordPair{}) + if err := m.WriteIoRates[len(m.WriteIoRates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpLatencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OpLatencies = append(m.OpLatencies, &RecordPair{}) + if err := m.OpLatencies[len(m.OpLatencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreHeartbeatRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreHeartbeatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreHeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &StoreStats{} + } + if err := 
m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreHeartbeatResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreHeartbeatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreHeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScatterRegionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScatterRegionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScatterRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Region == nil { + m.Region = &metapb.Region{} + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &metapb.Peer{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScatterRegionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScatterRegionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScatterRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetGCSafePointRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetGCSafePointRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetGCSafePointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetGCSafePointResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetGCSafePointResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetGCSafePointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType) + } + m.SafePoint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SafePoint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateGCSafePointRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateGCSafePointRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateGCSafePointRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType) + } + m.SafePoint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SafePoint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateGCSafePointResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateGCSafePointResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateGCSafePointResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NewSafePoint", wireType) + } + m.NewSafePoint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NewSafePoint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOperatorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOperatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOperatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOperatorResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOperatorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOperatorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) + } + m.RegionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RegionId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Desc = append(m.Desc[:0], dAtA[iNdEx:postIndex]...) + if m.Desc == nil { + m.Desc = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (OperatorStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSchedulerpb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = append(m.Kind[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Kind == nil { + m.Kind = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSchedulerpb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSchedulerpb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSchedulerpb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSchedulerpb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSchedulerpb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSchedulerpb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSchedulerpb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSchedulerpb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("schedulerpb.proto", fileDescriptor_schedulerpb_4e333137f5959f12) } + +var fileDescriptor_schedulerpb_4e333137f5959f12 = []byte{ + // 2343 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5a, 0xcf, 0x6f, 0x23, 0x49, + 0xf5, 0x4f, 0x3b, 0x8e, 0x63, 0x3f, 0x3b, 0xb6, 0x53, 0xc9, 0x26, 0x5e, 0xef, 0x4c, 0x36, 0x53, + 0x33, 0x3b, 0xdf, 0xd9, 0xf9, 0x32, 0x61, 0xc9, 0x0e, 0xab, 0x15, 0x08, 0xa4, 0xc4, 0xf1, 0x66, + 0xcc, 0x24, 0xb6, 0xd5, 0x76, 0x06, 0x56, 0x20, 0x35, 0x1d, 0x77, 0xc5, 0x69, 0xa6, 0xdd, 0xdd, + 0xdb, 0x55, 0xce, 0x8c, 0xe7, 0xca, 0x89, 0x03, 0x08, 0x21, 0x90, 0x90, 0xe0, 0xc0, 0x3f, 0xc1, + 0x8d, 0x23, 0x07, 0x8e, 0x88, 0x2b, 0x17, 0x34, 0xfc, 0x13, 0x1c, 0x51, 0x55, 0x75, 0xb7, 0xbb, + 0xdb, 0x76, 0x12, 0xd4, 0x03, 0x37, 0x57, 0xbd, 0x4f, 0xbd, 0xdf, 0x55, 0xf5, 0xea, 0xb5, 0x61, + 0x9d, 0x0e, 0x2e, 0x89, 0x31, 0xb6, 0x88, 0xe7, 0x9e, 0xef, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, 0x8c, + 0x4c, 0xd5, 0x4b, 0x23, 0xc2, 0xf4, 0x80, 0x54, 0x5f, 0x23, 0x9e, 0x7e, 0xc1, 0xc2, 0xe1, 0xe6, + 0xd0, 0x19, 
0x3a, 0xe2, 0xe7, 0xd7, 0xf9, 0x2f, 0x39, 0x8b, 0xf7, 0x60, 0x4d, 0x25, 0x5f, 0x8d, + 0x09, 0x65, 0xcf, 0x88, 0x6e, 0x10, 0x0f, 0xdd, 0x05, 0x18, 0x58, 0x63, 0xca, 0x88, 0xa7, 0x99, + 0x46, 0x4d, 0xd9, 0x55, 0x1e, 0x65, 0xd5, 0x82, 0x3f, 0xd3, 0x32, 0xf0, 0x97, 0x50, 0x56, 0x09, + 0x75, 0x1d, 0x9b, 0x92, 0x5b, 0x2d, 0x40, 0x8f, 0x60, 0x85, 0x78, 0x9e, 0xe3, 0xd5, 0x32, 0xbb, + 0xca, 0xa3, 0xe2, 0x3e, 0xda, 0x8b, 0xda, 0xd0, 0xe4, 0x14, 0x55, 0x02, 0xf0, 0x29, 0xac, 0x88, + 0x31, 0x7a, 0x0c, 0x59, 0x36, 0x71, 0x89, 0xe0, 0x55, 0xde, 0xdf, 0x9a, 0x5d, 0xd1, 0x9f, 0xb8, + 0x44, 0x15, 0x18, 0x54, 0x83, 0xd5, 0x11, 0xa1, 0x54, 0x1f, 0x12, 0x21, 0xa0, 0xa0, 0x06, 0x43, + 0xfc, 0x02, 0xa0, 0x4f, 0x1d, 0xdf, 0x38, 0xb4, 0x0f, 0xb9, 0x4b, 0xa1, 0xaf, 0xe0, 0x5a, 0xdc, + 0xaf, 0xc7, 0xb8, 0xc6, 0x5c, 0xa0, 0xfa, 0x48, 0xb4, 0x09, 0x2b, 0x03, 0x67, 0x6c, 0x33, 0xc1, + 0x79, 0x4d, 0x95, 0x03, 0x7c, 0x00, 0x85, 0xbe, 0x39, 0x22, 0x94, 0xe9, 0x23, 0x17, 0xd5, 0x21, + 0xef, 0x5e, 0x4e, 0xa8, 0x39, 0xd0, 0x2d, 0xc1, 0x78, 0x59, 0x0d, 0xc7, 0x5c, 0x35, 0xcb, 0x19, + 0x0a, 0x52, 0x46, 0x90, 0x82, 0x21, 0xfe, 0x85, 0x02, 0x45, 0xa1, 0x9b, 0x74, 0x24, 0xfa, 0x34, + 0xa1, 0xdc, 0x07, 0x09, 0xe5, 0xa2, 0xfe, 0xbe, 0x5e, 0x3b, 0xf4, 0x14, 0x0a, 0x2c, 0xd0, 0xae, + 0xb6, 0x2c, 0xb8, 0xc5, 0x1d, 0x18, 0xea, 0xae, 0x4e, 0x81, 0xf8, 0x25, 0x54, 0x0f, 0x1d, 0x87, + 0x51, 0xe6, 0xe9, 0x6e, 0x1a, 0x8f, 0xdd, 0x87, 0x15, 0xca, 0x1c, 0x8f, 0xf8, 0xc1, 0x5e, 0xdb, + 0xf3, 0x13, 0xb2, 0xc7, 0x27, 0x55, 0x49, 0xc3, 0xcf, 0x60, 0x3d, 0x22, 0x2c, 0x85, 0x0b, 0xf0, + 0x73, 0x78, 0xaf, 0x45, 0x43, 0x5e, 0x2e, 0x31, 0x52, 0xe8, 0x8e, 0xbf, 0x82, 0xad, 0x24, 0xb3, + 0x34, 0xe1, 0xc1, 0x50, 0x3a, 0x8f, 0x30, 0x13, 0x1e, 0xc9, 0xab, 0xb1, 0x39, 0x7c, 0x04, 0xe5, + 0x03, 0xcb, 0x72, 0x06, 0xad, 0xa3, 0x34, 0x8a, 0xbf, 0x80, 0x4a, 0xc8, 0x25, 0x8d, 0xc6, 0x65, + 0xc8, 0x98, 0x52, 0xcf, 0xac, 0x9a, 0x31, 0x0d, 0xfc, 0x63, 0xa8, 0x1c, 0x13, 0x26, 0x43, 0x97, + 0x22, 0x27, 0xde, 0x87, 0xbc, 0x88, 0xbb, 0x16, 0x32, 0x5f, 0x15, 0xe3, 0x96, 0x81, 0x7f, 0xa7, + 0x40, 0x75, 0x2a, 0x22, 0x8d, 0xee, 0xb7, 0x49, 0x3c, 0xf4, 0x84, 0x83, 0x74, 0x46, 0xfd, 0x7d, + 0xb1, 0x1d, 0x63, 0x2c, 0x90, 0x3d, 0x4e, 0x56, 0x25, 0x0a, 0xff, 0x04, 0x2a, 0xdd, 0x71, 0x7a, + 0xfb, 0x6f, 0xb5, 0x27, 0x8e, 0xa1, 0x3a, 0x95, 0x95, 0x66, 0x4b, 0xfc, 0x54, 0x81, 0x8d, 0x63, + 0xc2, 0x0e, 0x2c, 0x4b, 0x30, 0xa3, 0x69, 0x34, 0xff, 0x1c, 0x6a, 0xe4, 0xf5, 0xc0, 0x1a, 0x1b, + 0x44, 0x63, 0xce, 0xe8, 0x9c, 0x32, 0xc7, 0x26, 0x9a, 0xd0, 0x97, 0xfa, 0xe9, 0xbc, 0xe5, 0xd3, + 0xfb, 0x01, 0x59, 0x0a, 0xc5, 0x1e, 0x6c, 0xc6, 0x95, 0x48, 0x13, 0xdb, 0x8f, 0x20, 0x17, 0x0a, + 0x5d, 0x9e, 0xf5, 0xa0, 0x4f, 0xc4, 0x44, 0xe4, 0x92, 0x4a, 0x86, 0xa6, 0x63, 0xa7, 0xb1, 0xfa, + 0x2e, 0x80, 0x27, 0x98, 0x68, 0x2f, 0xc9, 0x44, 0xd8, 0x59, 0x52, 0x0b, 0x72, 0xe6, 0x39, 0x99, + 0xe0, 0x3f, 0x29, 0xb0, 0x1e, 0x91, 0x93, 0xc6, 0xb0, 0x87, 0x90, 0x93, 0x7c, 0xfd, 0xd4, 0x28, + 0x07, 0x86, 0xf9, 0xcc, 0x7d, 0x2a, 0x7a, 0x00, 0x39, 0x4b, 0x32, 0x97, 0x89, 0x5b, 0x0a, 0x70, + 0x5d, 0xc2, 0xb9, 0x49, 0x1a, 0x47, 0x51, 0x4b, 0xbf, 0x22, 0xb4, 0x96, 0x15, 0x6e, 0x4a, 0xa0, + 0x24, 0x0d, 0x0f, 0x45, 0x64, 0xa4, 0x80, 0xc3, 0x49, 0xaa, 0x83, 0x07, 0x7d, 0x00, 0xbe, 0x5f, + 0xa6, 0x5b, 0x3b, 0x2f, 0x27, 0x5a, 0x06, 0xfe, 0xb5, 0x02, 0xa8, 0x37, 0xd0, 0x6d, 0x29, 0x8a, + 0xa6, 0x94, 0x43, 0x99, 0xee, 0xb1, 0x48, 0x40, 0xf2, 0x62, 0xe2, 0x39, 0x99, 0xf0, 0x6b, 0xd0, + 0x32, 0x47, 0x26, 0x13, 0xbe, 0x59, 0x51, 0xe5, 0x00, 0x6d, 0xc3, 0x2a, 0xb1, 0x0d, 0xb1, 0x20, + 0x2b, 0x16, 0xe4, 0x88, 0x6d, 0xf0, 
0xf0, 0xfd, 0x5e, 0x81, 0x8d, 0x98, 0x5a, 0x69, 0x02, 0xf8, + 0x08, 0x56, 0xa5, 0xbd, 0x41, 0x6a, 0x26, 0x23, 0x18, 0x90, 0xd1, 0x43, 0x58, 0x95, 0x61, 0xe2, + 0x87, 0xcf, 0x6c, 0x74, 0x02, 0x22, 0x3e, 0x85, 0xed, 0x63, 0xc2, 0x1a, 0xb2, 0x7a, 0x6a, 0x38, + 0xf6, 0x85, 0x39, 0x4c, 0x73, 0x35, 0xbc, 0x81, 0xda, 0x2c, 0xbb, 0x34, 0x16, 0x7f, 0x0c, 0xab, + 0x7e, 0x69, 0xe7, 0xe7, 0x6c, 0x25, 0xb0, 0xc3, 0x17, 0xa2, 0x06, 0x74, 0xfc, 0x1a, 0xb6, 0xbb, + 0xe3, 0x77, 0x66, 0xca, 0x7f, 0x22, 0xb9, 0x03, 0xb5, 0x59, 0xc9, 0x69, 0x0e, 0xd5, 0x3f, 0x28, + 0x90, 0x3b, 0x25, 0xa3, 0x73, 0xe2, 0x21, 0x04, 0x59, 0x5b, 0x1f, 0xc9, 0xda, 0xb4, 0xa0, 0x8a, + 0xdf, 0x3c, 0x3f, 0x47, 0x82, 0x1a, 0xd9, 0x07, 0x72, 0xa2, 0x65, 0x70, 0xa2, 0x4b, 0x88, 0xa7, + 0x8d, 0x3d, 0x4b, 0xc6, 0xbe, 0xa0, 0xe6, 0xf9, 0xc4, 0x99, 0x67, 0x51, 0xf4, 0x21, 0x14, 0x07, + 0x96, 0x49, 0x6c, 0x26, 0xc9, 0x59, 0x41, 0x06, 0x39, 0x25, 0x00, 0xff, 0x07, 0x15, 0x99, 0x1a, + 0x9a, 0xeb, 0x99, 0x8e, 0x67, 0xb2, 0x49, 0x6d, 0x45, 0xe4, 0x79, 0x59, 0x4e, 0x77, 0xfd, 0x59, + 0x7c, 0x2c, 0x4e, 0x25, 0xa9, 0x64, 0x9a, 0xcd, 0x86, 0xff, 0xae, 0x00, 0x8a, 0x72, 0x4a, 0x93, + 0x2d, 0x4f, 0x78, 0x71, 0x2e, 0xf8, 0xf8, 0xfb, 0x63, 0x23, 0xb6, 0x4a, 0xca, 0x50, 0x03, 0x0c, + 0xfa, 0xff, 0xc4, 0x39, 0x37, 0x17, 0x1d, 0x1c, 0x77, 0x4f, 0xa1, 0x48, 0xd8, 0xc0, 0xd0, 0xfc, + 0x15, 0xd9, 0xc5, 0x2b, 0x80, 0xe3, 0x4e, 0xa4, 0x75, 0xff, 0x52, 0x60, 0x4b, 0xee, 0xcd, 0x67, + 0x44, 0xf7, 0xd8, 0x39, 0xd1, 0x59, 0x9a, 0xa4, 0x7c, 0xb7, 0x27, 0xf8, 0x37, 0x60, 0xcd, 0x25, + 0xb6, 0x61, 0xda, 0x43, 0x8d, 0x67, 0x08, 0xad, 0xad, 0xcc, 0x39, 0x2a, 0x4a, 0x3e, 0x84, 0x0f, + 0x28, 0xfa, 0x18, 0xaa, 0xba, 0xeb, 0x7a, 0xce, 0x6b, 0x73, 0xa4, 0x33, 0xa2, 0x51, 0xf3, 0x0d, + 0xa9, 0x81, 0xc8, 0xc0, 0x4a, 0x64, 0xbe, 0x67, 0xbe, 0x21, 0xf8, 0x12, 0xa0, 0x71, 0xa9, 0xdb, + 0x43, 0xc2, 0x57, 0xa2, 0x5d, 0xc8, 0x72, 0x19, 0xbe, 0xad, 0x71, 0x11, 0x82, 0x82, 0x3e, 0x87, + 0xe2, 0x40, 0xe0, 0x35, 0xf1, 0x18, 0xcb, 0x88, 0xc7, 0xd8, 0xf6, 0x5e, 0xf0, 0xa8, 0xe4, 0xfb, + 0x4a, 0xf2, 0x13, 0xaf, 0x31, 0x18, 0x84, 0xbf, 0xf1, 0x3e, 0x94, 0xfb, 0x9e, 0x6e, 0xd3, 0x0b, + 0xe2, 0x49, 0xb7, 0xdf, 0x2c, 0x0d, 0xff, 0x2d, 0x03, 0xdb, 0x33, 0x81, 0x49, 0x93, 0x7b, 0x53, + 0xf5, 0x85, 0xe4, 0xcc, 0x9c, 0x92, 0x6f, 0xea, 0x8e, 0x40, 0x7d, 0xe1, 0x9a, 0x23, 0xa8, 0x30, + 0x5f, 0x7d, 0x2d, 0x16, 0xb5, 0xb8, 0xdc, 0xb8, 0x89, 0x6a, 0x99, 0xc5, 0x4d, 0x8e, 0x5d, 0x8e, + 0xd9, 0xf8, 0xe5, 0x88, 0x3e, 0x83, 0x92, 0x4f, 0x24, 0xae, 0x33, 0xb8, 0x14, 0x7b, 0x9a, 0x67, + 0x6f, 0x2c, 0x7b, 0x9a, 0x9c, 0xa4, 0x16, 0xbd, 0xe9, 0x00, 0x3d, 0x81, 0x22, 0xd3, 0xbd, 0x21, + 0x61, 0xd2, 0xa8, 0xdc, 0x1c, 0x77, 0x82, 0x04, 0xf0, 0xdf, 0x78, 0x04, 0x95, 0x03, 0xfa, 0xb2, + 0xe7, 0x5a, 0xe6, 0xff, 0x22, 0xcb, 0xf1, 0xcf, 0x15, 0xa8, 0x4e, 0xe5, 0xa5, 0x7b, 0x3c, 0xad, + 0xd9, 0xe4, 0x95, 0x96, 0xac, 0x2e, 0x8a, 0x36, 0x79, 0xa5, 0x06, 0x3e, 0xdc, 0x85, 0x12, 0xc7, + 0x88, 0xc3, 0xd5, 0x34, 0xe4, 0xd9, 0x9a, 0x55, 0xc1, 0x26, 0xaf, 0xb8, 0xed, 0x2d, 0x83, 0xe2, + 0x5f, 0x29, 0x80, 0x54, 0xe2, 0x3a, 0x1e, 0x4b, 0xed, 0x02, 0x0c, 0x59, 0x8b, 0x5c, 0xb0, 0x05, + 0x0e, 0x10, 0x34, 0xf4, 0x00, 0x56, 0x3c, 0x73, 0x78, 0xc9, 0xfc, 0x6c, 0x49, 0x82, 0x24, 0x11, + 0x7f, 0x0f, 0x36, 0x62, 0x3a, 0xa5, 0xb9, 0x97, 0x3a, 0xb0, 0x2a, 0xb8, 0xb4, 0x8e, 0x66, 0x3d, + 0xa6, 0xdc, 0xec, 0xb1, 0xcc, 0x8c, 0xc7, 0x7e, 0x04, 0xa5, 0xbe, 0x39, 0x22, 0x2d, 0x9b, 0x11, + 0xef, 0x4a, 0xb7, 0xf8, 0xf5, 0x23, 0x2b, 0xaf, 0x69, 0x4f, 0x41, 0xf2, 0x2d, 0x8b, 0xe9, 0x69, + 0x1f, 0xe4, 0x3e, 0xac, 0xf1, 0x7a, 0x6b, 0x0a, 0x93, 0x01, 
0x2b, 0x11, 0xdb, 0x08, 0x41, 0xf8, + 0x29, 0x80, 0x4a, 0x06, 0x8e, 0x67, 0x74, 0x75, 0xd3, 0x43, 0x55, 0x58, 0xe6, 0xe5, 0x99, 0xbc, + 0x48, 0xf9, 0x4f, 0x5e, 0xca, 0x5d, 0xe9, 0xd6, 0x98, 0xf8, 0x8b, 0xe5, 0x00, 0xff, 0x72, 0x05, + 0x60, 0xfa, 0x38, 0x8b, 0x3d, 0x27, 0x95, 0xd8, 0x73, 0x12, 0xd5, 0x21, 0x3f, 0xd0, 0x5d, 0x7d, + 0xc0, 0x6f, 0x49, 0xff, 0x1a, 0x0e, 0xc6, 0xe8, 0x0e, 0x14, 0xf4, 0x2b, 0xdd, 0xb4, 0xf4, 0x73, + 0x8b, 0x88, 0x00, 0x65, 0xd5, 0xe9, 0x04, 0xba, 0x17, 0xee, 0x47, 0xd9, 0x52, 0xc9, 0x8a, 0x96, + 0x8a, 0xbf, 0xf5, 0x1a, 0xa2, 0xb1, 0xf2, 0x35, 0x40, 0xd4, 0x3f, 0x9c, 0xa9, 0xad, 0xbb, 0x3e, + 0x70, 0x45, 0x00, 0xab, 0x3e, 0xa5, 0x67, 0xeb, 0xae, 0x44, 0x7f, 0x02, 0x9b, 0x1e, 0x19, 0x10, + 0xf3, 0x2a, 0x81, 0xcf, 0x09, 0x3c, 0x0a, 0x69, 0xd3, 0x15, 0x77, 0x01, 0xa6, 0xae, 0xae, 0xad, + 0x0a, 0x5c, 0x21, 0xf4, 0x32, 0xda, 0x83, 0x0d, 0xdd, 0x75, 0xad, 0x49, 0x82, 0x5f, 0x5e, 0xe0, + 0xd6, 0x03, 0xd2, 0x94, 0xdd, 0x36, 0xac, 0x9a, 0x54, 0x3b, 0x1f, 0xd3, 0x49, 0xad, 0x20, 0x9e, + 0x6a, 0x39, 0x93, 0x1e, 0x8e, 0xe9, 0x84, 0x9f, 0x4b, 0x63, 0x4a, 0x8c, 0xe8, 0x55, 0x91, 0xe7, + 0x13, 0xfc, 0x8e, 0x40, 0xdf, 0x84, 0xbc, 0xe9, 0xc7, 0xbe, 0x56, 0x11, 0x79, 0xf8, 0xfe, 0x4c, + 0xf3, 0x28, 0x48, 0x0e, 0x35, 0x84, 0xa2, 0xcf, 0x00, 0x06, 0xee, 0x58, 0x1b, 0x53, 0x7d, 0x48, + 0x68, 0xad, 0x2a, 0x6e, 0xad, 0xed, 0x44, 0x02, 0x07, 0x71, 0x57, 0x0b, 0x03, 0x77, 0x7c, 0x26, + 0x90, 0xe8, 0xdb, 0xb0, 0xe6, 0x11, 0xdd, 0xd0, 0x4c, 0x47, 0xf3, 0x74, 0x46, 0x68, 0x6d, 0xfd, + 0xfa, 0xa5, 0x45, 0x8e, 0x6e, 0x39, 0x2a, 0xc7, 0xa2, 0xef, 0x40, 0xf9, 0x95, 0x67, 0x32, 0x32, + 0x5d, 0x8d, 0xae, 0x5f, 0x5d, 0x12, 0xf0, 0x60, 0xf9, 0xb7, 0xa0, 0xe4, 0xb8, 0x9a, 0xa5, 0x33, + 0x62, 0x0f, 0x4c, 0x42, 0x6b, 0x1b, 0x37, 0x88, 0x76, 0xdc, 0x93, 0x00, 0x8b, 0xdf, 0xc0, 0x7b, + 0x22, 0x23, 0xdf, 0x49, 0x0d, 0x11, 0x76, 0x25, 0x32, 0xb7, 0xea, 0x4a, 0x9c, 0xc2, 0x56, 0x52, + 0x76, 0x9a, 0x23, 0xe4, 0x8f, 0x0a, 0x6c, 0xf6, 0x06, 0x3a, 0xe3, 0x05, 0x74, 0xea, 0xa7, 0xf3, + 0x75, 0x0f, 0xc2, 0xc8, 0x2d, 0xb2, 0x7c, 0xcb, 0x5a, 0x29, 0xbb, 0xb8, 0x56, 0xc2, 0x27, 0xf0, + 0x5e, 0x42, 0xed, 0x94, 0x8d, 0xc4, 0x63, 0xc2, 0x8e, 0x1b, 0x3d, 0xfd, 0x82, 0x74, 0x1d, 0xd3, + 0x4e, 0x13, 0x50, 0x6c, 0xc1, 0x56, 0x92, 0x59, 0x9a, 0xbb, 0x90, 0x1f, 0x0c, 0xfa, 0x05, 0xd1, + 0x5c, 0xce, 0xca, 0xf7, 0x6a, 0x81, 0x06, 0xbc, 0xf1, 0x08, 0x6a, 0x67, 0xae, 0xa1, 0x33, 0xf2, + 0x6e, 0xb4, 0xbf, 0x49, 0xdc, 0x15, 0xbc, 0x3f, 0x47, 0x5c, 0x1a, 0xfb, 0x1e, 0x40, 0x99, 0xdf, + 0x4a, 0x33, 0x42, 0xf9, 0x5d, 0x15, 0x8a, 0xc0, 0x44, 0xbc, 0x4a, 0x3a, 0x2e, 0xf1, 0x74, 0xe6, + 0x78, 0xff, 0xb5, 0xae, 0xc5, 0x9f, 0x65, 0xfb, 0x6c, 0x2a, 0x27, 0x8d, 0x65, 0xd7, 0x6e, 0x07, + 0x04, 0x59, 0x83, 0xd0, 0x81, 0xd8, 0x0c, 0x25, 0x55, 0xfc, 0xe6, 0x52, 0xf8, 0x26, 0x1f, 0x53, + 0x91, 0xfa, 0xe5, 0x84, 0x94, 0x40, 0xa9, 0x9e, 0x80, 0xa8, 0x3e, 0x94, 0x33, 0x7a, 0x69, 0xda, + 0x86, 0xb8, 0x8a, 0x4a, 0xaa, 0xf8, 0xfd, 0xf8, 0x37, 0x0a, 0x14, 0xc2, 0x2f, 0x25, 0x28, 0x07, + 0x99, 0xce, 0xf3, 0xea, 0x12, 0x2a, 0xc2, 0xea, 0x59, 0xfb, 0x79, 0xbb, 0xf3, 0xfd, 0x76, 0x55, + 0x41, 0x9b, 0x50, 0x6d, 0x77, 0xfa, 0xda, 0x61, 0xa7, 0xd3, 0xef, 0xf5, 0xd5, 0x83, 0x6e, 0xb7, + 0x79, 0x54, 0xcd, 0xa0, 0x0d, 0xa8, 0xf4, 0xfa, 0x1d, 0xb5, 0xa9, 0xf5, 0x3b, 0xa7, 0x87, 0xbd, + 0x7e, 0xa7, 0xdd, 0xac, 0x2e, 0xa3, 0x1a, 0x6c, 0x1e, 0x9c, 0xa8, 0xcd, 0x83, 0xa3, 0x2f, 0xe3, + 0xf0, 0x2c, 0xa7, 0xb4, 0xda, 0x8d, 0xce, 0x69, 0xf7, 0xa0, 0xdf, 0x3a, 0x3c, 0x69, 0x6a, 0x2f, + 0x9a, 0x6a, 0xaf, 0xd5, 0x69, 0x57, 0x57, 0x38, 0x7b, 0xb5, 0x79, 0xdc, 0xea, 0xb4, 
0x35, 0x2e, + 0xe5, 0x8b, 0xce, 0x59, 0xfb, 0xa8, 0x9a, 0x7b, 0xdc, 0x85, 0x72, 0xdc, 0x0a, 0xae, 0x53, 0xef, + 0xac, 0xd1, 0x68, 0xf6, 0x7a, 0x52, 0xc1, 0x7e, 0xeb, 0xb4, 0xd9, 0x39, 0xeb, 0x57, 0x15, 0x04, + 0x90, 0x6b, 0x1c, 0xb4, 0x1b, 0xcd, 0x93, 0x6a, 0x86, 0x13, 0xd4, 0x66, 0xf7, 0xe4, 0xa0, 0xc1, + 0xd5, 0xe1, 0x83, 0xb3, 0x76, 0xbb, 0xd5, 0x3e, 0xae, 0x66, 0xf7, 0x7f, 0x56, 0x86, 0x42, 0x2f, + 0x70, 0x12, 0xea, 0x00, 0x4c, 0xdf, 0xae, 0x68, 0x27, 0xe6, 0xbe, 0x99, 0xe7, 0x71, 0xfd, 0xc3, + 0x85, 0x74, 0x19, 0x4e, 0xbc, 0x84, 0xbe, 0x0b, 0xcb, 0x7d, 0xea, 0xa0, 0xf8, 0xa1, 0x3c, 0xfd, + 0xac, 0x54, 0xaf, 0xcd, 0x12, 0x82, 0xb5, 0x8f, 0x94, 0x4f, 0x14, 0x74, 0x02, 0x85, 0xf0, 0x93, + 0x02, 0xba, 0x1b, 0x03, 0x27, 0x3f, 0xb8, 0xd4, 0x77, 0x16, 0x91, 0x43, 0x6d, 0x7e, 0x08, 0xe5, + 0xf8, 0x27, 0x0a, 0x84, 0x63, 0x6b, 0xe6, 0x7e, 0x0c, 0xa9, 0xdf, 0xbf, 0x16, 0x13, 0x32, 0xff, + 0x02, 0x56, 0xfd, 0xcf, 0x08, 0x28, 0x9e, 0x77, 0xf1, 0x4f, 0x14, 0xf5, 0x3b, 0xf3, 0x89, 0x21, + 0x9f, 0x16, 0xe4, 0x83, 0x9e, 0x3e, 0xba, 0x93, 0xf4, 0x70, 0xb4, 0x9b, 0x5e, 0xbf, 0xbb, 0x80, + 0x1a, 0x65, 0x15, 0x74, 0xc5, 0x13, 0xac, 0x12, 0x8d, 0xf9, 0x04, 0xab, 0x64, 0x2b, 0x1d, 0x2f, + 0xa1, 0x33, 0x28, 0x45, 0x3b, 0xd2, 0x68, 0x37, 0x29, 0x3b, 0xd9, 0x31, 0xaf, 0xdf, 0xbb, 0x06, + 0x11, 0x8d, 0x48, 0xfc, 0x36, 0x4e, 0x44, 0x64, 0x6e, 0x99, 0x90, 0x88, 0xc8, 0xfc, 0xeb, 0x1c, + 0x2f, 0xa1, 0x73, 0xa8, 0x24, 0x9e, 0xc4, 0xe8, 0x7e, 0xe2, 0xdc, 0x99, 0xd7, 0xc9, 0xa8, 0x3f, + 0xb8, 0x1e, 0x94, 0x4c, 0xd0, 0xb0, 0x1f, 0x8c, 0x66, 0x02, 0x12, 0x2b, 0x09, 0xea, 0x3b, 0x8b, + 0xc8, 0xa1, 0xc6, 0x5d, 0x58, 0x3b, 0x26, 0xac, 0xeb, 0x91, 0xab, 0x77, 0xc5, 0xb1, 0x2f, 0x38, + 0x4e, 0xfb, 0xd5, 0xe8, 0xde, 0xfc, 0x25, 0x91, 0x5e, 0xf6, 0x2d, 0xb8, 0xaa, 0x50, 0x8c, 0x34, + 0x81, 0x51, 0xfc, 0x20, 0x98, 0xed, 0x5a, 0xd7, 0x77, 0x17, 0x03, 0xa2, 0xc9, 0x1a, 0x3c, 0x7e, + 0x13, 0xc9, 0x9a, 0x78, 0x83, 0x27, 0x92, 0x35, 0xf9, 0x62, 0xc6, 0x4b, 0x48, 0x17, 0x9f, 0x32, + 0x62, 0x0d, 0x4c, 0xf4, 0x20, 0x69, 0xd4, 0xbc, 0xce, 0x6a, 0xfd, 0xa3, 0x1b, 0x50, 0x51, 0x11, + 0xc9, 0x1e, 0x69, 0x42, 0xc4, 0x82, 0xe6, 0x6d, 0x42, 0xc4, 0xa2, 0x46, 0x2b, 0x5e, 0x42, 0x3f, + 0x80, 0xb5, 0x58, 0x89, 0x96, 0x08, 0xdd, 0xbc, 0xaa, 0xb3, 0x8e, 0xaf, 0x83, 0x44, 0x77, 0x5d, + 0xbc, 0xc2, 0x4a, 0xec, 0xba, 0xb9, 0xb5, 0x5c, 0x62, 0xd7, 0xcd, 0x2f, 0xd1, 0xf0, 0x12, 0x32, + 0x60, 0x7d, 0xa6, 0xc2, 0x41, 0x71, 0xa3, 0x17, 0x15, 0x5c, 0xf5, 0x87, 0x37, 0xc1, 0xa2, 0x19, + 0x18, 0xa9, 0x33, 0xd0, 0xcc, 0x55, 0x94, 0xa8, 0x74, 0xea, 0xbb, 0x8b, 0x01, 0x01, 0xcf, 0xc3, + 0xea, 0x5f, 0xde, 0xee, 0x28, 0x7f, 0x7d, 0xbb, 0xa3, 0xfc, 0xe3, 0xed, 0x8e, 0xf2, 0xdb, 0x7f, + 0xee, 0x2c, 0x9d, 0xe7, 0xc4, 0x9f, 0x3c, 0x3e, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x40, + 0x5d, 0x19, 0x5a, 0x39, 0x22, 0x00, 0x00, +} diff --git a/proto/pkg/tinykvpb/tinykvpb.pb.go b/proto/pkg/tinykvpb/tinykvpb.pb.go new file mode 100644 index 00000000..3a8ec38a --- /dev/null +++ b/proto/pkg/tinykvpb/tinykvpb.pb.go @@ -0,0 +1,647 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: tinykvpb.proto + +package tinykvpb + +import ( + "fmt" + "math" + + proto "github.com/golang/protobuf/proto" + + _ "github.com/gogo/protobuf/gogoproto" + + coprocessor "github.com/pingcap-incubator/tinykv/proto/pkg/coprocessor" + + kvrpcpb "github.com/pingcap-incubator/tinykv/proto/pkg/kvrpcpb" + + raft_serverpb "github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for TinyKv service + +type TinyKvClient interface { + // KV commands with mvcc/txn supported. + KvGet(ctx context.Context, in *kvrpcpb.GetRequest, opts ...grpc.CallOption) (*kvrpcpb.GetResponse, error) + KvScan(ctx context.Context, in *kvrpcpb.ScanRequest, opts ...grpc.CallOption) (*kvrpcpb.ScanResponse, error) + KvPrewrite(ctx context.Context, in *kvrpcpb.PrewriteRequest, opts ...grpc.CallOption) (*kvrpcpb.PrewriteResponse, error) + KvCommit(ctx context.Context, in *kvrpcpb.CommitRequest, opts ...grpc.CallOption) (*kvrpcpb.CommitResponse, error) + KvCheckTxnStatus(ctx context.Context, in *kvrpcpb.CheckTxnStatusRequest, opts ...grpc.CallOption) (*kvrpcpb.CheckTxnStatusResponse, error) + KvBatchRollback(ctx context.Context, in *kvrpcpb.BatchRollbackRequest, opts ...grpc.CallOption) (*kvrpcpb.BatchRollbackResponse, error) + KvResolveLock(ctx context.Context, in *kvrpcpb.ResolveLockRequest, opts ...grpc.CallOption) (*kvrpcpb.ResolveLockResponse, error) + // RawKV commands. + RawGet(ctx context.Context, in *kvrpcpb.RawGetRequest, opts ...grpc.CallOption) (*kvrpcpb.RawGetResponse, error) + RawPut(ctx context.Context, in *kvrpcpb.RawPutRequest, opts ...grpc.CallOption) (*kvrpcpb.RawPutResponse, error) + RawDelete(ctx context.Context, in *kvrpcpb.RawDeleteRequest, opts ...grpc.CallOption) (*kvrpcpb.RawDeleteResponse, error) + RawScan(ctx context.Context, in *kvrpcpb.RawScanRequest, opts ...grpc.CallOption) (*kvrpcpb.RawScanResponse, error) + // Raft commands (tinykv <-> tinykv). + Raft(ctx context.Context, opts ...grpc.CallOption) (TinyKv_RaftClient, error) + Snapshot(ctx context.Context, opts ...grpc.CallOption) (TinyKv_SnapshotClient, error) + // Coprocessor + Coprocessor(ctx context.Context, in *coprocessor.Request, opts ...grpc.CallOption) (*coprocessor.Response, error) +} + +type tinyKvClient struct { + cc *grpc.ClientConn +} + +func NewTinyKvClient(cc *grpc.ClientConn) TinyKvClient { + return &tinyKvClient{cc} +} + +func (c *tinyKvClient) KvGet(ctx context.Context, in *kvrpcpb.GetRequest, opts ...grpc.CallOption) (*kvrpcpb.GetResponse, error) { + out := new(kvrpcpb.GetResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvGet", in, out, opts...) 
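+	// Invoke performs the unary RPC over the shared ClientConn and unmarshals the reply into out; every unary method below follows this same generated pattern.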
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) KvScan(ctx context.Context, in *kvrpcpb.ScanRequest, opts ...grpc.CallOption) (*kvrpcpb.ScanResponse, error) { + out := new(kvrpcpb.ScanResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvScan", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) KvPrewrite(ctx context.Context, in *kvrpcpb.PrewriteRequest, opts ...grpc.CallOption) (*kvrpcpb.PrewriteResponse, error) { + out := new(kvrpcpb.PrewriteResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvPrewrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) KvCommit(ctx context.Context, in *kvrpcpb.CommitRequest, opts ...grpc.CallOption) (*kvrpcpb.CommitResponse, error) { + out := new(kvrpcpb.CommitResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvCommit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) KvCheckTxnStatus(ctx context.Context, in *kvrpcpb.CheckTxnStatusRequest, opts ...grpc.CallOption) (*kvrpcpb.CheckTxnStatusResponse, error) { + out := new(kvrpcpb.CheckTxnStatusResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvCheckTxnStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) KvBatchRollback(ctx context.Context, in *kvrpcpb.BatchRollbackRequest, opts ...grpc.CallOption) (*kvrpcpb.BatchRollbackResponse, error) { + out := new(kvrpcpb.BatchRollbackResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvBatchRollback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) KvResolveLock(ctx context.Context, in *kvrpcpb.ResolveLockRequest, opts ...grpc.CallOption) (*kvrpcpb.ResolveLockResponse, error) { + out := new(kvrpcpb.ResolveLockResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/KvResolveLock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) RawGet(ctx context.Context, in *kvrpcpb.RawGetRequest, opts ...grpc.CallOption) (*kvrpcpb.RawGetResponse, error) { + out := new(kvrpcpb.RawGetResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/RawGet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) RawPut(ctx context.Context, in *kvrpcpb.RawPutRequest, opts ...grpc.CallOption) (*kvrpcpb.RawPutResponse, error) { + out := new(kvrpcpb.RawPutResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/RawPut", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) RawDelete(ctx context.Context, in *kvrpcpb.RawDeleteRequest, opts ...grpc.CallOption) (*kvrpcpb.RawDeleteResponse, error) { + out := new(kvrpcpb.RawDeleteResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/RawDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) RawScan(ctx context.Context, in *kvrpcpb.RawScanRequest, opts ...grpc.CallOption) (*kvrpcpb.RawScanResponse, error) { + out := new(kvrpcpb.RawScanResponse) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/RawScan", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tinyKvClient) Raft(ctx context.Context, opts ...grpc.CallOption) (TinyKv_RaftClient, error) { + stream, err := c.cc.NewStream(ctx, &_TinyKv_serviceDesc.Streams[0], "/tinykvpb.TinyKv/Raft", opts...) 
+ if err != nil { + return nil, err + } + x := &tinyKvRaftClient{stream} + return x, nil +} + +type TinyKv_RaftClient interface { + Send(*raft_serverpb.RaftMessage) error + CloseAndRecv() (*raft_serverpb.Done, error) + grpc.ClientStream +} + +type tinyKvRaftClient struct { + grpc.ClientStream +} + +func (x *tinyKvRaftClient) Send(m *raft_serverpb.RaftMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *tinyKvRaftClient) CloseAndRecv() (*raft_serverpb.Done, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(raft_serverpb.Done) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *tinyKvClient) Snapshot(ctx context.Context, opts ...grpc.CallOption) (TinyKv_SnapshotClient, error) { + stream, err := c.cc.NewStream(ctx, &_TinyKv_serviceDesc.Streams[1], "/tinykvpb.TinyKv/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &tinyKvSnapshotClient{stream} + return x, nil +} + +type TinyKv_SnapshotClient interface { + Send(*raft_serverpb.SnapshotChunk) error + CloseAndRecv() (*raft_serverpb.Done, error) + grpc.ClientStream +} + +type tinyKvSnapshotClient struct { + grpc.ClientStream +} + +func (x *tinyKvSnapshotClient) Send(m *raft_serverpb.SnapshotChunk) error { + return x.ClientStream.SendMsg(m) +} + +func (x *tinyKvSnapshotClient) CloseAndRecv() (*raft_serverpb.Done, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(raft_serverpb.Done) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *tinyKvClient) Coprocessor(ctx context.Context, in *coprocessor.Request, opts ...grpc.CallOption) (*coprocessor.Response, error) { + out := new(coprocessor.Response) + err := c.cc.Invoke(ctx, "/tinykvpb.TinyKv/Coprocessor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for TinyKv service + +type TinyKvServer interface { + // KV commands with mvcc/txn supported. + KvGet(context.Context, *kvrpcpb.GetRequest) (*kvrpcpb.GetResponse, error) + KvScan(context.Context, *kvrpcpb.ScanRequest) (*kvrpcpb.ScanResponse, error) + KvPrewrite(context.Context, *kvrpcpb.PrewriteRequest) (*kvrpcpb.PrewriteResponse, error) + KvCommit(context.Context, *kvrpcpb.CommitRequest) (*kvrpcpb.CommitResponse, error) + KvCheckTxnStatus(context.Context, *kvrpcpb.CheckTxnStatusRequest) (*kvrpcpb.CheckTxnStatusResponse, error) + KvBatchRollback(context.Context, *kvrpcpb.BatchRollbackRequest) (*kvrpcpb.BatchRollbackResponse, error) + KvResolveLock(context.Context, *kvrpcpb.ResolveLockRequest) (*kvrpcpb.ResolveLockResponse, error) + // RawKV commands. + RawGet(context.Context, *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) + RawPut(context.Context, *kvrpcpb.RawPutRequest) (*kvrpcpb.RawPutResponse, error) + RawDelete(context.Context, *kvrpcpb.RawDeleteRequest) (*kvrpcpb.RawDeleteResponse, error) + RawScan(context.Context, *kvrpcpb.RawScanRequest) (*kvrpcpb.RawScanResponse, error) + // Raft commands (tinykv <-> tinykv). 
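+	// Raft and Snapshot are client-streaming RPCs: the sending peer streams RaftMessage or
+	// SnapshotChunk values and receives a single Done acknowledgement when the stream is closed.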
+ Raft(TinyKv_RaftServer) error + Snapshot(TinyKv_SnapshotServer) error + // Coprocessor + Coprocessor(context.Context, *coprocessor.Request) (*coprocessor.Response, error) +} + +func RegisterTinyKvServer(s *grpc.Server, srv TinyKvServer) { + s.RegisterService(&_TinyKv_serviceDesc, srv) +} + +func _TinyKv_KvGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvGet(ctx, req.(*kvrpcpb.GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_KvScan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.ScanRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvScan(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvScan", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvScan(ctx, req.(*kvrpcpb.ScanRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_KvPrewrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.PrewriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvPrewrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvPrewrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvPrewrite(ctx, req.(*kvrpcpb.PrewriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_KvCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvCommit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvCommit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvCommit(ctx, req.(*kvrpcpb.CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_KvCheckTxnStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.CheckTxnStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvCheckTxnStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvCheckTxnStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvCheckTxnStatus(ctx, req.(*kvrpcpb.CheckTxnStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_KvBatchRollback_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.BatchRollbackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvBatchRollback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvBatchRollback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvBatchRollback(ctx, req.(*kvrpcpb.BatchRollbackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_KvResolveLock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.ResolveLockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).KvResolveLock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/KvResolveLock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).KvResolveLock(ctx, req.(*kvrpcpb.ResolveLockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_RawGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.RawGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).RawGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/RawGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).RawGet(ctx, req.(*kvrpcpb.RawGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_RawPut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.RawPutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).RawPut(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/RawPut", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).RawPut(ctx, req.(*kvrpcpb.RawPutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_RawDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.RawDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).RawDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/RawDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).RawDelete(ctx, req.(*kvrpcpb.RawDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_RawScan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvrpcpb.RawScanRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).RawScan(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/RawScan", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).RawScan(ctx, req.(*kvrpcpb.RawScanRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TinyKv_Raft_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TinyKvServer).Raft(&tinyKvRaftServer{stream}) +} + +type TinyKv_RaftServer interface { + SendAndClose(*raft_serverpb.Done) error + Recv() (*raft_serverpb.RaftMessage, error) + grpc.ServerStream +} + +type tinyKvRaftServer struct { + grpc.ServerStream +} + +func (x *tinyKvRaftServer) SendAndClose(m *raft_serverpb.Done) error { + return x.ServerStream.SendMsg(m) +} + +func (x *tinyKvRaftServer) Recv() (*raft_serverpb.RaftMessage, error) { + m := new(raft_serverpb.RaftMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TinyKv_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TinyKvServer).Snapshot(&tinyKvSnapshotServer{stream}) +} + +type TinyKv_SnapshotServer interface { + SendAndClose(*raft_serverpb.Done) error + Recv() (*raft_serverpb.SnapshotChunk, error) + grpc.ServerStream +} + +type tinyKvSnapshotServer struct { + grpc.ServerStream +} + +func (x *tinyKvSnapshotServer) SendAndClose(m *raft_serverpb.Done) error { + return x.ServerStream.SendMsg(m) +} + +func (x *tinyKvSnapshotServer) Recv() (*raft_serverpb.SnapshotChunk, error) { + m := new(raft_serverpb.SnapshotChunk) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TinyKv_Coprocessor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(coprocessor.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TinyKvServer).Coprocessor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tinykvpb.TinyKv/Coprocessor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TinyKvServer).Coprocessor(ctx, req.(*coprocessor.Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _TinyKv_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tinykvpb.TinyKv", + HandlerType: (*TinyKvServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "KvGet", + Handler: _TinyKv_KvGet_Handler, + }, + { + MethodName: "KvScan", + Handler: _TinyKv_KvScan_Handler, + }, + { + MethodName: "KvPrewrite", + Handler: _TinyKv_KvPrewrite_Handler, + }, + { + MethodName: "KvCommit", + Handler: _TinyKv_KvCommit_Handler, + }, + { + MethodName: "KvCheckTxnStatus", + Handler: _TinyKv_KvCheckTxnStatus_Handler, + }, + { + MethodName: "KvBatchRollback", + Handler: _TinyKv_KvBatchRollback_Handler, + }, + { + MethodName: "KvResolveLock", + Handler: _TinyKv_KvResolveLock_Handler, + }, + { + MethodName: "RawGet", + Handler: _TinyKv_RawGet_Handler, + }, + { + MethodName: "RawPut", + Handler: _TinyKv_RawPut_Handler, + }, + { + MethodName: "RawDelete", + Handler: _TinyKv_RawDelete_Handler, + }, + { + MethodName: "RawScan", + Handler: _TinyKv_RawScan_Handler, + }, + { + MethodName: "Coprocessor", + Handler: _TinyKv_Coprocessor_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Raft", + Handler: _TinyKv_Raft_Handler, + ClientStreams: true, + }, + { + StreamName: "Snapshot", + Handler: _TinyKv_Snapshot_Handler, + ClientStreams: true, + }, + }, + Metadata: "tinykvpb.proto", +} + +func init() { 
proto.RegisterFile("tinykvpb.proto", fileDescriptor_tinykvpb_71a6ae942ac295c5) } + +var fileDescriptor_tinykvpb_71a6ae942ac295c5 = []byte{ + // 452 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xdb, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0x5b, 0x09, 0xba, 0x62, 0x34, 0x18, 0x6e, 0x81, 0x2d, 0x8c, 0x20, 0xed, 0x8a, 0xab, + 0x22, 0x01, 0x12, 0x17, 0x1c, 0x24, 0x96, 0x4a, 0xbb, 0xf0, 0x90, 0x2a, 0x77, 0x5c, 0x23, 0xd7, + 0xfa, 0xd6, 0x56, 0xe9, 0xec, 0x60, 0x3b, 0x2e, 0x7d, 0x13, 0x9e, 0x83, 0xa7, 0xe0, 0x92, 0x47, + 0x40, 0xe5, 0x45, 0x50, 0x92, 0xd9, 0x39, 0x34, 0xbd, 0xb3, 0x7f, 0xff, 0x83, 0x5b, 0xc7, 0x1f, + 0x7a, 0x60, 0x96, 0x62, 0x13, 0xdb, 0x64, 0x36, 0x4a, 0x94, 0x34, 0x12, 0xf7, 0xdd, 0x3e, 0x38, + 0x8c, 0xad, 0x4a, 0xb8, 0x13, 0x82, 0x81, 0x62, 0xd7, 0xe6, 0x9b, 0x06, 0x65, 0x41, 0x79, 0xf8, + 0x88, 0xcb, 0x44, 0x49, 0x0e, 0x5a, 0x4b, 0x75, 0x8b, 0x86, 0x73, 0x39, 0x97, 0xf9, 0xf2, 0x55, + 0xb6, 0x2a, 0xe8, 0xeb, 0x5f, 0x07, 0xa8, 0x77, 0xb5, 0x14, 0x1b, 0x62, 0xf1, 0x5b, 0x74, 0x97, + 0xd8, 0x0b, 0x30, 0x78, 0x30, 0x72, 0x27, 0x5c, 0x80, 0xa1, 0xf0, 0x3d, 0x05, 0x6d, 0x82, 0x61, + 0x1d, 0xea, 0x44, 0x0a, 0x0d, 0x67, 0x1d, 0xfc, 0x0e, 0xf5, 0x88, 0x9d, 0x72, 0x26, 0x70, 0xe9, + 0xc8, 0xb6, 0x2e, 0xf7, 0xb8, 0x41, 0x7d, 0x30, 0x42, 0x88, 0xd8, 0x89, 0x82, 0xb5, 0x5a, 0x1a, + 0xc0, 0xc7, 0xde, 0xe6, 0x90, 0x2b, 0x38, 0x69, 0x51, 0x7c, 0xc9, 0x47, 0xd4, 0x27, 0x36, 0x92, + 0x37, 0x37, 0x4b, 0x83, 0x9f, 0x78, 0x63, 0x01, 0x5c, 0xc1, 0xd3, 0x1d, 0xee, 0xe3, 0x5f, 0xd1, + 0x11, 0xb1, 0xd1, 0x02, 0x78, 0x7c, 0xf5, 0x43, 0x4c, 0x0d, 0x33, 0xa9, 0xc6, 0x61, 0x69, 0xaf, + 0x09, 0xae, 0xee, 0xc5, 0x5e, 0xdd, 0xd7, 0x52, 0xf4, 0x90, 0xd8, 0x73, 0x66, 0xf8, 0x82, 0xca, + 0xd5, 0x6a, 0xc6, 0x78, 0x8c, 0x9f, 0xfb, 0x54, 0x8d, 0xbb, 0xd2, 0x70, 0x9f, 0xec, 0x3b, 0x2f, + 0xd1, 0x21, 0xb1, 0x14, 0xb4, 0x5c, 0x59, 0xb8, 0x94, 0x3c, 0xc6, 0xcf, 0x7c, 0xa4, 0x42, 0x5d, + 0xdf, 0x69, 0xbb, 0xe8, 0xdb, 0xde, 0xa3, 0x1e, 0x65, 0xeb, 0xec, 0x63, 0x97, 0xb7, 0x56, 0x80, + 0xdd, 0x5b, 0x73, 0xbc, 0x11, 0x9e, 0xa4, 0x8d, 0xf0, 0x24, 0x6d, 0x0f, 0xe7, 0xdc, 0x87, 0xc7, + 0xe8, 0x1e, 0x65, 0xeb, 0x31, 0xac, 0xc0, 0x00, 0x3e, 0xa9, 0xfa, 0x0a, 0xe6, 0x2a, 0x82, 0x36, + 0xc9, 0xb7, 0x7c, 0x42, 0x07, 0x94, 0xad, 0xf3, 0x67, 0x57, 0x3b, 0xab, 0xfa, 0xf2, 0x8e, 0x77, + 0x85, 0xca, 0x5f, 0xb8, 0x43, 0xd9, 0xb5, 0xc1, 0xc1, 0xa8, 0x3e, 0x3d, 0x19, 0xfc, 0x02, 0x5a, + 0xb3, 0x39, 0x04, 0x83, 0x86, 0x36, 0x96, 0x02, 0xce, 0x3a, 0x2f, 0xbb, 0xf8, 0x33, 0xea, 0x4f, + 0x05, 0x4b, 0xf4, 0x42, 0x1a, 0x7c, 0xda, 0x30, 0x39, 0x21, 0x5a, 0xa4, 0x22, 0xde, 0x5f, 0xf1, + 0x01, 0xdd, 0x8f, 0xca, 0x09, 0xc5, 0xc3, 0x51, 0x75, 0x5e, 0xcb, 0xd1, 0xa9, 0x53, 0xf7, 0xeb, + 0xcf, 0x8f, 0x7e, 0x6f, 0xc3, 0xee, 0x9f, 0x6d, 0xd8, 0xfd, 0xbb, 0x0d, 0xbb, 0x3f, 0xff, 0x85, + 0x9d, 0x59, 0x2f, 0x9f, 0xe6, 0x37, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6c, 0xc6, 0xd2, + 0x36, 0x04, 0x00, 0x00, +} diff --git a/proto/proto/coprocessor.proto b/proto/proto/coprocessor.proto new file mode 100644 index 00000000..18fb64b1 --- /dev/null +++ b/proto/proto/coprocessor.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package coprocessor; + +import "errorpb.proto"; +import "kvrpcpb.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; + + +// [start, end) +message KeyRange { + bytes start = 1; + bytes end = 2; +} + +message Request { + kvrpcpb.Context context = 1; + int64 tp = 2; + 
bytes data = 3; + uint64 start_ts = 7; + repeated KeyRange ranges = 4; +} + +message Response { + bytes data = 1 [(gogoproto.customtype) = "github.com/pingcap/kvproto/pkg/sharedbytes.SharedBytes", (gogoproto.nullable) = false]; + errorpb.Error region_error = 2; + kvrpcpb.LockInfo locked = 3; + string other_error = 4; + KeyRange range = 5; +} + diff --git a/proto/proto/eraftpb.proto b/proto/proto/eraftpb.proto new file mode 100644 index 00000000..58026ab5 --- /dev/null +++ b/proto/proto/eraftpb.proto @@ -0,0 +1,112 @@ +syntax = "proto3"; +package eraftpb; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; +} + +// The entry is a type of change that needs to be applied. It contains two data fields. +// While the fields are built into the model; their usage is determined by the entry_type. +// +// For normal entries, the data field should contain the data change that should be applied. +// The context field can be used for any contextual data that might be relevant to the +// application of the data. +// +// For configuration changes, the data will contain the ConfChange message and the +// context will provide anything needed to assist the configuration change. The context +// is for the user to set and use in this case. +message Entry { + EntryType entry_type = 1; + uint64 term = 2; + uint64 index = 3; + bytes data = 4; +} + +// SnapshotMetadata cantains the log index and term of the last log applied to this +// Snapshot, along with the membership information of the time the last log applied. +message SnapshotMetadata { + ConfState conf_state = 1; + uint64 index = 2; + uint64 term = 3; +} + +message Snapshot { + bytes data = 1; + SnapshotMetadata metadata = 2; +} + +// Some MessageType defined here are local messages which not come from the network, but should +// also use the Step method to handle +enum MessageType { + // 'MessageType_MsgHup' is a local message used for election. If an election timeout happened, + // the node should passes 'MessageType_MsgHup' to its Step method and start a new election. + MsgHup = 0; + // 'MessageType_MsgBeat' is a local message that signals the leader to send a heartbeat + // of the 'MessageType_MsgHeartbeat' type to its followers. + MsgBeat = 1; + // 'MessageType_MsgPropose' is a local message that proposes to append data to the leader's log entries. + MsgPropose = 2; + // 'MessageType_MsgAppend' contains log entries to replicate. + MsgAppend = 3; + // 'MessageType_MsgAppendResponse' is response to log replication request('MessageType_MsgAppend'). + MsgAppendResponse = 4; + // 'MessageType_MsgRequestVote' requests votes for election. + MsgRequestVote = 5; + // 'MessageType_MsgRequestVoteResponse' contains responses from voting request. + MsgRequestVoteResponse = 6; + // 'MessageType_MsgSnapshot' requests to install a snapshot message. + MsgSnapshot = 7; + // 'MessageType_MsgHeartbeat' sends heartbeat from leader to its followers. + MsgHeartbeat = 8; + // 'MessageType_MsgHeartbeatResponse' is a response to 'MessageType_MsgHeartbeat'. + MsgHeartbeatResponse = 9; + // 'MessageType_MsgTransferLeader' requests the leader to transfer its leadership. + MsgTransferLeader = 11; + // 'MessageType_MsgTimeoutNow' send from the leader to the leadership transfer target, to let + // the transfer target timeout immediately and start a new election. 
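+    // (This is how a 'MessageType_MsgTransferLeader' handoff completes: the target
+    // campaigns right away instead of waiting for its randomized election timeout.)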
+ MsgTimeoutNow = 12; +} + +message Message { + MessageType msg_type = 1; + uint64 to = 2; + uint64 from = 3; + uint64 term = 4; + uint64 log_term = 5; + uint64 index = 6; + repeated Entry entries = 7; + uint64 commit = 8; + Snapshot snapshot = 9; + bool reject = 10; + // TODO: Delete Start + uint64 reject_hint = 11; + // TODO: Delete End +} + +// HardState contains the state of a node, including the current term, commit index +// and the vote record +message HardState { + uint64 term = 1; + uint64 vote = 2; + uint64 commit = 3; +} + +// ConfState contains the current membership information of the raft group +message ConfState { + // all node id + repeated uint64 nodes = 1; +} + +enum ConfChangeType { + AddNode = 0; + RemoveNode = 1; +} + +// ConfChange is the data that attach on entry with EntryConfChange type +message ConfChange { + ConfChangeType change_type = 1; + // node will be add/remove + uint64 node_id = 2; + bytes context = 3; +} diff --git a/proto/proto/errorpb.proto b/proto/proto/errorpb.proto new file mode 100644 index 00000000..ca1db671 --- /dev/null +++ b/proto/proto/errorpb.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package errorpb; + +import "metapb.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message NotLeader { + uint64 region_id = 1; + metapb.Peer leader = 2; +} + +message StoreNotMatch { + uint64 request_store_id = 1; + uint64 actual_store_id = 2; +} + +message RegionNotFound { + uint64 region_id = 1; +} + +message KeyNotInRegion { + bytes key = 1; + uint64 region_id = 2; + bytes start_key = 3; + bytes end_key = 4; +} + +message EpochNotMatch { + repeated metapb.Region current_regions = 1; +} + +message StaleCommand { +} + +message Error { + reserved "stale_epoch"; + + string message = 1; + NotLeader not_leader = 2; + RegionNotFound region_not_found = 3; + KeyNotInRegion key_not_in_region = 4; + EpochNotMatch epoch_not_match = 5; + StaleCommand stale_command = 7; + StoreNotMatch store_not_match = 8; +} \ No newline at end of file diff --git a/proto/proto/kvrpcpb.proto b/proto/proto/kvrpcpb.proto new file mode 100644 index 00000000..0611bd81 --- /dev/null +++ b/proto/proto/kvrpcpb.proto @@ -0,0 +1,260 @@ +syntax = "proto3"; +package kvrpcpb; + +import "metapb.proto"; +import "errorpb.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Raw commands. +message RawGetRequest { + Context context = 1; + bytes key = 2; + string cf = 3; +} + +message RawGetResponse { + errorpb.Error region_error = 1; + string error = 2; + bytes value = 3; + // True if the requested key doesn't exist; another error will not be signalled. + bool not_found = 4; +} + +message RawPutRequest { + Context context = 1; + bytes key = 2; + bytes value = 3; + string cf = 4; +} + +message RawPutResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawDeleteRequest { + Context context = 1; + bytes key = 2; + string cf = 3; +} + +message RawDeleteResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawScanRequest { + Context context = 1; + bytes start_key = 2; + // The maximum number of values read. + uint32 limit = 3; + string cf = 4; + +} + +message RawScanResponse { + errorpb.Error region_error = 1; + // An error which affects the whole scan. Per-key errors are included in kvs. 
+ string error = 2; + repeated KvPair kvs = 3; +} + +// Transactional commands. +// Note that "version" and "timestamp" are synonymous. + +// Read the value of a key at the given time. +message GetRequest { + Context context = 1; + bytes key = 2; + uint64 version = 3; +} + +message GetResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + bytes value = 3; + // True if the requested key doesn't exist; another error will not be signalled. + bool not_found = 4; +} + +// Prewrite is the first phase of two phase commit. A prewrite commit contains all the +// writes (mutations) which a client would like to make as part of a transaction. The +// request succeeds if none of the keys are locked. In that case all those keys will +// be locked. If the prewrite fails, no changes are made to the DB. +message PrewriteRequest { + Context context = 1; + repeated Mutation mutations = 2; + // Key of the primary lock. + bytes primary_lock = 3; + uint64 start_version = 4; + uint64 lock_ttl = 5; +} + +// Empty if the prewrite is successful. +message PrewriteResponse { + errorpb.Error region_error = 1; + repeated KeyError errors = 2; +} + +// Commit is the second phase of 2pc. The client must have successfully prewritten +// the transaction to all nodes. If all keys are locked by the given transaction, +// then the commit should succeed. If any keys are locked by a different +// transaction or are not locked at all (rolled back or expired), the commit +// fails. +message CommitRequest { + Context context = 1; + // Identifies the transaction, must match the start_version in the transaction's + // prewrite request. + uint64 start_version = 2; + // Must match the keys mutated by the transaction's prewrite request. + repeated bytes keys = 3; + // Must be greater than start_version. + uint64 commit_version = 4; +} + +// Empty if the commit is successful. +message CommitResponse { + errorpb.Error region_error = 1; + KeyError error = 2; +} + +// Read multiple values from the DB. +message ScanRequest { + Context context = 1; + bytes start_key = 2; + // The maximum number of values read. + uint32 limit = 3; + uint64 version = 4; +} + +message ScanResponse { + errorpb.Error region_error = 1; + // Other errors are recorded for each key in pairs. + repeated KvPair pairs = 2; +} + +// Rollback an un-committed transaction. Will fail if the transaction has already +// been committed or keys are locked by a different transaction. If the keys were never +// locked, no action is needed but it is not an error. If successful all keys will be +// unlocked and all uncommitted values removed. +message BatchRollbackRequest { + Context context = 1; + uint64 start_version = 2; + repeated bytes keys = 3; +} + +// Empty if the rollback is successful. +message BatchRollbackResponse { + errorpb.Error region_error = 1; + KeyError error = 2; +} + +// CheckTxnStatus reports on the status of a transaction and may take action to +// rollback expired locks. +// If the transaction has previously been rolled back or committed, return that information. +// If the TTL of the transaction is exhausted, abort that transaction and roll back the primary lock. +// Otherwise, returns the TTL information. +message CheckTxnStatusRequest { + Context context = 1; + bytes primary_key = 2; + uint64 lock_ts = 3; // primary key and lock ts together to locate the primary lock of a transaction. + uint64 current_ts = 4; // current_ts is used to check TTL timeout, it may be inaccurate. 
+} + +message CheckTxnStatusResponse { + errorpb.Error region_error = 1; + // Three kinds of txn status: + // locked: lock_ttl > 0 + // committed: commit_version > 0 + // rolled back: lock_ttl == 0 && commit_version == 0 + uint64 lock_ttl = 2; + uint64 commit_version = 3; + // The action performed by TinyKV in response to the CheckTxnStatus request. + Action action = 4; +} + +// Resolve lock will find all locks belonging to the transaction with the given start timestamp. +// If commit_version is 0, TinyKV will rollback all locks. If commit_version is greater than +// 0 it will commit those locks with the given commit timestamp. +// The client will make a resolve lock request for all secondary keys once it has successfully +// committed or rolled back the primary key. +message ResolveLockRequest { + Context context = 1; + uint64 start_version = 2; + uint64 commit_version = 3; +} + +// Empty if the lock is resolved successfully. +message ResolveLockResponse { + errorpb.Error region_error = 1; + KeyError error = 2; +} + +// Utility data types used by the above requests and responses. + +// Either a key/value pair or an error for a particular key. +message KvPair { + KeyError error = 1; + bytes key = 2; + bytes value = 3; +} + +enum Op { + Put = 0; + Del = 1; + Rollback = 2; + // Used by TinySQL but not TinyKV. + Lock = 3; +} + +message Mutation { + Op op = 1; + bytes key = 2; + bytes value = 3; +} + +enum Action { + NoAction = 0; + // The lock is rolled back because it has expired. + TTLExpireRollback = 1; + // The lock does not exist, TinyKV left a record of the rollback, but did not + // have to delete a lock. + LockNotExistRollback = 2; +} + +// Data types used for errors. + +// Many responses can include a KeyError for some problem with one of the requested key. +// Only one field is set and it indicates what the client should do in response. +message KeyError { + LockInfo locked = 1; // Client should backoff or cleanup the lock then retry. + string retryable = 2; // Client may restart the txn. e.g write conflict. + string abort = 3; // Client should abort the txn. + WriteConflict conflict = 4; // Another transaction is trying to write a key. The client can retry. +} + +message LockInfo { + bytes primary_lock = 1; + uint64 lock_version = 2; + bytes key = 3; + uint64 lock_ttl = 4; +} + +message WriteConflict { + uint64 start_ts = 1; + uint64 conflict_ts = 2; + bytes key = 3; + bytes primary = 4; +} + +// Miscellaneous data present in each request. +message Context { + uint64 region_id = 1; + metapb.RegionEpoch region_epoch = 2; + metapb.Peer peer = 3; + uint64 term = 5; +} diff --git a/proto/proto/metapb.proto b/proto/proto/metapb.proto new file mode 100644 index 00000000..6b9f113e --- /dev/null +++ b/proto/proto/metapb.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package metapb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message Cluster { + uint64 id = 1; + // max peer count for a region. + // scheduler will do the auto-balance if region peer count mismatches. + uint32 max_peer_count = 2; + // more attributes...... +} + +enum StoreState { + Up = 0; + Offline = 1; + Tombstone = 2; +} + +message Store { + uint64 id = 1; + // Address to handle client requests (kv, cop, etc.) 
+ string address = 2; + StoreState state = 3; +} + +message RegionEpoch { + // Conf change version, auto increment when add or remove peer + uint64 conf_ver = 1; + // Region version, auto increment when split or merge + uint64 version = 2; +} + +message Region { + uint64 id = 1; + // Region key range [start_key, end_key). + bytes start_key = 2; + bytes end_key = 3; + RegionEpoch region_epoch = 4; + repeated Peer peers = 5; +} + +message Peer { + uint64 id = 1; + uint64 store_id = 2; +} diff --git a/proto/proto/raft_cmdpb.proto b/proto/proto/raft_cmdpb.proto new file mode 100644 index 00000000..9e4c6198 --- /dev/null +++ b/proto/proto/raft_cmdpb.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; +package raft_cmdpb; + +import "metapb.proto"; +import "errorpb.proto"; +import "eraftpb.proto"; + +message GetRequest { + string cf = 1; + bytes key = 2; +} + +message GetResponse { + bytes value = 1; +} + +message PutRequest { + string cf = 1; + bytes key = 2; + bytes value = 3; +} + +message PutResponse {} + +message DeleteRequest { + string cf = 1; + bytes key = 2; +} + +message DeleteResponse {} + +message SnapRequest {} + +message SnapResponse { + metapb.Region region = 1; +} + +enum CmdType { + Invalid = 0; + Get = 1; + Put = 3; + Delete = 4; + Snap = 5; +} + +message Request { + CmdType cmd_type = 1; + GetRequest get = 2; + PutRequest put = 4; + DeleteRequest delete = 5; + SnapRequest snap = 6; +} + +message Response { + CmdType cmd_type = 1; + GetResponse get = 2; + PutResponse put = 4; + DeleteResponse delete = 5; + SnapResponse snap = 6; +} + +message ChangePeerRequest { + // This can be only called in internal RaftStore now. + eraftpb.ConfChangeType change_type = 1; + metapb.Peer peer = 2; +} + +message ChangePeerResponse { + metapb.Region region = 1; +} + +message SplitRequest { + // This can be only called in internal RaftStore now. + // The split_key has to exist in the splitting region. + bytes split_key = 1; + // We split the region into two. The first uses the origin + // parent region id, and the second uses the new_region_id. + // We must guarantee that the new_region_id is global unique. + uint64 new_region_id = 2; + // The peer ids for the new split region. + repeated uint64 new_peer_ids = 3; +} + +message SplitResponse { + // SplitResponse contains the region where specific keys have split into. + repeated metapb.Region regions = 1; +} + +message CompactLogRequest { + uint64 compact_index = 1; + uint64 compact_term = 2; +} + +message CompactLogResponse {} + +message TransferLeaderRequest { + metapb.Peer peer = 1; +} + +message TransferLeaderResponse {} + +enum AdminCmdType { + InvalidAdmin = 0; + ChangePeer = 1; + CompactLog = 3; + TransferLeader = 4; + Split = 10; +} + +message AdminRequest { + AdminCmdType cmd_type = 1; + ChangePeerRequest change_peer = 2; + CompactLogRequest compact_log = 4; + TransferLeaderRequest transfer_leader = 5; + SplitRequest split = 10; +} + +message AdminResponse { + AdminCmdType cmd_type = 1; + ChangePeerResponse change_peer = 2; + CompactLogResponse compact_log = 4; + TransferLeaderResponse transfer_leader = 5; + SplitResponse split = 10; +} + +message RaftRequestHeader { + uint64 region_id = 1; + metapb.Peer peer = 2; + metapb.RegionEpoch region_epoch = 4; + uint64 term = 5; +} + +message RaftResponseHeader { + errorpb.Error error = 1; + bytes uuid = 2; + uint64 current_term = 3; +} + +message RaftCmdRequest { + RaftRequestHeader header = 1; + // We can't enclose normal requests and administrator request + // at same time. 
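+    // In other words, a single RaftCmdRequest carries either the normal `requests`
+    // below or an `admin_request`, never both in the same command.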
+ repeated Request requests = 2; + AdminRequest admin_request = 3; +} + +message RaftCmdResponse { + RaftResponseHeader header = 1; + repeated Response responses = 2; + AdminResponse admin_response = 3; +} diff --git a/proto/proto/raft_serverpb.proto b/proto/proto/raft_serverpb.proto new file mode 100644 index 00000000..ee98daea --- /dev/null +++ b/proto/proto/raft_serverpb.proto @@ -0,0 +1,89 @@ +syntax = "proto3"; +package raft_serverpb; + +import "eraftpb.proto"; +import "metapb.proto"; + +// The message sent between Raft peer, it wraps the raft meessage with some meta information. +message RaftMessage { + uint64 region_id = 1; + metapb.Peer from_peer = 2; + metapb.Peer to_peer = 3; + eraftpb.Message message = 4; + metapb.RegionEpoch region_epoch = 5; + // true means to_peer is a tombstone peer and it should remove itself. + bool is_tombstone = 6; + // Region key range [start_key, end_key). + bytes start_key = 7; + bytes end_key = 8; +} + +// Used to store the persistent state for Raft, including the hard state for raft and the last index of the raft log. +message RaftLocalState { + eraftpb.HardState hard_state = 1; + uint64 last_index = 2; +} + +// Used to store the persistent state for Raft state machine. +message RaftApplyState { + uint64 applied_index = 1; + RaftTruncatedState truncated_state = 2; +} + +// The truncated state for Raft log compaction. +message RaftTruncatedState { + uint64 index = 1; + uint64 term = 2; +} + +// Used to store Region information and the corresponding Peer state on this Store. +message RegionLocalState { + PeerState state = 1; + metapb.Region region = 2; +} + +// Normal indicates that this Peer is normal; +// Tombstone shows that this Peer has been removed from Region and cannot join in Raft Group. +enum PeerState { + Normal = 0; + Tombstone = 2; +} + +// The persistent identification for Store. +// It used to recover the store id after restart. +message StoreIdent { + uint64 cluster_id = 1; + uint64 store_id = 2; +} + +// Snapshot sending and reciveing related messages. +// Not included in the course scope. +message KeyValue { + bytes key = 1; + bytes value = 2; +} + +message RaftSnapshotData { + metapb.Region region = 1; + uint64 file_size = 2; + repeated KeyValue data = 3; + SnapshotMeta meta = 5; +} + +message SnapshotCFFile { + string cf = 1; + uint64 size = 2; + uint32 checksum = 3; +} + +message SnapshotMeta { + repeated SnapshotCFFile cf_files = 1; +} + +message SnapshotChunk { + RaftMessage message = 1; + bytes data = 2; +} + +message Done {} + diff --git a/proto/proto/schedulerpb.proto b/proto/proto/schedulerpb.proto new file mode 100644 index 00000000..7b56c668 --- /dev/null +++ b/proto/proto/schedulerpb.proto @@ -0,0 +1,439 @@ +syntax = "proto3"; +package schedulerpb; + +import "metapb.proto"; +import "eraftpb.proto"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service Scheduler { + // GetMembers get the member list of this cluster. It does not require + // the cluster_id in request matchs the id of this cluster. 
+ rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {} + + rpc Tso(stream TsoRequest) returns (stream TsoResponse) {} + + rpc Bootstrap(BootstrapRequest) returns (BootstrapResponse) {} + + rpc IsBootstrapped(IsBootstrappedRequest) returns (IsBootstrappedResponse) {} + + rpc AllocID(AllocIDRequest) returns (AllocIDResponse) {} + + rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {} + + rpc PutStore(PutStoreRequest) returns (PutStoreResponse) {} + + rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {} + + rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {} + + rpc RegionHeartbeat(stream RegionHeartbeatRequest) returns (stream RegionHeartbeatResponse) {} + + rpc GetRegion(GetRegionRequest) returns (GetRegionResponse) {} + + rpc GetPrevRegion(GetRegionRequest) returns (GetRegionResponse) {} + + rpc GetRegionByID(GetRegionByIDRequest) returns (GetRegionResponse) {} + + rpc ScanRegions(ScanRegionsRequest) returns (ScanRegionsResponse) {} + + rpc AskSplit(AskSplitRequest) returns (AskSplitResponse) {} + + rpc GetClusterConfig(GetClusterConfigRequest) returns (GetClusterConfigResponse) {} + + rpc PutClusterConfig(PutClusterConfigRequest) returns (PutClusterConfigResponse) {} + + rpc ScatterRegion(ScatterRegionRequest) returns (ScatterRegionResponse) {} + + rpc GetGCSafePoint(GetGCSafePointRequest) returns (GetGCSafePointResponse) {} + + rpc UpdateGCSafePoint(UpdateGCSafePointRequest) returns (UpdateGCSafePointResponse) {} + + rpc GetOperator(GetOperatorRequest) returns (GetOperatorResponse) {} +} + +message RequestHeader { + // cluster_id is the ID of the cluster which be sent to. + uint64 cluster_id = 1; +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. + uint64 cluster_id = 1; + Error error = 2; +} + +enum ErrorType { + OK = 0; + UNKNOWN = 1; + NOT_BOOTSTRAPPED = 2; + STORE_TOMBSTONE = 3; + ALREADY_BOOTSTRAPPED = 4; + INCOMPATIBLE_VERSION = 5; + REGION_NOT_FOUND = 6; +} + +message Error { + ErrorType type = 1; + string message = 2; +} + +message TsoRequest { + RequestHeader header = 1; + + uint32 count = 2; +} + +message Timestamp { + int64 physical = 1; + int64 logical = 2; +} + +message TsoResponse { + ResponseHeader header = 1; + + uint32 count = 2; + Timestamp timestamp = 3; +} + +message BootstrapRequest { + RequestHeader header = 1; + + metapb.Store store = 2; +} + +message BootstrapResponse { + ResponseHeader header = 1; +} + +message IsBootstrappedRequest { + RequestHeader header = 1; +} + +message IsBootstrappedResponse { + ResponseHeader header = 1; + + bool bootstrapped = 2; +} + +message AllocIDRequest { + RequestHeader header = 1; +} + +message AllocIDResponse { + ResponseHeader header = 1; + + uint64 id = 2; +} + +message GetStoreRequest { + RequestHeader header = 1; + + uint64 store_id = 2; +} + +message GetStoreResponse { + ResponseHeader header = 1; + + metapb.Store store = 2; + StoreStats stats = 3; +} + +message PutStoreRequest { + RequestHeader header = 1; + + metapb.Store store = 2; +} + +message PutStoreResponse { + ResponseHeader header = 1; +} + +message GetAllStoresRequest { + RequestHeader header = 1; + // Do NOT return tombstone stores if set to true. 
+ bool exclude_tombstone_stores = 2; +} + +message GetAllStoresResponse { + ResponseHeader header = 1; + + repeated metapb.Store stores = 2; +} + +message GetRegionRequest { + RequestHeader header = 1; + + bytes region_key = 2; +} + +message GetRegionResponse { + ResponseHeader header = 1; + + metapb.Region region = 2; + metapb.Peer leader = 3; + repeated metapb.Peer slaves = 4; +} + +message GetRegionByIDRequest { + RequestHeader header = 1; + + uint64 region_id = 2; +} + +// Use GetRegionResponse as the response of GetRegionByIDRequest. + +message ScanRegionsRequest { + RequestHeader header = 1; + + bytes start_key = 2; + int32 limit = 3; // no limit when limit <= 0. + bytes end_key = 4; // end_key is +inf when it is empty. +} + +message ScanRegionsResponse { + ResponseHeader header = 1; + + repeated metapb.Region regions = 2; + repeated metapb.Peer leaders = 3; +} + +message GetClusterConfigRequest { + RequestHeader header = 1; +} + +message GetClusterConfigResponse { + ResponseHeader header = 1; + + metapb.Cluster cluster = 2; +} + +message PutClusterConfigRequest { + RequestHeader header = 1; + + metapb.Cluster cluster = 2; +} + +message PutClusterConfigResponse { + ResponseHeader header = 1; +} + +message Member { + // name is the name of the Scheduler member. + string name = 1; + // member_id is the unique id of the Scheduler member. + uint64 member_id = 2; + repeated string peer_urls = 3; + repeated string client_urls = 4; + int32 leader_priority = 5; +} + +message GetMembersRequest { + RequestHeader header = 1; +} + +message GetMembersResponse { + ResponseHeader header = 1; + + repeated Member members = 2; + Member leader = 3; + Member etcd_leader = 4; +} + +message RegionHeartbeatRequest { + RequestHeader header = 1; + + metapb.Region region = 2; + // Leader Peer sending the heartbeat. + metapb.Peer leader = 3; + // Pending peers are the peers that the leader can't consider as + // working followers. + repeated metapb.Peer pending_peers = 5; + // Approximate region size. + uint64 approximate_size = 10; +} + +message ChangePeer { + metapb.Peer peer = 1; + eraftpb.ConfChangeType change_type = 2; +} + +message TransferLeader { + metapb.Peer peer = 1; +} + +message RegionHeartbeatResponse { + ResponseHeader header = 1; + + // Notice, Scheduleeer only allows handling reported epoch >= current scheduler's. + // Leader peer reports region status with RegionHeartbeatRequest + // to scheduler regularly, scheduler will determine whether this region + // should do ChangePeer or not. + // E,g, max peer number is 3, region A, first only peer 1 in A. + // 1. Scheduler region state -> Peers (1), ConfVer (1). + // 2. Leader peer 1 reports region state to scheduler, scheduler finds the + // peer number is < 3, so first changes its current region + // state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2. + // 3. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2), + // scheduler updates its state -> Peers (1, 2), ConfVer (2). + // 4. Leader may report old Peers (1), ConfVer (1) to scheduler before ConfChange + // finished, scheduler stills responses ChangePeer Adding 2, of course, we must + // guarantee the second ChangePeer can't be applied in TiKV. + ChangePeer change_peer = 2; + // Scheduler can return transfer_leader to let TiKV does leader transfer itself. + TransferLeader transfer_leader = 3; + // ID of the region + uint64 region_id = 4; + metapb.RegionEpoch region_epoch = 5; + // Leader of the region at the moment of the corresponding request was made. 
+ metapb.Peer target_peer = 6; +} + +message AskSplitRequest { + RequestHeader header = 1; + + metapb.Region region = 2; +} + +message AskSplitResponse { + ResponseHeader header = 1; + + // We split the region into two, first uses the origin + // parent region id, and the second uses the new_region_id. + // We must guarantee that the new_region_id is global unique. + uint64 new_region_id = 2; + // The peer ids for the new split region. + repeated uint64 new_peer_ids = 3; +} + +message ReportSplitRequest { + RequestHeader header = 1; + + metapb.Region left = 2; + metapb.Region right = 3; +} + +message ReportSplitResponse { + ResponseHeader header = 1; +} + +message SplitID { + uint64 new_region_id = 1; + repeated uint64 new_peer_ids = 2; +} + +message TimeInterval { + // The unix timestamp in seconds of the start of this period. + uint64 start_timestamp = 1; + // The unix timestamp in seconds of the end of this period. + uint64 end_timestamp = 2; +} + +message RecordPair { + string key = 1; + uint64 value = 2; +} + +message StoreStats { + uint64 store_id = 1; + // Capacity for the store. + uint64 capacity = 2; + // Available size for the store. + uint64 available = 3; + // Total region count in this store. + uint32 region_count = 4; + // Current sending snapshot count. + uint32 sending_snap_count = 5; + // Current receiving snapshot count. + uint32 receiving_snap_count = 6; + // When the store is started (unix timestamp in seconds). + uint32 start_time = 7; + // How many region is applying snapshot. + uint32 applying_snap_count = 8; + // If the store is busy + bool is_busy = 9; + // Actually used space by db + uint64 used_size = 10; + // Actually reported time interval + TimeInterval interval = 15; + // Threads' CPU usages in the store + repeated RecordPair cpu_usages = 16; + // Threads' read disk I/O rates in the store + repeated RecordPair read_io_rates = 17; + // Threads' write disk I/O rates in the store + repeated RecordPair write_io_rates = 18; + // Operations' latencies in the store + repeated RecordPair op_latencies = 19; +} + +message StoreHeartbeatRequest { + RequestHeader header = 1; + + StoreStats stats = 2; +} + +message StoreHeartbeatResponse { + ResponseHeader header = 1; +} + +message ScatterRegionRequest { + RequestHeader header = 1; + + uint64 region_id = 2; + + // Scheduler will use these region information if it can't find the region. + // For example, the region is just split and hasn't report to Scheduler yet. 
+ metapb.Region region = 3; + metapb.Peer leader = 4; +} + +message ScatterRegionResponse { + ResponseHeader header = 1; +} + +message GetGCSafePointRequest { + RequestHeader header = 1; +} + +message GetGCSafePointResponse { + ResponseHeader header = 1; + + uint64 safe_point = 2; +} + +message UpdateGCSafePointRequest { + RequestHeader header = 1; + + uint64 safe_point = 2; +} + +message UpdateGCSafePointResponse { + ResponseHeader header = 1; + + uint64 new_safe_point = 2; +} + +message GetOperatorRequest { + RequestHeader header = 1; + uint64 region_id = 2; +} + +enum OperatorStatus { + SUCCESS = 0; + TIMEOUT = 1; + CANCEL = 2; + REPLACE = 3; + RUNNING = 4; +} + +message GetOperatorResponse { + ResponseHeader header = 1; + uint64 region_id = 2; + bytes desc = 3; + OperatorStatus status = 4; + bytes kind = 5; +} diff --git a/proto/proto/tinykvpb.proto b/proto/proto/tinykvpb.proto new file mode 100644 index 00000000..d96e1017 --- /dev/null +++ b/proto/proto/tinykvpb.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; +package tinykvpb; + +import "kvrpcpb.proto"; +import "raft_serverpb.proto"; +import "coprocessor.proto"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Serve as a distributed kv database. See the request and response definitions in +// kvrpcpb.proto +service TinyKv { + // KV commands with mvcc/txn supported. + rpc KvGet(kvrpcpb.GetRequest) returns (kvrpcpb.GetResponse) {} + rpc KvScan(kvrpcpb.ScanRequest) returns (kvrpcpb.ScanResponse) {} + rpc KvPrewrite(kvrpcpb.PrewriteRequest) returns (kvrpcpb.PrewriteResponse) {} + rpc KvCommit(kvrpcpb.CommitRequest) returns (kvrpcpb.CommitResponse) {} + rpc KvCheckTxnStatus(kvrpcpb.CheckTxnStatusRequest) returns (kvrpcpb.CheckTxnStatusResponse) {} + rpc KvBatchRollback(kvrpcpb.BatchRollbackRequest) returns (kvrpcpb.BatchRollbackResponse) {} + rpc KvResolveLock(kvrpcpb.ResolveLockRequest) returns (kvrpcpb.ResolveLockResponse) {} + + // RawKV commands. + rpc RawGet(kvrpcpb.RawGetRequest) returns (kvrpcpb.RawGetResponse) {} + rpc RawPut(kvrpcpb.RawPutRequest) returns (kvrpcpb.RawPutResponse) {} + rpc RawDelete(kvrpcpb.RawDeleteRequest) returns (kvrpcpb.RawDeleteResponse) {} + rpc RawScan(kvrpcpb.RawScanRequest) returns (kvrpcpb.RawScanResponse) {} + + // Raft commands (tinykv <-> tinykv). + rpc Raft(stream raft_serverpb.RaftMessage) returns (raft_serverpb.Done) {} + rpc Snapshot(stream raft_serverpb.SnapshotChunk) returns (raft_serverpb.Done) {} + + // Coprocessor + rpc Coprocessor(coprocessor.Request) returns (coprocessor.Response) {} +} diff --git a/proto/tools.json b/proto/tools.json new file mode 100644 index 00000000..6e5fe082 --- /dev/null +++ b/proto/tools.json @@ -0,0 +1,14 @@ +{ + "Tools": [ + { + "Repository": "github.com/gogo/protobuf/protoc-gen-gofast", + "Commit": "636bf0302bc95575d69441b25a2603156ffdddf1" + }, + { + "Repository": "golang.org/x/tools/cmd/goimports", + "Commit": "04b5d21e00f1f47bd824a6ade581e7189bacde87" + } + ], + "RetoolVersion": "1.3.7" + } + \ No newline at end of file diff --git a/raft/doc.go b/raft/doc.go new file mode 100644 index 00000000..eb77dd30 --- /dev/null +++ b/raft/doc.go @@ -0,0 +1,292 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package raft sends and receives messages in the Protocol Buffer format +defined in the eraftpb package. + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. + +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/etcd-io/etcd/tree/master/contrib/raftexample + +Usage + +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a node from scratch: + + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxInflightMsgs: 256, + } + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) + +To restart a node from previous state: + + storage := raft.NewMemoryStorage() + + // recover the in-memory storage from persistent + // snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxInflightMsgs: 256, + } + + // restart raft without peer information. + // peer information is already included in the storage. + n := raft.RestartNode(c) + +Now that you are holding onto a Node you have a few responsibilities: + +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MessageType_MsgSnapshot, call Node.ReportSnapshot() after it has been sent (these messages may be +large). + +Note: Marshalling messages is not thread-safe; it is important that you +make sure that no new entries are persisted while marshalling. +The easiest way to achieve this is to serialize the messages directly inside +your main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. +If any committed Entry has Type EntryType_EntryConfChange, call Node.ApplyConfChange() +to apply it to the node. 
The configuration change may be cancelled at this point +by setting the NodeId field to zero before calling ApplyConfChange +(but ApplyConfChange must be called one way or the other, and the decision to cancel +must be based solely on the state machine and not external information such as +the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. +This may be done at any time after step 1, although all updates must be processed +in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an +implementation of the Storage interface. The provided MemoryStorage +type can be used for this (if you repopulate its state upon a +restart), or you can supply your own disk-backed implementation. + +Third, when you receive a message from another node, pass it to Node.Step: + + func recvRaftRPC(ctx context.Context, m eraftpb.Message) { + n.Step(ctx, m) + } + +Finally, you need to call Node.Tick() at regular intervals (probably +via a time.Ticker). Raft has two important timeouts: heartbeat and the +election timeout. However, internally to the raft package time is +represented by an abstract "tick". + +The total state machine handling loop will look something like this: + + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.State, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == eraftpb.EntryType_EntryConfChange { + var cc eraftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } + +To propose changes to the state machine from your node take your application +data, serialize it into a byte slice and call: + + n.Propose(ctx, data) + +If the proposal is committed, data will appear in committed entries with type +eraftpb.EntryType_EntryNormal. There is no guarantee that a proposed command will be +committed; you may have to re-propose after a timeout. + +To add or remove a node in a cluster, build ConfChange struct 'cc' and call: + + n.ProposeConfChange(ctx, cc) + +After config change is committed, some committed entry with type +eraftpb.EntryType_EntryConfChange will be returned. You must apply it to node through: + + var cc eraftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. +This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +Implementation notes + +This implementation is up to date with the final Raft thesis +(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our +implementation of the membership change protocol differs somewhat from +that described in chapter 4. The key invariant that membership changes +happen one node at a time is preserved, but in our implementation the +membership change takes effect when its entry is applied, not when it +is added to the log (so the entry is committed under the old +membership instead of the new). This is equivalent in terms of safety, +since the old and new configurations are guaranteed to overlap. 
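+
+For example (an illustrative sketch, not code from this package): when a
+three-node group adds a fourth node, the EntryType_EntryConfChange entry is
+still committed once any two of the original three nodes have accepted it,
+because quorum is computed over the old ConfState until the entry is applied:
+
+	// committedUnderOldMembership is a hypothetical helper; nodes is the
+	// membership the entry was proposed under (the old configuration).
+	func committedUnderOldMembership(acks int, nodes []uint64) bool {
+		return acks > len(nodes)/2 // 2 of 3 here, even while a 4th node joins
+	}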
+ +To ensure that we do not attempt to commit two membership changes at +once by matching log positions (which would be unsafe since they +should have different quorum requirements), we simply disallow any +proposed membership change while any uncommitted change appears in +the leader's log. + +This approach introduces a problem when you try to remove a member +from a two-member cluster: If one of the members dies before the +other one receives the commit of the confchange entry, then the member +cannot be removed any more since the cluster cannot make progress. +For this reason it is highly recommended to use three or more nodes in +every cluster. + +MessageType + +Package raft sends and receives message in Protocol Buffer format (defined +in eraftpb package). Each state (follower, candidate, leader) implements its +own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when +advancing with the given eraftpb.Message. Each step is determined by its +eraftpb.MessageType. Note that every step is checked by one common method +'Step' that safety-checks the terms of node and incoming message to prevent +stale log entries: + + 'MessageType_MsgHup' is used for election. If a node is a follower or candidate, the + 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or + candidate has not received any heartbeat before the election timeout, it + passes 'MessageType_MsgHup' to its Step method and becomes (or remains) a candidate to + start a new election. + + 'MessageType_MsgBeat' is an internal type that signals the leader to send a heartbeat of + the 'MessageType_MsgHeartbeat' type. If a node is a leader, the 'tick' function in + the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to + send periodic 'MessageType_MsgHeartbeat' messages to its followers. + + 'MessageType_MsgPropose' proposes to append data to its log entries. This is a special + type to redirect proposals to leader. Therefore, send method overwrites + eraftpb.Message's term with its HardState's term to avoid attaching its + local term to 'MessageType_MsgPropose'. When 'MessageType_MsgPropose' is passed to the leader's 'Step' + method, the leader first calls the 'appendEntry' method to append entries + to its log, and then calls 'bcastAppend' method to send those entries to + its peers. When passed to candidate, 'MessageType_MsgPropose' is dropped. When passed to + follower, 'MessageType_MsgPropose' is stored in follower's mailbox(msgs) by the send + method. It is stored with sender's ID and later forwarded to leader by + rafthttp package. + + 'MessageType_MsgAppend' contains log entries to replicate. A leader calls bcastAppend, + which calls sendAppend, which sends soon-to-be-replicated logs in 'MessageType_MsgAppend' + type. When 'MessageType_MsgAppend' is passed to candidate's Step method, candidate reverts + back to follower, because it indicates that there is a valid leader sending + 'MessageType_MsgAppend' messages. Candidate and follower respond to this message in + 'MessageType_MsgAppendResponse' type. + + 'MessageType_MsgAppendResponse' is response to log replication request('MessageType_MsgAppend'). When + 'MessageType_MsgAppend' is passed to candidate or follower's Step method, it responds by + calling 'handleAppendEntries' method, which sends 'MessageType_MsgAppendResponse' to raft + mailbox. + + 'MessageType_MsgRequestVote' requests votes for election. 
When a node is a follower or + candidate and 'MessageType_MsgHup' is passed to its Step method, then the node calls + 'campaign' method to campaign itself to become a leader. Once 'campaign' + method is called, the node becomes candidate and sends 'MessageType_MsgRequestVote' to peers + in cluster to request votes. When passed to leader or candidate's Step + method and the message's Term is lower than leader's or candidate's, + 'MessageType_MsgRequestVote' will be rejected ('MessageType_MsgRequestVoteResponse' is returned with Reject true). + If leader or candidate receives 'MessageType_MsgRequestVote' with higher term, it will revert + back to follower. When 'MessageType_MsgRequestVote' is passed to follower, it votes for the + sender only when sender's last term is greater than MessageType_MsgRequestVote's term or + sender's last term is equal to MessageType_MsgRequestVote's term but sender's last committed + index is greater than or equal to follower's. + + 'MessageType_MsgRequestVoteResponse' contains responses from voting request. When 'MessageType_MsgRequestVoteResponse' is + passed to candidate, the candidate calculates how many votes it has won. If + it's more than majority (quorum), it becomes leader and calls 'bcastAppend'. + If candidate receives majority of votes of denials, it reverts back to + follower. + + 'MessageType_MsgSnapshot' requests to install a snapshot message. When a node has just + become a leader or the leader receives 'MessageType_MsgPropose' message, it calls + 'bcastAppend' method, which then calls 'sendAppend' method to each + follower. In 'sendAppend', if a leader fails to get term or entries, + the leader requests snapshot by sending 'MessageType_MsgSnapshot' type message. + + 'MessageType_MsgSnapStatus' tells the result of snapshot install message. When a + follower rejected 'MessageType_MsgSnapshot', it indicates the snapshot request with + 'MessageType_MsgSnapshot' had failed from network issues which causes the network layer + to fail to send out snapshots to its followers. Then leader considers + follower's progress as probe. When 'MessageType_MsgSnapshot' were not rejected, it + indicates that the snapshot succeeded and the leader sets follower's + progress to probe and resumes its log replication. + + 'MessageType_MsgHeartbeat' sends heartbeat from leader. When 'MessageType_MsgHeartbeat' is passed + to candidate and message's term is higher than candidate's, the candidate + reverts back to follower and updates its committed index from the one in + this heartbeat. And it sends the message to its mailbox. When + 'MessageType_MsgHeartbeat' is passed to follower's Step method and message's term is + higher than follower's, the follower updates its leaderID with the ID + from the message. + + 'MessageType_MsgHeartbeatResponse' is a response to 'MessageType_MsgHeartbeat'. When 'MessageType_MsgHeartbeatResponse' + is passed to leader's Step method, the leader knows which follower + responded. And only when the leader's last committed index is greater than + follower's Match index, the leader runs 'sendAppend` method. + + 'MessageType_MsgUnreachable' tells that request(message) wasn't delivered. When + 'MessageType_MsgUnreachable' is passed to leader's Step method, the leader discovers + that the follower that sent this 'MessageType_MsgUnreachable' is not reachable, often + indicating 'MessageType_MsgAppend' is lost. When follower's progress state is replicate, + the leader sets it back to probe. 
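+
+As a rough sketch (not this package's actual implementation), the common term
+check performed by 'Step' before any per-state handling behaves roughly like:
+
+	// termGuard is a hypothetical helper; it returns false when the message
+	// should be dropped rather than handed to stepFollower/stepCandidate/stepLeader.
+	func termGuard(r *Raft, m eraftpb.Message) bool {
+		switch {
+		case m.Term == 0:
+			// local message such as MessageType_MsgHup, MessageType_MsgBeat or
+			// MessageType_MsgPropose; there is no remote term to check.
+		case m.Term > r.Term:
+			// a higher term always forces this node back to follower state
+			// (a real implementation may also record the sender as leader for appends).
+			r.becomeFollower(m.Term, None)
+		case m.Term < r.Term:
+			// stale message from an old term; drop it (an implementation may
+			// instead reply with a rejection carrying the newer term).
+			return false
+		}
+		return true
+	}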
+ +*/ +package raft diff --git a/raft/log.go b/raft/log.go new file mode 100644 index 00000000..f3967455 --- /dev/null +++ b/raft/log.go @@ -0,0 +1,89 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + +// RaftLog manage the log entries, its struct look like: +// +// truncated.....first.....applied....committed....stabled.....last +// --------| |------------------------------------------------| +// log entries +// +// for simplify the RaftLog implement should manage all log entries +// that not truncated +type RaftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + + // applied is the highest log position that the application has + // been instructed to apply to its state machine. + // Invariant: applied <= committed + applied uint64 + + // log entries with index <= stabled are stabled to storage + stabled uint64 + + // all entries that have not yet compact. + entries []pb.Entry + + // the incoming unstable snapshot, if any. + // (Used in 2C) + pendingSnapshot *pb.Snapshot + + // Your Data Here (2A). +} + +// newLog returns log using the given storage. It recovers the log +// to the state that it just commits and applies the latest snapshot. +func newLog(storage Storage) *RaftLog { + // Your Code Here (2A). + return nil +} + +// We need to compact the log entries in some point of time like +// storage compact stabled log entries prevent the log entries +// grow unlimitedly in memory +func (l *RaftLog) maybeCompact() { + // Your Code Here (2C). +} + +// unstableEntries return all the unstable entries +func (l *RaftLog) unstableEntries() []pb.Entry { + // Your Code Here (2A). + return nil +} + +// nextEnts returns all the committed but not applied entries +func (l *RaftLog) nextEnts() (ents []pb.Entry) { + // Your Code Here (2A). + return nil +} + +// LastIndex return the last index of the lon entries +func (l *RaftLog) LastIndex() uint64 { + // Your Code Here (2A). + return 0 +} + +// Term return the term of the entry in the given index +func (l *RaftLog) Term(i uint64) (uint64, error) { + // Your Code Here (2A). + return 0, nil +} diff --git a/raft/raft.go b/raft/raft.go new file mode 100644 index 00000000..241d4b9f --- /dev/null +++ b/raft/raft.go @@ -0,0 +1,238 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +// None is a placeholder node ID used when there is no leader. +const None uint64 = 0 + +// StateType represents the role of a node in a cluster. +type StateType uint64 + +const ( + StateFollower StateType = iota + StateCandidate + StateLeader +) + +var stmap = [...]string{ + "StateFollower", + "StateCandidate", + "StateLeader", +} + +func (st StateType) String() string { + return stmap[uint64(st)] +} + +// ErrProposalDropped is returned when the proposal is ignored by some cases, +// so that the proposer can be notified and fail fast. +var ErrProposalDropped = errors.New("raft proposal dropped") + +// Config contains the parameters to start a raft. +type Config struct { + // ID is the identity of the local raft. ID cannot be 0. + ID uint64 + + // peers contains the IDs of all nodes (including self) in the raft cluster. It + // should only be set when starting a new raft cluster. Restarting raft from + // previous configuration will panic if peers is set. peer is private and only + // used for testing right now. + peers []uint64 + + // ElectionTick is the number of Node.Tick invocations that must pass between + // elections. That is, if a follower does not receive any message from the + // leader of current term before ElectionTick has elapsed, it will become + // candidate and start an election. ElectionTick must be greater than + // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid + // unnecessary leader switching. + ElectionTick int + // HeartbeatTick is the number of Node.Tick invocations that must pass between + // heartbeats. That is, a leader sends heartbeat messages to maintain its + // leadership every HeartbeatTick ticks. + HeartbeatTick int + + // Storage is the storage for raft. raft generates entries and states to be + // stored in storage. raft reads the persisted entries and states out of + // Storage when it needs. raft reads out the previous state and configuration + // out of storage when restarting. + Storage Storage + // Applied is the last applied index. It should only be set when restarting + // raft. raft will not return entries to the application smaller or equal to + // Applied. If Applied is unset when restarting, raft might return previous + // applied entries. This is a very application dependent configuration. + Applied uint64 +} + +func (c *Config) validate() error { + if c.ID == None { + return errors.New("cannot use none as id") + } + + if c.HeartbeatTick <= 0 { + return errors.New("heartbeat tick must be greater than 0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("election tick must be greater than heartbeat tick") + } + + if c.Storage == nil { + return errors.New("storage cannot be nil") + } + + return nil +} + +// Progress represents a follower’s progress in the view of the leader. Leader maintains +// progresses of all followers, and sends entries to the follower based on its progress. 
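+// For example (illustrative): if a follower's Match is 5 and Next is 6, the leader
+// believes entries up to and including index 5 are replicated on that follower, and
+// the next MessageType_MsgAppend it sends will start at index 6.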
+type Progress struct { + Match, Next uint64 +} + +type Raft struct { + id uint64 + + Term uint64 + Vote uint64 + + // the log + RaftLog *RaftLog + + // log replication progress of each peers + Prs map[uint64]*Progress + + // this peer's role + State StateType + + // votes records + votes map[uint64]bool + + // msgs need to send + msgs []pb.Message + + // the leader id + Lead uint64 + + // heartbeat interval + heartbeatTimeout int + // baseline of election interval + electionTimeout int + // randomizedElectionTimeout is a random number between + // [electiontimeout, 2 * electiontimeout - 1]. + randomizedElectionTimeout int + + // leadTransferee is id of the leader transfer target when its value is not zero. + // Follow the procedure defined in raft thesis 3.10. + leadTransferee uint64 + + // Only one conf change may be pending (in the log, but not yet + // applied) at a time. This is enforced via PendingConfIndex, which + // is set to a value >= the log index of the latest pending + // configuration change (if any). Config changes are only allowed to + // be proposed if the leader's applied index is greater than this + // value. + PendingConfIndex uint64 + + // number of ticks since it reached last electionTimeout + electionElapsed int + + // number of ticks since it reached last heartbeatTimeout. + // only leader keeps heartbeatElapsed. + heartbeatElapsed int +} + +// newRaft return a raft peer with the given config +func newRaft(c *Config) *Raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + // Your Code Here (2A). + return nil +} + +// sendAppend sends an append RPC with new entries (if any) and the +// current commit index to the given peer. Returns true if a message was sent. +func (r *Raft) sendAppend(to uint64) bool { + // Your Code Here (2A). + return false +} + +// sendHeartbeat sends a heartbeat RPC to the given peer. +func (r *Raft) sendHeartbeat(to uint64) { + // Your Code Here (2A). +} + +// tick advances the internal logical clock by a single tick. +func (r *Raft) tick() { + // Your Code Here (2A). +} + +// becomeFollower transform this peer's state to Follower +func (r *Raft) becomeFollower(term uint64, lead uint64) { + // Your Code Here (2A). +} + +// becomeCandidate transform this peer's state to candidate +func (r *Raft) becomeCandidate() { + // Your Code Here (2A). +} + +// becomeLeader transform this peer's state to leader +func (r *Raft) becomeLeader() { + // Your Code Here (2A). +} + +// Step the entrance of handle message, see `MessageType` +// on `eraftpb.proto` for what msgs should be handled +func (r *Raft) Step(m pb.Message) error { + // Your Code Here (2A). + switch r.State { + case StateFollower: + case StateCandidate: + case StateLeader: + } + return nil +} + +// handleAppendEntries handle AppendEntries RPC request +func (r *Raft) handleAppendEntries(m pb.Message) { + // Your Code Here (2A). +} + +// handleHeartbeat handle Heartbeat RPC request +func (r *Raft) handleHeartbeat(m pb.Message) { + // Your Code Here (2A). +} + +// handleSnapshot handle Snapshot RPC request +func (r *Raft) handleSnapshot(m pb.Message) { + // Your Code Here (2C). +} + +// addNode add a new node to raft group +func (r *Raft) addNode(id uint64) { + // Your Code Here (3A). +} + +// removeNode remove a node from raft group +func (r *Raft) removeNode(id uint64) { + // Your Code Here (3A). 
+} diff --git a/raft/raft_paper_test.go b/raft/raft_paper_test.go new file mode 100644 index 00000000..edd1fd44 --- /dev/null +++ b/raft/raft_paper_test.go @@ -0,0 +1,932 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +This file contains tests which verify that the scenarios described +in the raft paper (https://ramcloud.stanford.edu/raft.pdf) are +handled by the raft implementation correctly. Each test focuses on +several sentences written in the paper. This could help us to prevent +most implementation bugs. + +Each test is composed of three parts: init, test and check. +Init part uses simple and understandable way to simulate the init state. +Test part uses Step function to generate the scenario. Check part checks +outgoing messages and state. +*/ +package raft + +import ( + "fmt" + "reflect" + "sort" + "testing" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +func TestFollowerUpdateTermFromMessage2AA(t *testing.T) { + testUpdateTermFromMessage(t, StateFollower) +} +func TestCandidateUpdateTermFromMessage2AA(t *testing.T) { + testUpdateTermFromMessage(t, StateCandidate) +} +func TestLeaderUpdateTermFromMessage2AA(t *testing.T) { + testUpdateTermFromMessage(t, StateLeader) +} + +// testUpdateTermFromMessage tests that if one server’s current term is +// smaller than the other’s, then it updates its current term to the larger +// value. If a candidate or leader discovers that its term is out of date, +// it immediately reverts to follower state. +// Reference: section 5.1 +func testUpdateTermFromMessage(t *testing.T, state StateType) { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + switch state { + case StateFollower: + r.becomeFollower(1, 2) + case StateCandidate: + r.becomeCandidate() + case StateLeader: + r.becomeCandidate() + r.becomeLeader() + } + + r.Step(pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2}) + + if r.Term != 2 { + t.Errorf("term = %d, want %d", r.Term, 2) + } + if r.State != StateFollower { + t.Errorf("state = %v, want %v", r.State, StateFollower) + } +} + +// TestStartAsFollower tests that when servers start up, they begin as followers. +// Reference: section 5.2 +func TestStartAsFollower2AA(t *testing.T) { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + if r.State != StateFollower { + t.Errorf("state = %s, want %s", r.State, StateFollower) + } +} + +// TestLeaderBcastBeat tests that if the leader receives a heartbeat tick, +// it will send a MessageType_MsgHeartbeat with m.Index = 0, m.LogTerm=0 and empty entries +// as heartbeat to all followers. 
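+// (Index and LogTerm are zero because a heartbeat carries no log entries; it mainly
+// asserts the leader's authority for the current term.)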
+// Reference: section 5.2 +func TestLeaderBcastBeat2AA(t *testing.T) { + // heartbeat interval + hi := 1 + r := newTestRaft(1, []uint64{1, 2, 3}, 10, hi, NewMemoryStorage()) + r.becomeCandidate() + r.becomeLeader() + + r.Step(pb.Message{MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + r.readMessages() // clear message + + for i := 0; i < hi; i++ { + r.tick() + } + + msgs := r.readMessages() + sort.Sort(messageSlice(msgs)) + wmsgs := []pb.Message{ + {From: 1, To: 2, Term: 1, MsgType: pb.MessageType_MsgHeartbeat}, + {From: 1, To: 3, Term: 1, MsgType: pb.MessageType_MsgHeartbeat}, + } + if !reflect.DeepEqual(msgs, wmsgs) { + t.Errorf("msgs = %v, want %v", msgs, wmsgs) + } +} + +func TestFollowerStartElection2AA(t *testing.T) { + testNonleaderStartElection(t, StateFollower) +} +func TestCandidateStartNewElection2AA(t *testing.T) { + testNonleaderStartElection(t, StateCandidate) +} + +// testNonleaderStartElection tests that if a follower receives no communication +// over election timeout, it begins an election to choose a new leader. It +// increments its current term and transitions to candidate state. It then +// votes for itself and issues RequestVote RPCs in parallel to each of the +// other servers in the cluster. +// Reference: section 5.2 +// Also if a candidate fails to obtain a majority, it will time out and +// start a new election by incrementing its term and initiating another +// round of RequestVote RPCs. +// Reference: section 5.2 +func testNonleaderStartElection(t *testing.T, state StateType) { + // election timeout + et := 10 + r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage()) + switch state { + case StateFollower: + r.becomeFollower(1, 2) + case StateCandidate: + r.becomeCandidate() + } + + for i := 1; i < 2*et; i++ { + r.tick() + } + + if r.Term != 2 { + t.Errorf("term = %d, want 2", r.Term) + } + if r.State != StateCandidate { + t.Errorf("state = %s, want %s", r.State, StateCandidate) + } + if !r.votes[r.id] { + t.Errorf("vote for self = false, want true") + } + msgs := r.readMessages() + sort.Sort(messageSlice(msgs)) + wmsgs := []pb.Message{ + {From: 1, To: 2, Term: 2, MsgType: pb.MessageType_MsgRequestVote}, + {From: 1, To: 3, Term: 2, MsgType: pb.MessageType_MsgRequestVote}, + } + if !reflect.DeepEqual(msgs, wmsgs) { + t.Errorf("msgs = %v, want %v", msgs, wmsgs) + } +} + +// TestLeaderElectionInOneRoundRPC tests all cases that may happen in +// leader election during one round of RequestVote RPC: +// a) it wins the election +// b) it loses the election +// c) it is unclear about the result +// Reference: section 5.2 +func TestLeaderElectionInOneRoundRPC2AA(t *testing.T) { + tests := []struct { + size int + votes map[uint64]bool + state StateType + }{ + // win the election when receiving votes from a majority of the servers + {1, map[uint64]bool{}, StateLeader}, + {3, map[uint64]bool{2: true, 3: true}, StateLeader}, + {3, map[uint64]bool{2: true}, StateLeader}, + {5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, StateLeader}, + {5, map[uint64]bool{2: true, 3: true, 4: true}, StateLeader}, + {5, map[uint64]bool{2: true, 3: true}, StateLeader}, + + // return to follower state if it receives vote denial from a majority + {3, map[uint64]bool{2: false, 3: false}, StateFollower}, + {5, map[uint64]bool{2: false, 3: false, 4: false, 5: false}, StateFollower}, + {5, map[uint64]bool{2: true, 3: false, 4: false, 5: false}, StateFollower}, + + // stay in candidate if it does not obtain the majority + {3, map[uint64]bool{}, StateCandidate}, + 
{5, map[uint64]bool{2: true}, StateCandidate}, + {5, map[uint64]bool{2: false, 3: false}, StateCandidate}, + {5, map[uint64]bool{}, StateCandidate}, + } + for i, tt := range tests { + r := newTestRaft(1, idsBySize(tt.size), 10, 1, NewMemoryStorage()) + + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + for id, vote := range tt.votes { + r.Step(pb.Message{From: id, To: 1, Term: r.Term, MsgType: pb.MessageType_MsgRequestVoteResponse, Reject: !vote}) + } + + if r.State != tt.state { + t.Errorf("#%d: state = %s, want %s", i, r.State, tt.state) + } + if g := r.Term; g != 1 { + t.Errorf("#%d: term = %d, want %d", i, g, 1) + } + } +} + +// TestFollowerVote tests that each follower will vote for at most one +// candidate in a given term, on a first-come-first-served basis. +// Reference: section 5.2 +func TestFollowerVote2AA(t *testing.T) { + tests := []struct { + vote uint64 + nvote uint64 + wreject bool + }{ + {None, 1, false}, + {None, 2, false}, + {1, 1, false}, + {2, 2, false}, + {1, 2, true}, + {2, 1, true}, + } + for i, tt := range tests { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + r.Term = 1 + r.Vote = tt.vote + + r.Step(pb.Message{From: tt.nvote, To: 1, Term: 1, MsgType: pb.MessageType_MsgRequestVote}) + + msgs := r.readMessages() + wmsgs := []pb.Message{ + {From: 1, To: tt.nvote, Term: 1, MsgType: pb.MessageType_MsgRequestVoteResponse, Reject: tt.wreject}, + } + if !reflect.DeepEqual(msgs, wmsgs) { + t.Errorf("#%d: msgs = %v, want %v", i, msgs, wmsgs) + } + } +} + +// TestCandidateFallback tests that while waiting for votes, +// if a candidate receives an AppendEntries RPC from another server claiming +// to be leader whose term is at least as large as the candidate's current term, +// it recognizes the leader as legitimate and returns to follower state. +// Reference: section 5.2 +func TestCandidateFallback2AA(t *testing.T) { + tests := []pb.Message{ + {From: 2, To: 1, Term: 1, MsgType: pb.MessageType_MsgAppend}, + {From: 2, To: 1, Term: 2, MsgType: pb.MessageType_MsgAppend}, + } + for i, tt := range tests { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + if r.State != StateCandidate { + t.Fatalf("unexpected state = %s, want %s", r.State, StateCandidate) + } + + r.Step(tt) + + if g := r.State; g != StateFollower { + t.Errorf("#%d: state = %s, want %s", i, g, StateFollower) + } + if g := r.Term; g != tt.Term { + t.Errorf("#%d: term = %d, want %d", i, g, tt.Term) + } + } +} + +func TestFollowerElectionTimeoutRandomized2AA(t *testing.T) { + testNonleaderElectionTimeoutRandomized(t, StateFollower) +} +func TestCandidateElectionTimeoutRandomized2AA(t *testing.T) { + testNonleaderElectionTimeoutRandomized(t, StateCandidate) +} + +// testNonleaderElectionTimeoutRandomized tests that election timeout for +// follower or candidate is randomized. 
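+// A typical way to satisfy this (illustrative sketch, assuming math/rand is
+// imported) is to re-draw the timeout whenever the peer becomes a follower
+// or candidate:
+//
+//	r.randomizedElectionTimeout = r.electionTimeout + rand.Intn(r.electionTimeout)
+//
+// which yields a value in [electionTimeout, 2*electionTimeout-1], matching
+// the comment on the Raft struct.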
+// Reference: section 5.2 +func testNonleaderElectionTimeoutRandomized(t *testing.T, state StateType) { + et := 10 + r := newTestRaft(1, []uint64{1, 2, 3}, et, 1, NewMemoryStorage()) + timeouts := make(map[int]bool) + for round := 0; round < 50*et; round++ { + switch state { + case StateFollower: + r.becomeFollower(r.Term+1, 2) + case StateCandidate: + r.becomeCandidate() + } + + time := 0 + for len(r.readMessages()) == 0 { + r.tick() + time++ + } + timeouts[time] = true + } + + for d := et + 1; d < 2*et; d++ { + if !timeouts[d] { + t.Errorf("timeout in %d ticks should happen", d) + } + } +} + +func TestFollowersElectionTimeoutNonconflict2AA(t *testing.T) { + testNonleadersElectionTimeoutNonconflict(t, StateFollower) +} +func TestCandidatesElectionTimeoutNonconflict2AA(t *testing.T) { + testNonleadersElectionTimeoutNonconflict(t, StateCandidate) +} + +// testNonleadersElectionTimeoutNonconflict tests that in most cases only a +// single server(follower or candidate) will time out, which reduces the +// likelihood of split vote in the new election. +// Reference: section 5.2 +func testNonleadersElectionTimeoutNonconflict(t *testing.T, state StateType) { + et := 10 + size := 5 + rs := make([]*Raft, size) + ids := idsBySize(size) + for k := range rs { + rs[k] = newTestRaft(ids[k], ids, et, 1, NewMemoryStorage()) + } + conflicts := 0 + for round := 0; round < 1000; round++ { + for _, r := range rs { + switch state { + case StateFollower: + r.becomeFollower(r.Term+1, None) + case StateCandidate: + r.becomeCandidate() + } + } + + timeoutNum := 0 + for timeoutNum == 0 { + for _, r := range rs { + r.tick() + if len(r.readMessages()) > 0 { + timeoutNum++ + } + } + } + // several rafts time out at the same tick + if timeoutNum > 1 { + conflicts++ + } + } + + if g := float64(conflicts) / 1000; g > 0.3 { + t.Errorf("probability of conflicts = %v, want <= 0.3", g) + } +} + +// TestLeaderStartReplication tests that when receiving client proposals, +// the leader appends the proposal to its log as a new entry, then issues +// AppendEntries RPCs in parallel to each of the other servers to replicate +// the entry. Also, when sending an AppendEntries RPC, the leader includes +// the index and term of the entry in its log that immediately precedes +// the new entries. +// Also, it writes the new entry into stable storage. 
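+// In outline, a leader handling MessageType_MsgPropose does the following
+// (illustrative sketch; appendEntry and bcastAppend are assumed helper
+// names, not part of the skeleton):
+//
+//	lastIndex := r.RaftLog.LastIndex()
+//	for i, e := range m.Entries {
+//		e.Term, e.Index = r.Term, lastIndex+uint64(i)+1
+//	}
+//	r.appendEntry(m.Entries...) // append to the unstable part of the log
+//	r.Prs[r.id].Match = r.RaftLog.LastIndex()
+//	r.Prs[r.id].Next = r.Prs[r.id].Match + 1
+//	r.bcastAppend() // sendAppend to every other peer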
+// Reference: section 5.3 +func TestLeaderStartReplication2AB(t *testing.T) { + s := NewMemoryStorage() + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s) + r.becomeCandidate() + r.becomeLeader() + commitNoopEntry(r, s) + li := r.RaftLog.LastIndex() + + ents := []*pb.Entry{{Data: []byte("some data")}} + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: ents}) + + if g := r.RaftLog.LastIndex(); g != li+1 { + t.Errorf("lastIndex = %d, want %d", g, li+1) + } + if g := r.RaftLog.committed; g != li { + t.Errorf("committed = %d, want %d", g, li) + } + msgs := r.readMessages() + sort.Sort(messageSlice(msgs)) + ent := pb.Entry{Index: li + 1, Term: 1, Data: []byte("some data")} + wents := []pb.Entry{ent} + wmsgs := []pb.Message{ + {From: 1, To: 2, Term: 1, MsgType: pb.MessageType_MsgAppend, Index: li, LogTerm: 1, Entries: []*pb.Entry{&ent}, Commit: li}, + {From: 1, To: 3, Term: 1, MsgType: pb.MessageType_MsgAppend, Index: li, LogTerm: 1, Entries: []*pb.Entry{&ent}, Commit: li}, + } + if !reflect.DeepEqual(msgs, wmsgs) { + t.Errorf("msgs = %+v, want %+v", msgs, wmsgs) + } + if g := r.RaftLog.unstableEntries(); !reflect.DeepEqual(g, wents) { + t.Errorf("ents = %+v, want %+v", g, wents) + } +} + +// TestLeaderCommitEntry tests that when the entry has been safely replicated, +// the leader gives out the applied entries, which can be applied to its state +// machine. +// Also, the leader keeps track of the highest index it knows to be committed, +// and it includes that index in future AppendEntries RPCs so that the other +// servers eventually find out. +// Reference: section 5.3 +func TestLeaderCommitEntry2AB(t *testing.T) { + s := NewMemoryStorage() + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, s) + r.becomeCandidate() + r.becomeLeader() + commitNoopEntry(r, s) + li := r.RaftLog.LastIndex() + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + + for _, m := range r.readMessages() { + r.Step(acceptAndReply(m)) + } + + if g := r.RaftLog.committed; g != li+1 { + t.Errorf("committed = %d, want %d", g, li+1) + } + wents := []pb.Entry{{Index: li + 1, Term: 1, Data: []byte("some data")}} + if g := r.RaftLog.nextEnts(); !reflect.DeepEqual(g, wents) { + t.Errorf("nextEnts = %+v, want %+v", g, wents) + } + msgs := r.readMessages() + sort.Sort(messageSlice(msgs)) + for i, m := range msgs { + if w := uint64(i + 2); m.To != w { + t.Errorf("to = %d, want %d", m.To, w) + } + if m.MsgType != pb.MessageType_MsgAppend { + t.Errorf("type = %v, want %v", m.MsgType, pb.MessageType_MsgAppend) + } + if m.Commit != li+1 { + t.Errorf("commit = %d, want %d", m.Commit, li+1) + } + } +} + +// TestLeaderAcknowledgeCommit tests that a log entry is committed once the +// leader that created the entry has replicated it on a majority of the servers. 
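+// The usual commit rule (illustrative sketch; bcastAppend is an assumed
+// helper name): after updating Prs[from].Match from an append response, take
+// the largest index replicated on a majority and, per section 5.4.2, only
+// advance committed if that index belongs to the current term:
+//
+//	matches := make([]uint64, 0, len(r.Prs))
+//	for _, pr := range r.Prs {
+//		matches = append(matches, pr.Match)
+//	}
+//	sort.Slice(matches, func(i, j int) bool { return matches[i] < matches[j] })
+//	quorumIndex := matches[(len(matches)-1)/2]
+//	if term, _ := r.RaftLog.Term(quorumIndex); term == r.Term && quorumIndex > r.RaftLog.committed {
+//		r.RaftLog.committed = quorumIndex
+//		r.bcastAppend() // tell the followers about the new commit index
+//	}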
+// Reference: section 5.3 +func TestLeaderAcknowledgeCommit2AB(t *testing.T) { + tests := []struct { + size int + acceptors map[uint64]bool + wack bool + }{ + {1, nil, true}, + {3, nil, false}, + {3, map[uint64]bool{2: true}, true}, + {3, map[uint64]bool{2: true, 3: true}, true}, + {5, nil, false}, + {5, map[uint64]bool{2: true}, false}, + {5, map[uint64]bool{2: true, 3: true}, true}, + {5, map[uint64]bool{2: true, 3: true, 4: true}, true}, + {5, map[uint64]bool{2: true, 3: true, 4: true, 5: true}, true}, + } + for i, tt := range tests { + s := NewMemoryStorage() + r := newTestRaft(1, idsBySize(tt.size), 10, 1, s) + r.becomeCandidate() + r.becomeLeader() + commitNoopEntry(r, s) + li := r.RaftLog.LastIndex() + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + + for _, m := range r.readMessages() { + if tt.acceptors[m.To] { + r.Step(acceptAndReply(m)) + } + } + + if g := r.RaftLog.committed > li; g != tt.wack { + t.Errorf("#%d: ack commit = %v, want %v", i, g, tt.wack) + } + } +} + +// TestLeaderCommitPrecedingEntries tests that when leader commits a log entry, +// it also commits all preceding entries in the leader’s log, including +// entries created by previous leaders. +// Also, it applies the entry to its local state machine (in log order). +// Reference: section 5.3 +func TestLeaderCommitPrecedingEntries2AB(t *testing.T) { + tests := [][]pb.Entry{ + {}, + {{Term: 2, Index: 1}}, + {{Term: 1, Index: 1}, {Term: 2, Index: 2}}, + {{Term: 1, Index: 1}}, + } + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append(tt) + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage) + r.Term = 2 + r.becomeCandidate() + r.becomeLeader() + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + + for _, m := range r.readMessages() { + r.Step(acceptAndReply(m)) + } + + li := uint64(len(tt)) + wents := append(tt, pb.Entry{Term: 3, Index: li + 1}, pb.Entry{Term: 3, Index: li + 2, Data: []byte("some data")}) + if g := r.RaftLog.nextEnts(); !reflect.DeepEqual(g, wents) { + t.Errorf("#%d: ents = %+v, want %+v", i, g, wents) + } + } +} + +// TestFollowerCommitEntry tests that once a follower learns that a log entry +// is committed, it applies the entry to its local state machine (in log order). 
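+// After accepting the entries, a follower typically advances its commit
+// index like this (illustrative sketch; min is the helper used elsewhere in
+// these tests):
+//
+//	lastNewEntry := m.Index + uint64(len(m.Entries))
+//	if m.Commit > r.RaftLog.committed {
+//		r.RaftLog.committed = min(m.Commit, lastNewEntry)
+//	}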
+// Reference: section 5.3 +func TestFollowerCommitEntry2AB(t *testing.T) { + tests := []struct { + ents []*pb.Entry + commit uint64 + }{ + { + []*pb.Entry{ + {Term: 1, Index: 1, Data: []byte("some data")}, + }, + 1, + }, + { + []*pb.Entry{ + {Term: 1, Index: 1, Data: []byte("some data")}, + {Term: 1, Index: 2, Data: []byte("some data2")}, + }, + 2, + }, + { + []*pb.Entry{ + {Term: 1, Index: 1, Data: []byte("some data2")}, + {Term: 1, Index: 2, Data: []byte("some data")}, + }, + 2, + }, + { + []*pb.Entry{ + {Term: 1, Index: 1, Data: []byte("some data")}, + {Term: 1, Index: 2, Data: []byte("some data2")}, + }, + 1, + }, + } + for i, tt := range tests { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + r.becomeFollower(1, 2) + + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgAppend, Term: 1, Entries: tt.ents, Commit: tt.commit}) + + if g := r.RaftLog.committed; g != tt.commit { + t.Errorf("#%d: committed = %d, want %d", i, g, tt.commit) + } + wents := make([]pb.Entry, 0, tt.commit) + for _, ent := range tt.ents[:int(tt.commit)] { + wents = append(wents, *ent) + } + if g := r.RaftLog.nextEnts(); !reflect.DeepEqual(g, wents) { + t.Errorf("#%d: nextEnts = %v, want %v", i, g, wents) + } + } +} + +// TestFollowerCheckMessageType_MsgAppend tests that if the follower does not find an +// entry in its log with the same index and term as the one in AppendEntries RPC, +// then it refuses the new entries. Otherwise it replies that it accepts the +// append entries. +// Reference: section 5.3 +func TestFollowerCheckMessageType_MsgAppend2AB(t *testing.T) { + ents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}} + tests := []struct { + term uint64 + index uint64 + wreject bool + }{ + // match with committed entries + {0, 0, false}, + {ents[0].Term, ents[0].Index, false}, + // match with uncommitted entries + {ents[1].Term, ents[1].Index, false}, + + // unmatch with existing entry + {ents[0].Term, ents[1].Index, true}, + // unexisting entry + {ents[1].Term + 1, ents[1].Index + 1, true}, + } + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append(ents) + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage) + r.RaftLog.committed = 1 + r.becomeFollower(2, 2) + msgs := r.readMessages() // clear message + + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: tt.term, Index: tt.index}) + + msgs = r.readMessages() + if len(msgs) != 1 { + t.Errorf("#%d: len(msgs) = %+v, want %+v", i, len(msgs), 1) + } + if msgs[0].Term != 2 { + t.Errorf("#%d: term = %+v, want %+v", i, msgs[0].Term, 2) + } + if msgs[0].Reject != tt.wreject { + t.Errorf("#%d: term = %+v, want %+v", i, msgs[0].Reject, tt.wreject) + } + } +} + +// TestFollowerAppendEntries tests that when AppendEntries RPC is valid, +// the follower will delete the existing conflict entry and all that follow it, +// and append any new entries not already in the log. +// Also, it writes the new entry into stable storage. 
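+// A common shape for the conflict handling (illustrative sketch; the slice
+// bookkeeping depends on how RaftLog stores entries, and truncateFrom is an
+// assumed helper name):
+//
+//	for i, e := range m.Entries {
+//		if t, err := r.RaftLog.Term(e.Index); err != nil || t != e.Term {
+//			// first conflicting (or missing) slot: drop everything from
+//			// e.Index onward, append the remaining new entries, and pull
+//			// stabled back because truncated entries may already be on disk
+//			r.RaftLog.truncateFrom(e.Index)
+//			for _, ne := range m.Entries[i:] {
+//				r.RaftLog.entries = append(r.RaftLog.entries, *ne)
+//			}
+//			r.RaftLog.stabled = min(r.RaftLog.stabled, e.Index-1)
+//			break
+//		}
+//	}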
+// Reference: section 5.3 +func TestFollowerAppendEntries2AB(t *testing.T) { + tests := []struct { + index, term uint64 + ents []*pb.Entry + wents []*pb.Entry + wunstable []*pb.Entry + }{ + { + 2, 2, + []*pb.Entry{{Term: 3, Index: 3}}, + []*pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}, {Term: 3, Index: 3}}, + []*pb.Entry{{Term: 3, Index: 3}}, + }, + { + 1, 1, + []*pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}}, + []*pb.Entry{{Term: 1, Index: 1}, {Term: 3, Index: 2}, {Term: 4, Index: 3}}, + []*pb.Entry{{Term: 3, Index: 2}, {Term: 4, Index: 3}}, + }, + { + 0, 0, + []*pb.Entry{{Term: 1, Index: 1}}, + []*pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}, + []*pb.Entry{}, + }, + { + 0, 0, + []*pb.Entry{{Term: 3, Index: 1}}, + []*pb.Entry{{Term: 3, Index: 1}}, + []*pb.Entry{{Term: 3, Index: 1}}, + }, + } + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append([]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}) + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, storage) + r.becomeFollower(2, 2) + + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: tt.term, Index: tt.index, Entries: tt.ents}) + + wents := make([]pb.Entry, 0, len(tt.wents)) + for _, ent := range tt.wents { + wents = append(wents, *ent) + } + if g := r.RaftLog.entries; !reflect.DeepEqual(g, wents) { + t.Errorf("#%d: ents = %+v, want %+v", i, g, wents) + } + var wunstable []pb.Entry + if tt.wunstable != nil { + wunstable = make([]pb.Entry, 0, len(tt.wunstable)) + } + for _, ent := range tt.wunstable { + wunstable = append(wunstable, *ent) + } + if g := r.RaftLog.unstableEntries(); !reflect.DeepEqual(g, wunstable) { + t.Errorf("#%d: unstableEnts = %+v, want %+v", i, g, wunstable) + } + } +} + +// TestLeaderSyncFollowerLog tests that the leader could bring a follower's log +// into consistency with its own. 
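+// Consistency is reached by the usual back-off on rejection (illustrative
+// sketch): when an append response carries Reject, the leader lowers Next
+// for that follower and retries until the follower finds a matching
+// prevLogIndex/prevLogTerm:
+//
+//	if m.Reject {
+//		if r.Prs[m.From].Next > 1 {
+//			r.Prs[m.From].Next--
+//		}
+//		r.sendAppend(m.From)
+//	}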
+// Reference: section 5.3, figure 7 +func TestLeaderSyncFollowerLog2AB(t *testing.T) { + ents := []pb.Entry{ + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 4, Index: 4}, {Term: 4, Index: 5}, + {Term: 5, Index: 6}, {Term: 5, Index: 7}, + {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, + } + term := uint64(8) + tests := [][]pb.Entry{ + { + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 4, Index: 4}, {Term: 4, Index: 5}, + {Term: 5, Index: 6}, {Term: 5, Index: 7}, + {Term: 6, Index: 8}, {Term: 6, Index: 9}, + }, + { + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 4, Index: 4}, + }, + { + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 4, Index: 4}, {Term: 4, Index: 5}, + {Term: 5, Index: 6}, {Term: 5, Index: 7}, + {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, {Term: 6, Index: 11}, + }, + { + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 4, Index: 4}, {Term: 4, Index: 5}, + {Term: 5, Index: 6}, {Term: 5, Index: 7}, + {Term: 6, Index: 8}, {Term: 6, Index: 9}, {Term: 6, Index: 10}, + {Term: 7, Index: 11}, {Term: 7, Index: 12}, + }, + { + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 4, Index: 4}, {Term: 4, Index: 5}, {Term: 4, Index: 6}, {Term: 4, Index: 7}, + }, + { + {}, + {Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}, + {Term: 2, Index: 4}, {Term: 2, Index: 5}, {Term: 2, Index: 6}, + {Term: 3, Index: 7}, {Term: 3, Index: 8}, {Term: 3, Index: 9}, {Term: 3, Index: 10}, {Term: 3, Index: 11}, + }, + } + for i, tt := range tests { + leadStorage := NewMemoryStorage() + leadStorage.Append(ents) + lead := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, leadStorage) + lead.Term = term + lead.RaftLog.committed = lead.RaftLog.LastIndex() + followerStorage := NewMemoryStorage() + followerStorage.Append(tt) + follower := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, followerStorage) + follower.Term = term - 1 + // It is necessary to have a three-node cluster. + // The second may have more up-to-date log than the first one, so the + // first node needs the vote from the third node to become the leader. + n := newNetwork(lead, follower, nopStepper) + n.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + // The election occurs in the term after the one we loaded with + // lead's term and commited index setted up above. + n.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgRequestVoteResponse, Term: term + 1}) + + n.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + + if g := diffu(ltoa(lead.RaftLog), ltoa(follower.RaftLog)); g != "" { + t.Errorf("#%d: log diff:\n%s", i, g) + } + } +} + +// TestVoteRequest tests that the vote request includes information about the candidate’s log +// and are sent to all of the other nodes. 
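+// When campaigning, the candidate attaches its last log position to every
+// request (illustrative sketch):
+//
+//	lastIndex := r.RaftLog.LastIndex()
+//	lastTerm, _ := r.RaftLog.Term(lastIndex)
+//	for id := range r.Prs {
+//		if id == r.id {
+//			continue
+//		}
+//		r.msgs = append(r.msgs, pb.Message{
+//			MsgType: pb.MessageType_MsgRequestVote,
+//			From:    r.id,
+//			To:      id,
+//			Term:    r.Term,
+//			Index:   lastIndex,
+//			LogTerm: lastTerm,
+//		})
+//	}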
+// Reference: section 5.4.1 +func TestVoteRequest2AB(t *testing.T) { + tests := []struct { + ents []*pb.Entry + wterm uint64 + }{ + {[]*pb.Entry{{Term: 1, Index: 1}}, 2}, + {[]*pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}, 3}, + } + for j, tt := range tests { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + r.Step(pb.Message{ + From: 2, To: 1, MsgType: pb.MessageType_MsgAppend, Term: tt.wterm - 1, LogTerm: 0, Index: 0, Entries: tt.ents, + }) + r.readMessages() + + for r.State != StateCandidate { + r.tick() + } + + msgs := r.readMessages() + sort.Sort(messageSlice(msgs)) + if len(msgs) != 2 { + t.Fatalf("#%d: len(msg) = %d, want %d", j, len(msgs), 2) + } + for i, m := range msgs { + if m.MsgType != pb.MessageType_MsgRequestVote { + t.Errorf("#%d: msgType = %d, want %d", i, m.MsgType, pb.MessageType_MsgRequestVote) + } + if m.To != uint64(i+2) { + t.Errorf("#%d: to = %d, want %d", i, m.To, i+2) + } + if m.Term != tt.wterm { + t.Errorf("#%d: term = %d, want %d", i, m.Term, tt.wterm) + } + windex, wlogterm := tt.ents[len(tt.ents)-1].Index, tt.ents[len(tt.ents)-1].Term + if m.Index != windex { + t.Errorf("#%d: index = %d, want %d", i, m.Index, windex) + } + if m.LogTerm != wlogterm { + t.Errorf("#%d: logterm = %d, want %d", i, m.LogTerm, wlogterm) + } + } + } +} + +// TestVoter tests the voter denies its vote if its own log is more up-to-date +// than that of the candidate. +// Reference: section 5.4.1 +func TestVoter2AA(t *testing.T) { + tests := []struct { + ents []pb.Entry + logterm uint64 + index uint64 + + wreject bool + }{ + // same logterm + {[]pb.Entry{{Term: 1, Index: 1}}, 1, 1, false}, + {[]pb.Entry{{Term: 1, Index: 1}}, 1, 2, false}, + {[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true}, + // candidate higher logterm + {[]pb.Entry{{Term: 1, Index: 1}}, 2, 1, false}, + {[]pb.Entry{{Term: 1, Index: 1}}, 2, 2, false}, + {[]pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}}, 2, 1, false}, + // voter higher logterm + {[]pb.Entry{{Term: 2, Index: 1}}, 1, 1, true}, + {[]pb.Entry{{Term: 2, Index: 1}}, 1, 2, true}, + {[]pb.Entry{{Term: 2, Index: 1}, {Term: 1, Index: 2}}, 1, 1, true}, + } + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append(tt.ents) + r := newTestRaft(1, []uint64{1, 2}, 10, 1, storage) + + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgRequestVote, Term: 3, LogTerm: tt.logterm, Index: tt.index}) + + msgs := r.readMessages() + if len(msgs) != 1 { + t.Fatalf("#%d: len(msg) = %d, want %d", i, len(msgs), 1) + } + m := msgs[0] + if m.MsgType != pb.MessageType_MsgRequestVoteResponse { + t.Errorf("#%d: msgType = %d, want %d", i, m.MsgType, pb.MessageType_MsgRequestVoteResponse) + } + if m.Reject != tt.wreject { + t.Errorf("#%d: reject = %t, want %t", i, m.Reject, tt.wreject) + } + } +} + +// TestLeaderOnlyCommitsLogFromCurrentTerm tests that only log entries from the leader’s +// current term are committed by counting replicas. 
+// Reference: section 5.4.2 +func TestLeaderOnlyCommitsLogFromCurrentTerm2AB(t *testing.T) { + ents := []pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}} + tests := []struct { + index uint64 + wcommit uint64 + }{ + // do not commit log entries in previous terms + {1, 0}, + {2, 0}, + // commit log in current term + {3, 3}, + } + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append(ents) + r := newTestRaft(1, []uint64{1, 2}, 10, 1, storage) + r.Term = 2 + // become leader at term 3 + r.becomeCandidate() + r.becomeLeader() + r.readMessages() + // propose a entry to current term + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgAppendResponse, Term: r.Term, Index: tt.index}) + if r.RaftLog.committed != tt.wcommit { + t.Errorf("#%d: commit = %d, want %d", i, r.RaftLog.committed, tt.wcommit) + } + } +} + +type messageSlice []pb.Message + +func (s messageSlice) Len() int { return len(s) } +func (s messageSlice) Less(i, j int) bool { return fmt.Sprint(s[i]) < fmt.Sprint(s[j]) } +func (s messageSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func commitNoopEntry(r *Raft, s *MemoryStorage) { + if r.State != StateLeader { + panic("it should only be used when it is the leader") + } + for id := range r.Prs { + if id == r.id { + continue + } + + r.sendAppend(id) + } + // simulate the response of MessageType_MsgAppend + msgs := r.readMessages() + for _, m := range msgs { + if m.MsgType != pb.MessageType_MsgAppend || len(m.Entries) != 1 || m.Entries[0].Data != nil { + panic("not a message to append noop entry") + } + r.Step(acceptAndReply(m)) + } + // ignore further messages to refresh followers' commit index + r.readMessages() + s.Append(r.RaftLog.unstableEntries()) + r.RaftLog.applied = r.RaftLog.committed + r.RaftLog.stabled = r.RaftLog.LastIndex() +} + +func acceptAndReply(m pb.Message) pb.Message { + if m.MsgType != pb.MessageType_MsgAppend { + panic("type should be MessageType_MsgAppend") + } + return pb.Message{ + From: m.To, + To: m.From, + Term: m.Term, + MsgType: pb.MessageType_MsgAppendResponse, + Index: m.Index + uint64(len(m.Entries)), + } +} diff --git a/raft/raft_test.go b/raft/raft_test.go new file mode 100644 index 00000000..fbe50431 --- /dev/null +++ b/raft/raft_test.go @@ -0,0 +1,1718 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "fmt" + "math/rand" + "reflect" + "testing" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +// nextEnts returns the appliable entries and updates the applied index +func nextEnts(r *Raft, s *MemoryStorage) (ents []pb.Entry) { + // Transfer all unstable entries to "stable" storage. 
+ s.Append(r.RaftLog.unstableEntries()) + r.RaftLog.stabled = r.RaftLog.LastIndex() + + ents = r.RaftLog.nextEnts() + r.RaftLog.applied = r.RaftLog.committed + return ents +} + +type stateMachine interface { + Step(m pb.Message) error + readMessages() []pb.Message +} + +func (r *Raft) readMessages() []pb.Message { + msgs := r.msgs + r.msgs = make([]pb.Message, 0) + + return msgs +} + +func TestProgressLeader2AB(t *testing.T) { + r := newTestRaft(1, []uint64{1, 2}, 5, 1, NewMemoryStorage()) + r.becomeCandidate() + r.becomeLeader() + + // Send proposals to r1. The first 5 entries should be appended to the log. + propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("foo")}}} + for i := 0; i < 5; i++ { + if pr := r.Prs[r.id]; pr.Match != uint64(i+1) || pr.Next != pr.Match+1 { + t.Errorf("unexpected progress %v", pr) + } + if err := r.Step(propMsg); err != nil { + t.Fatalf("proposal resulted in error: %v", err) + } + } +} + +func TestLeaderElection2AA(t *testing.T) { + var cfg func(*Config) + candState := StateCandidate + candTerm := uint64(1) + tests := []struct { + *network + state StateType + expTerm uint64 + }{ + {newNetworkWithConfig(cfg, nil, nil, nil), StateLeader, 1}, + {newNetworkWithConfig(cfg, nil, nil, nopStepper), StateLeader, 1}, + {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper), candState, candTerm}, + {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil), candState, candTerm}, + {newNetworkWithConfig(cfg, nil, nopStepper, nopStepper, nil, nil), StateLeader, 1}, + + // three logs further along than 0, but in the same term so rejections + // are returned instead of the votes being ignored. + {newNetworkWithConfig(cfg, + nil, entsWithConfig(cfg, 1), entsWithConfig(cfg, 1), entsWithConfig(cfg, 1, 1), nil), + StateFollower, 1}, + } + + for i, tt := range tests { + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + sm := tt.network.peers[1].(*Raft) + if sm.State != tt.state { + t.Errorf("#%d: state = %s, want %s", i, sm.State, tt.state) + } + if g := sm.Term; g != tt.expTerm { + t.Errorf("#%d: term = %d, want %d", i, g, tt.expTerm) + } + } +} + +// testLeaderCycle verifies that each node in a cluster can campaign +// and be elected in turn. This ensures that elections work when not +// starting from a clean slate (as they do in TestLeaderElection) +func TestLeaderCycle2AA(t *testing.T) { + var cfg func(*Config) + n := newNetworkWithConfig(cfg, nil, nil, nil) + for campaignerID := uint64(1); campaignerID <= 3; campaignerID++ { + n.send(pb.Message{From: campaignerID, To: campaignerID, MsgType: pb.MessageType_MsgHup}) + + for _, peer := range n.peers { + sm := peer.(*Raft) + if sm.id == campaignerID && sm.State != StateLeader { + t.Errorf("campaigning node %d state = %v, want StateLeader", + sm.id, sm.State) + } else if sm.id != campaignerID && sm.State != StateFollower { + t.Errorf("after campaign of node %d, "+ + "node %d had state = %v, want StateFollower", + campaignerID, sm.id, sm.State) + } + } + } +} + +// TestLeaderElectionOverwriteNewerLogs tests a scenario in which a +// newly-elected leader does *not* have the newest (i.e. highest term) +// log entries, and must overwrite higher-term log entries with +// lower-term ones. +func TestLeaderElectionOverwriteNewerLogs2AB(t *testing.T) { + var cfg func(*Config) + // This network represents the results of the following sequence of + // events: + // - Node 1 won the election in term 1. 
+ // - Node 1 replicated a log entry to node 2 but died before sending + // it to other nodes. + // - Node 3 won the second election in term 2. + // - Node 3 wrote an entry to its logs but died without sending it + // to any other nodes. + // + // At this point, nodes 1, 2, and 3 all have uncommitted entries in + // their logs and could win an election at term 3. The winner's log + // entry overwrites the losers'. (TestLeaderSyncFollowerLog tests + // the case where older log entries are overwritten, so this test + // focuses on the case where the newer entries are lost). + n := newNetworkWithConfig(cfg, + entsWithConfig(cfg, 1), // Node 1: Won first election + entsWithConfig(cfg, 1), // Node 2: Got logs from node 1 + entsWithConfig(cfg, 2), // Node 3: Won second election + votedWithConfig(cfg, 3, 2), // Node 4: Voted but didn't get logs + votedWithConfig(cfg, 3, 2)) // Node 5: Voted but didn't get logs + + // Node 1 campaigns. The election fails because a quorum of nodes + // know about the election that already happened at term 2. Node 1's + // term is pushed ahead to 2. + n.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + sm1 := n.peers[1].(*Raft) + if sm1.State != StateFollower { + t.Errorf("state = %s, want StateFollower", sm1.State) + } + if sm1.Term != 2 { + t.Errorf("term = %d, want 2", sm1.Term) + } + + // Node 1 campaigns again with a higher term. This time it succeeds. + n.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + if sm1.State != StateLeader { + t.Errorf("state = %s, want StateLeader", sm1.State) + } + if sm1.Term != 3 { + t.Errorf("term = %d, want 3", sm1.Term) + } + + // Now all nodes agree on a log entry with term 1 at index 1 (and + // term 3 at index 2). + for i := range n.peers { + sm := n.peers[i].(*Raft) + entries := sm.RaftLog.entries + if len(entries) != 2 { + t.Fatalf("node %d: len(entries) == %d, want 2", i, len(entries)) + } + if entries[0].Term != 1 { + t.Errorf("node %d: term at index 1 == %d, want 1", i, entries[0].Term) + } + if entries[1].Term != 3 { + t.Errorf("node %d: term at index 2 == %d, want 3", i, entries[1].Term) + } + } +} + +func TestVoteFromAnyState2AA(t *testing.T) { + vt := pb.MessageType_MsgRequestVote + vt_resp := pb.MessageType_MsgRequestVoteResponse + for st := StateType(0); st <= StateLeader; st++ { + r := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + r.Term = 1 + + switch st { + case StateFollower: + r.becomeFollower(r.Term, 3) + case StateCandidate: + r.becomeCandidate() + case StateLeader: + r.becomeCandidate() + r.becomeLeader() + } + r.readMessages() // clear message + + // Note that setting our state above may have advanced r.Term + // past its initial value. + newTerm := r.Term + 1 + + msg := pb.Message{ + From: 2, + To: 1, + MsgType: vt, + Term: newTerm, + LogTerm: newTerm, + Index: 42, + } + if err := r.Step(msg); err != nil { + t.Errorf("%s,%s: Step failed: %s", vt, st, err) + } + if len(r.msgs) != 1 { + t.Errorf("%s,%s: %d response messages, want 1: %+v", vt, st, len(r.msgs), r.msgs) + } else { + resp := r.msgs[0] + if resp.MsgType != vt_resp { + t.Errorf("%s,%s: response message is %s, want %s", + vt, st, resp.MsgType, vt_resp) + } + if resp.Reject { + t.Errorf("%s,%s: unexpected rejection", vt, st) + } + } + + // If this was a vote, we reset our state and term. 
+ if r.State != StateFollower { + t.Errorf("%s,%s: state %s, want %s", vt, st, r.State, StateFollower) + } + if r.Term != newTerm { + t.Errorf("%s,%s: term %d, want %d", vt, st, r.Term, newTerm) + } + if r.Vote != 2 { + t.Errorf("%s,%s: vote %d, want 2", vt, st, r.Vote) + } + } +} + +func TestLogReplication2AB(t *testing.T) { + tests := []struct { + *network + msgs []pb.Message + wcommitted uint64 + }{ + { + newNetwork(nil, nil, nil), + []pb.Message{ + {From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("somedata")}}}, + }, + 2, + }, + { + newNetwork(nil, nil, nil), + []pb.Message{ + {From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("somedata")}}}, + {From: 1, To: 2, MsgType: pb.MessageType_MsgHup}, + {From: 1, To: 2, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("somedata")}}}, + }, + 4, + }, + } + + for i, tt := range tests { + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + for _, m := range tt.msgs { + tt.send(m) + } + + for j, x := range tt.network.peers { + sm := x.(*Raft) + + if sm.RaftLog.committed != tt.wcommitted { + t.Errorf("#%d.%d: committed = %d, want %d", i, j, sm.RaftLog.committed, tt.wcommitted) + } + + ents := []pb.Entry{} + for _, e := range nextEnts(sm, tt.network.storage[j]) { + if e.Data != nil { + ents = append(ents, e) + } + } + props := []pb.Message{} + for _, m := range tt.msgs { + if m.MsgType == pb.MessageType_MsgPropose { + props = append(props, m) + } + } + for k, m := range props { + if !bytes.Equal(ents[k].Data, m.Entries[0].Data) { + t.Errorf("#%d.%d: data = %d, want %d", i, j, ents[k].Data, m.Entries[0].Data) + } + } + } + } +} + +func TestSingleNodeCommit2AB(t *testing.T) { + tt := newNetwork(nil) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + + sm := tt.peers[1].(*Raft) + if sm.RaftLog.committed != 3 { + t.Errorf("committed = %d, want %d", sm.RaftLog.committed, 3) + } +} + +// TestCommitWithoutNewTermEntry tests the entries could be committed +// when leader changes with noop entry and no new proposal comes in. 
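+// This relies on becomeLeader appending an empty entry of the new term right
+// away (illustrative sketch; appendEntry and bcastAppend are assumed helper
+// names, and a single-node group may commit the entry immediately):
+//
+//	func (r *Raft) becomeLeader() {
+//		r.State = StateLeader
+//		r.Lead = r.id
+//		for _, pr := range r.Prs {
+//			pr.Match, pr.Next = 0, r.RaftLog.LastIndex()+1
+//		}
+//		r.appendEntry(&pb.Entry{}) // the no-op entry for the new term
+//		r.Prs[r.id].Match = r.RaftLog.LastIndex()
+//		r.Prs[r.id].Next = r.Prs[r.id].Match + 1
+//		r.bcastAppend()
+//	}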
+func TestCommitWithoutNewTermEntry2AB(t *testing.T) { + tt := newNetwork(nil, nil, nil, nil, nil) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + // 0 cannot reach 2,3,4 + tt.cut(1, 3) + tt.cut(1, 4) + tt.cut(1, 5) + + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("some data")}}}) + + sm := tt.peers[1].(*Raft) + if sm.RaftLog.committed != 1 { + t.Errorf("committed = %d, want %d", sm.RaftLog.committed, 1) + } + + // network recovery + tt.recover() + + // elect 2 as the new leader with term 2 + // after append a ChangeTerm entry from the current term, all entries + // should be committed + tt.send(pb.Message{From: 2, To: 2, MsgType: pb.MessageType_MsgHup}) + + if sm.RaftLog.committed != 4 { + t.Errorf("committed = %d, want %d", sm.RaftLog.committed, 4) + } +} + +func TestDuelingCandidates2AB(t *testing.T) { + a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + + nt := newNetwork(a, b, c) + nt.cut(1, 3) + + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + nt.send(pb.Message{From: 3, To: 3, MsgType: pb.MessageType_MsgHup}) + + // 1 becomes leader since it receives votes from 1 and 2 + sm := nt.peers[1].(*Raft) + if sm.State != StateLeader { + t.Errorf("state = %s, want %s", sm.State, StateLeader) + } + + // 3 stays as candidate since it receives a vote from 3 and a rejection from 2 + sm = nt.peers[3].(*Raft) + if sm.State != StateCandidate { + t.Errorf("state = %s, want %s", sm.State, StateCandidate) + } + + nt.recover() + + // candidate 3 now increases its term and tries to vote again + // we expect it to disrupt the leader 1 since it has a higher term + // 3 will be follower again since both 1 and 2 rejects its vote request since 3 does not have a long enough log + nt.send(pb.Message{From: 3, To: 3, MsgType: pb.MessageType_MsgHup}) + + wlog := newLog(&MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}}}) + wlog.committed = 1 + tests := []struct { + sm *Raft + state StateType + term uint64 + raftLog *RaftLog + }{ + {a, StateFollower, 2, wlog}, + {b, StateFollower, 2, wlog}, + {c, StateFollower, 2, newLog(NewMemoryStorage())}, + } + + for i, tt := range tests { + if g := tt.sm.State; g != tt.state { + t.Errorf("#%d: state = %s, want %s", i, g, tt.state) + } + if g := tt.sm.Term; g != tt.term { + t.Errorf("#%d: term = %d, want %d", i, g, tt.term) + } + base := ltoa(tt.raftLog) + if sm, ok := nt.peers[1+uint64(i)].(*Raft); ok { + l := ltoa(sm.RaftLog) + if g := diffu(base, l); g != "" { + t.Errorf("#%d: diff:\n%s", i, g) + } + } else { + t.Logf("#%d: empty log", i) + } + } +} + +func TestCandidateConcede2AB(t *testing.T) { + tt := newNetwork(nil, nil, nil) + tt.isolate(1) + + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + tt.send(pb.Message{From: 3, To: 3, MsgType: pb.MessageType_MsgHup}) + + // heal the partition + tt.recover() + // send heartbeat; reset wait + tt.send(pb.Message{From: 3, To: 3, MsgType: pb.MessageType_MsgBeat}) + + data := []byte("force follower") + // send a proposal to 3 to flush out a MessageType_MsgAppend to 1 + tt.send(pb.Message{From: 3, To: 3, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: data}}}) + // send heartbeat; flush out commit + 
tt.send(pb.Message{From: 3, To: 3, MsgType: pb.MessageType_MsgBeat}) + + a := tt.peers[1].(*Raft) + if g := a.State; g != StateFollower { + t.Errorf("state = %s, want %s", g, StateFollower) + } + if g := a.Term; g != 1 { + t.Errorf("term = %d, want %d", g, 1) + } + wlog := newLog(&MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}}}) + wlog.committed = 2 + wantLog := ltoa(wlog) + for i, p := range tt.peers { + if sm, ok := p.(*Raft); ok { + l := ltoa(sm.RaftLog) + if g := diffu(wantLog, l); g != "" { + t.Errorf("#%d: diff:\n%s", i, g) + } + } else { + t.Logf("#%d: empty log", i) + } + } +} + +func TestSingleNodeCandidate2AA(t *testing.T) { + tt := newNetwork(nil) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + sm := tt.peers[1].(*Raft) + if sm.State != StateLeader { + t.Errorf("state = %d, want %d", sm.State, StateLeader) + } +} + +func TestOldMessages2AB(t *testing.T) { + tt := newNetwork(nil, nil, nil) + // make 0 leader @ term 3 + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + tt.send(pb.Message{From: 2, To: 2, MsgType: pb.MessageType_MsgHup}) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + // pretend we're an old leader trying to make progress; this entry is expected to be ignored. + tt.send(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgAppend, Term: 2, Entries: []*pb.Entry{{Index: 3, Term: 2}}}) + // commit a new entry + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("somedata")}}}) + + ilog := newLog( + &MemoryStorage{ + ents: []pb.Entry{ + {}, {Data: nil, Term: 1, Index: 1}, + {Data: nil, Term: 2, Index: 2}, {Data: nil, Term: 3, Index: 3}, + {Data: []byte("somedata"), Term: 3, Index: 4}, + }, + }) + ilog.committed = 4 + base := ltoa(ilog) + for i, p := range tt.peers { + if sm, ok := p.(*Raft); ok { + l := ltoa(sm.RaftLog) + if g := diffu(base, l); g != "" { + t.Errorf("#%d: diff:\n%s", i, g) + } + } else { + t.Logf("#%d: empty log", i) + } + } +} + +func TestProposal2AB(t *testing.T) { + tests := []struct { + *network + success bool + }{ + {newNetwork(nil, nil, nil), true}, + {newNetwork(nil, nil, nopStepper), true}, + {newNetwork(nil, nopStepper, nopStepper), false}, + {newNetwork(nil, nopStepper, nopStepper, nil), false}, + {newNetwork(nil, nopStepper, nopStepper, nil, nil), true}, + } + + for j, tt := range tests { + data := []byte("somedata") + + // promote 1 to become leader + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + tt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: data}}}) + + wantLog := newLog(NewMemoryStorage()) + if tt.success { + wantLog = newLog(&MemoryStorage{ents: []pb.Entry{{}, {Data: nil, Term: 1, Index: 1}, {Term: 1, Index: 2, Data: data}}}) + wantLog.committed = 2 + } + base := ltoa(wantLog) + for i, p := range tt.peers { + if sm, ok := p.(*Raft); ok { + l := ltoa(sm.RaftLog) + if g := diffu(base, l); g != "" { + t.Errorf("#%d: diff:\n%s", i, g) + } + } else { + t.Logf("#%d: empty log", i) + } + } + sm := tt.network.peers[1].(*Raft) + if g := sm.Term; g != 1 { + t.Errorf("#%d: term = %d, want %d", j, g, 1) + } + } +} + +// TestHandleMessageType_MsgAppend ensures: +// 1. Reply false if log doesn’t contain an entry at prevLogIndex whose term matches prevLogTerm. +// 2. 
If an existing entry conflicts with a new one (same index but different terms), +// delete the existing entry and all that follow it; append any new entries not already in the log. +// 3. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry). +func TestHandleMessageType_MsgAppend2AB(t *testing.T) { + tests := []struct { + m pb.Message + wIndex uint64 + wCommit uint64 + wReject bool + }{ + // Ensure 1 + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 3, Index: 2, Commit: 3}, 2, 0, true}, // previous log mismatch + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 3, Index: 3, Commit: 3}, 2, 0, true}, // previous log non-exist + + // Ensure 2 + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 1, Index: 1, Commit: 1}, 2, 1, false}, + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 0, Index: 0, Commit: 1, Entries: []*pb.Entry{{Index: 1, Term: 2}}}, 1, 1, false}, + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 2, Index: 2, Commit: 3, Entries: []*pb.Entry{{Index: 3, Term: 2}, {Index: 4, Term: 2}}}, 4, 3, false}, + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 2, Index: 2, Commit: 4, Entries: []*pb.Entry{{Index: 3, Term: 2}}}, 3, 3, false}, + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 1, Index: 1, Commit: 4, Entries: []*pb.Entry{{Index: 2, Term: 2}}}, 2, 2, false}, + + // Ensure 3 + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 1, LogTerm: 1, Index: 1, Commit: 3}, 2, 1, false}, // match entry 1, commit up to last new entry 1 + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 1, LogTerm: 1, Index: 1, Commit: 3, Entries: []*pb.Entry{{Index: 2, Term: 2}}}, 2, 2, false}, // match entry 1, commit up to last new entry 2 + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 2, Index: 2, Commit: 3}, 2, 2, false}, // match entry 2, commit up to last new entry 2 + {pb.Message{MsgType: pb.MessageType_MsgAppend, Term: 2, LogTerm: 2, Index: 2, Commit: 4}, 2, 2, false}, // commit up to log.last() + } + + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}}) + sm := newTestRaft(1, []uint64{1}, 10, 1, storage) + sm.becomeFollower(2, None) + + sm.handleAppendEntries(tt.m) + if sm.RaftLog.LastIndex() != tt.wIndex { + t.Errorf("#%d: lastIndex = %d, want %d", i, sm.RaftLog.LastIndex(), tt.wIndex) + } + if sm.RaftLog.committed != tt.wCommit { + t.Errorf("#%d: committed = %d, want %d", i, sm.RaftLog.committed, tt.wCommit) + } + m := sm.readMessages() + if len(m) != 1 { + t.Fatalf("#%d: msg = nil, want 1", i) + } + if m[0].Reject != tt.wReject { + t.Errorf("#%d: reject = %v, want %v", i, m[0].Reject, tt.wReject) + } + } +} + +// TestHandleHeartbeat ensures that the follower commits to the commit in the message. 
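+// A sketch consistent with this test (illustrative only; clamping against
+// LastIndex and the leader bookkeeping are assumptions):
+//
+//	func (r *Raft) handleHeartbeat(m pb.Message) {
+//		r.Lead = m.From
+//		r.electionElapsed = 0
+//		if c := min(m.Commit, r.RaftLog.LastIndex()); c > r.RaftLog.committed {
+//			r.RaftLog.committed = c
+//		}
+//		r.msgs = append(r.msgs, pb.Message{
+//			MsgType: pb.MessageType_MsgHeartbeatResponse,
+//			From:    r.id,
+//			To:      m.From,
+//			Term:    r.Term,
+//		})
+//	}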
+func TestHandleHeartbeat2AA(t *testing.T) { + commit := uint64(2) + tests := []struct { + m pb.Message + wCommit uint64 + }{ + {pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgHeartbeat, Term: 2, Commit: commit + 1}, commit + 1}, + {pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgHeartbeat, Term: 2, Commit: commit - 1}, commit}, // do not decrease commit + } + + for i, tt := range tests { + storage := NewMemoryStorage() + storage.Append([]pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}) + sm := newTestRaft(1, []uint64{1, 2}, 5, 1, storage) + sm.becomeFollower(2, 2) + sm.RaftLog.committed = commit + sm.handleHeartbeat(tt.m) + if sm.RaftLog.committed != tt.wCommit { + t.Errorf("#%d: committed = %d, want %d", i, sm.RaftLog.committed, tt.wCommit) + } + m := sm.readMessages() + if len(m) != 1 { + t.Fatalf("#%d: msg = nil, want 1", i) + } + if m[0].MsgType != pb.MessageType_MsgHeartbeatResponse { + t.Errorf("#%d: type = %v, want MessageType_MsgHeartbeatResponse", i, m[0].MsgType) + } + } +} + +func TestRecvMessageType_MsgRequestVote2AA(t *testing.T) { + msgType := pb.MessageType_MsgRequestVote + msgRespType := pb.MessageType_MsgRequestVoteResponse + tests := []struct { + state StateType + index, logTerm uint64 + voteFor uint64 + wreject bool + }{ + {StateFollower, 0, 0, None, true}, + {StateFollower, 0, 1, None, true}, + {StateFollower, 0, 2, None, true}, + {StateFollower, 0, 3, None, false}, + + {StateFollower, 1, 0, None, true}, + {StateFollower, 1, 1, None, true}, + {StateFollower, 1, 2, None, true}, + {StateFollower, 1, 3, None, false}, + + {StateFollower, 2, 0, None, true}, + {StateFollower, 2, 1, None, true}, + {StateFollower, 2, 2, None, false}, + {StateFollower, 2, 3, None, false}, + + {StateFollower, 3, 0, None, true}, + {StateFollower, 3, 1, None, true}, + {StateFollower, 3, 2, None, false}, + {StateFollower, 3, 3, None, false}, + + {StateFollower, 3, 2, 2, false}, + {StateFollower, 3, 2, 1, true}, + + {StateLeader, 3, 3, 1, true}, + {StateCandidate, 3, 3, 1, true}, + } + + max := func(a, b uint64) uint64 { + if a > b { + return a + } + return b + } + + for i, tt := range tests { + sm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage()) + sm.State = tt.state + sm.Vote = tt.voteFor + sm.RaftLog = newLog(&MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 2}, {Index: 2, Term: 2}}}) + + // raft.Term is greater than or equal to raft.RaftLog.lastTerm. In this + // test we're only testing MessageType_MsgRequestVote responses when the campaigning node + // has a different raft log compared to the recipient node. + // Additionally we're verifying behaviour when the recipient node has + // already given out its vote for its current term. We're not testing + // what the recipient node does when receiving a message with a + // different term number, so we simply initialize both term numbers to + // be the same. 
+ lterm, err := sm.RaftLog.Term(sm.RaftLog.LastIndex()) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + term := max(lterm, tt.logTerm) + sm.Term = term + sm.Step(pb.Message{MsgType: msgType, Term: term, From: 2, Index: tt.index, LogTerm: tt.logTerm}) + + msgs := sm.readMessages() + if g := len(msgs); g != 1 { + t.Fatalf("#%d: len(msgs) = %d, want 1", i, g) + continue + } + if g := msgs[0].MsgType; g != msgRespType { + t.Errorf("#%d, m.MsgType = %v, want %v", i, g, msgRespType) + } + if g := msgs[0].Reject; g != tt.wreject { + t.Errorf("#%d, m.Reject = %v, want %v", i, g, tt.wreject) + } + } +} + +func TestAllServerStepdown2AB(t *testing.T) { + tests := []struct { + state StateType + + wstate StateType + wterm uint64 + windex uint64 + }{ + {StateFollower, StateFollower, 3, 0}, + {StateCandidate, StateFollower, 3, 0}, + {StateLeader, StateFollower, 3, 1}, + } + + tmsgTypes := [...]pb.MessageType{pb.MessageType_MsgRequestVote, pb.MessageType_MsgAppend} + tterm := uint64(3) + + for i, tt := range tests { + sm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + switch tt.state { + case StateFollower: + sm.becomeFollower(1, None) + case StateCandidate: + sm.becomeCandidate() + case StateLeader: + sm.becomeCandidate() + sm.becomeLeader() + } + + for j, msgType := range tmsgTypes { + sm.Step(pb.Message{From: 2, MsgType: msgType, Term: tterm, LogTerm: tterm}) + + if sm.State != tt.wstate { + t.Errorf("#%d.%d state = %v , want %v", i, j, sm.State, tt.wstate) + } + if sm.Term != tt.wterm { + t.Errorf("#%d.%d term = %v , want %v", i, j, sm.Term, tt.wterm) + } + if sm.RaftLog.LastIndex() != tt.windex { + t.Errorf("#%d.%d index = %v , want %v", i, j, sm.RaftLog.LastIndex(), tt.windex) + } + if uint64(len(sm.RaftLog.entries)) != tt.windex { + t.Errorf("#%d.%d len(ents) = %v , want %v", i, j, len(sm.RaftLog.entries), tt.windex) + } + wlead := uint64(2) + if msgType == pb.MessageType_MsgRequestVote { + wlead = None + } + if sm.Lead != wlead { + t.Errorf("#%d, sm.Lead = %d, want %d", i, sm.Lead, None) + } + } + } +} + +func TestCandidateResetTermMessageType_MsgHeartbeat2AA(t *testing.T) { + testCandidateResetTerm(t, pb.MessageType_MsgHeartbeat) +} + +func TestCandidateResetTermMessageType_MsgAppend2AA(t *testing.T) { + testCandidateResetTerm(t, pb.MessageType_MsgAppend) +} + +// testCandidateResetTerm tests when a candidate receives a +// MessageType_MsgHeartbeat or MessageType_MsgAppend from leader, "Step" resets the term +// with leader's and reverts back to follower. 
+func testCandidateResetTerm(t *testing.T, mt pb.MessageType) { + a := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + b := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + c := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + + nt := newNetwork(a, b, c) + + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + if a.State != StateLeader { + t.Errorf("state = %s, want %s", a.State, StateLeader) + } + if b.State != StateFollower { + t.Errorf("state = %s, want %s", b.State, StateFollower) + } + if c.State != StateFollower { + t.Errorf("state = %s, want %s", c.State, StateFollower) + } + + // isolate 3 and increase term in rest + nt.isolate(3) + + nt.send(pb.Message{From: 2, To: 2, MsgType: pb.MessageType_MsgHup}) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + if a.State != StateLeader { + t.Errorf("state = %s, want %s", a.State, StateLeader) + } + if b.State != StateFollower { + t.Errorf("state = %s, want %s", b.State, StateFollower) + } + + for c.State != StateCandidate { + c.tick() + } + + nt.recover() + + // leader sends to isolated candidate + // and expects candidate to revert to follower + nt.send(pb.Message{From: 1, To: 3, Term: a.Term, MsgType: mt}) + + if c.State != StateFollower { + t.Errorf("state = %s, want %s", c.State, StateFollower) + } + + // follower c term is reset with leader's + if a.Term != c.Term { + t.Errorf("follower term expected same term as leader's %d, got %d", a.Term, c.Term) + } +} + +// TestDisruptiveFollower tests isolated follower, +// with slow network incoming from leader, election times out +// to become a candidate with an increased term. Then, the +// candiate's response to late leader heartbeat forces the leader +// to step down. +func TestDisruptiveFollower2AA(t *testing.T) { + n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + + n1.becomeFollower(1, None) + n2.becomeFollower(1, None) + n3.becomeFollower(1, None) + + nt := newNetwork(n1, n2, n3) + + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + // check state + // n1.State == StateLeader + // n2.State == StateFollower + // n3.State == StateFollower + if n1.State != StateLeader { + t.Fatalf("node 1 state: %s, want %s", n1.State, StateLeader) + } + if n2.State != StateFollower { + t.Fatalf("node 2 state: %s, want %s", n2.State, StateFollower) + } + if n3.State != StateFollower { + t.Fatalf("node 3 state: %s, want %s", n3.State, StateFollower) + } + + // etcd server "advanceTicksForElection" on restart; + // this is to expedite campaign trigger when given larger + // election timeouts (e.g. 
multi-datacenter deploy) + // Or leader messages are being delayed while ticks elapse + for n3.State != StateCandidate { + n3.tick() + } + + // n1 is still leader yet + // while its heartbeat to candidate n3 is being delayed + + // check state + // n1.State == StateLeader + // n2.State == StateFollower + // n3.State == StateCandidate + if n1.State != StateLeader { + t.Fatalf("node 1 state: %s, want %s", n1.State, StateLeader) + } + if n2.State != StateFollower { + t.Fatalf("node 2 state: %s, want %s", n2.State, StateFollower) + } + if n3.State != StateCandidate { + t.Fatalf("node 3 state: %s, want %s", n3.State, StateCandidate) + } + // check term + // n1.Term == 2 + // n2.Term == 2 + // n3.Term == 3 + if n1.Term != 2 { + t.Fatalf("node 1 term: %d, want %d", n1.Term, 2) + } + if n2.Term != 2 { + t.Fatalf("node 2 term: %d, want %d", n2.Term, 2) + } + if n3.Term != 3 { + t.Fatalf("node 3 term: %d, want %d", n3.Term, 3) + } + + // while outgoing vote requests are still queued in n3, + // leader heartbeat finally arrives at candidate n3 + // however, due to delayed network from leader, leader + // heartbeat was sent with lower term than candidate's + nt.send(pb.Message{From: 1, To: 3, Term: n1.Term, MsgType: pb.MessageType_MsgHeartbeat}) + + // then candidate n3 responds with "pb.MessageType_MsgAppendResponse" of higher term + // and leader steps down from a message with higher term + // this is to disrupt the current leader, so that candidate + // with higher term can be freed with following election + + // check state + if n1.State != StateFollower { + t.Fatalf("node 1 state: %s, want %s", n1.State, StateFollower) + } + + // check term + if n1.Term != 3 { + t.Fatalf("node 1 term: %d, want %d", n1.Term, 3) + } +} + +// When the leader receives a heartbeat tick, it should +// send a MessageType_MsgHeartbeat with m.Index = 0, m.LogTerm=0 and empty entries. 
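+// The tick that drives this is usually just a pair of counters (illustrative
+// sketch):
+//
+//	func (r *Raft) tick() {
+//		if r.State == StateLeader {
+//			r.heartbeatElapsed++
+//			if r.heartbeatElapsed >= r.heartbeatTimeout {
+//				r.heartbeatElapsed = 0
+//				r.Step(pb.Message{From: r.id, To: r.id, MsgType: pb.MessageType_MsgBeat})
+//			}
+//			return
+//		}
+//		r.electionElapsed++
+//		if r.electionElapsed >= r.randomizedElectionTimeout {
+//			r.electionElapsed = 0
+//			r.Step(pb.Message{From: r.id, To: r.id, MsgType: pb.MessageType_MsgHup})
+//		}
+//	}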
+func TestBcastBeat2AB(t *testing.T) { + offset := uint64(1000) + // make a state machine with log.offset = 1000 + s := pb.Snapshot{ + Metadata: &pb.SnapshotMetadata{ + Index: offset, + Term: 1, + ConfState: &pb.ConfState{Nodes: []uint64{1, 2, 3}}, + }, + } + storage := NewMemoryStorage() + storage.ApplySnapshot(s) + sm := newTestRaft(1, nil, 10, 1, storage) + sm.Term = 1 + + sm.becomeCandidate() + sm.becomeLeader() + sm.Step(pb.Message{MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + sm.readMessages() // clear message + // slow follower + sm.Prs[2].Match, sm.Prs[2].Next = 5, 6 + // normal follower + sm.Prs[3].Match, sm.Prs[3].Next = sm.RaftLog.LastIndex(), sm.RaftLog.LastIndex()+1 + + sm.Step(pb.Message{MsgType: pb.MessageType_MsgBeat}) + msgs := sm.readMessages() + if len(msgs) != 2 { + t.Fatalf("len(msgs) = %v, want 2", len(msgs)) + } + wantCommitMap := map[uint64]uint64{ + 2: min(sm.RaftLog.committed, sm.Prs[2].Match), + 3: min(sm.RaftLog.committed, sm.Prs[3].Match), + } + for i, m := range msgs { + if m.MsgType != pb.MessageType_MsgHeartbeat { + t.Fatalf("#%d: type = %v, want = %v", i, m.MsgType, pb.MessageType_MsgHeartbeat) + } + if m.Index != 0 { + t.Fatalf("#%d: prevIndex = %d, want %d", i, m.Index, 0) + } + if m.LogTerm != 0 { + t.Fatalf("#%d: prevTerm = %d, want %d", i, m.LogTerm, 0) + } + if wantCommitMap[m.To] == 0 { + t.Fatalf("#%d: unexpected to %d", i, m.To) + } else { + if m.Commit != wantCommitMap[m.To] { + t.Fatalf("#%d: commit = %d, want %d", i, m.Commit, wantCommitMap[m.To]) + } + delete(wantCommitMap, m.To) + } + if len(m.Entries) != 0 { + t.Fatalf("#%d: len(entries) = %d, want 0", i, len(m.Entries)) + } + } +} + +// tests the output of the state machine when receiving MessageType_MsgBeat +func TestRecvMessageType_MsgBeat2AA(t *testing.T) { + tests := []struct { + state StateType + wMsg int + }{ + {StateLeader, 2}, + // candidate and follower should ignore MessageType_MsgBeat + {StateCandidate, 0}, + {StateFollower, 0}, + } + + for i, tt := range tests { + sm := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + sm.RaftLog = newLog(&MemoryStorage{ents: []pb.Entry{{}, {Index: 1, Term: 0}, {Index: 2, Term: 1}}}) + sm.Term = 1 + sm.State = tt.state + sm.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgBeat}) + + msgs := sm.readMessages() + if len(msgs) != tt.wMsg { + t.Errorf("%d: len(msgs) = %d, want %d", i, len(msgs), tt.wMsg) + } + for _, m := range msgs { + if m.MsgType != pb.MessageType_MsgHeartbeat { + t.Errorf("%d: msg.Msgtype = %v, want %v", i, m.MsgType, pb.MessageType_MsgHeartbeat) + } + } + } +} + +func TestLeaderIncreaseNext2AB(t *testing.T) { + previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}} + // previous entries + noop entry + propose + 1 + wnext := uint64(len(previousEnts)) + 1 + 1 + 1 + + storage := NewMemoryStorage() + storage.Append(previousEnts) + sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage) + nt := newNetwork(sm, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Data: []byte("somedata")}}}) + + p := sm.Prs[2] + if p.Next != wnext { + t.Errorf("next = %d, want %d", p.Next, wnext) + } +} + +func TestRestoreSnapshot2C(t *testing.T) { + s := pb.Snapshot{ + Metadata: &pb.SnapshotMetadata{ + Index: 11, // magic number + Term: 11, // magic number + ConfState: &pb.ConfState{Nodes: []uint64{1, 2, 3}}, + }, + } + + storage := NewMemoryStorage() + sm 
:= newTestRaft(1, []uint64{1, 2}, 10, 1, storage) + sm.handleSnapshot(pb.Message{Snapshot: &s}) + + if sm.RaftLog.LastIndex() != s.Metadata.Index { + t.Errorf("log.lastIndex = %d, want %d", sm.RaftLog.LastIndex(), s.Metadata.Index) + } + if mustTerm(sm.RaftLog.Term(s.Metadata.Index)) != s.Metadata.Term { + t.Errorf("log.lastTerm = %d, want %d", mustTerm(sm.RaftLog.Term(s.Metadata.Index)), s.Metadata.Term) + } + sg := nodes(sm) + if !reflect.DeepEqual(sg, s.Metadata.ConfState.Nodes) { + t.Errorf("sm.Nodes = %+v, want %+v", sg, s.Metadata.ConfState.Nodes) + } +} + +func TestRestoreIgnoreSnapshot2C(t *testing.T) { + previousEnts := []pb.Entry{{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 1, Index: 3}} + storage := NewMemoryStorage() + storage.Append(previousEnts) + sm := newTestRaft(1, []uint64{1, 2}, 10, 1, storage) + sm.RaftLog.committed = 3 + + commit := uint64(1) + s := pb.Snapshot{ + Metadata: &pb.SnapshotMetadata{ + Index: commit, + Term: 1, + ConfState: &pb.ConfState{Nodes: []uint64{1, 2}}, + }, + } + + // ignore snapshot + sm.handleSnapshot(pb.Message{Snapshot: &s}) + if sm.RaftLog.committed == commit { + t.Errorf("commit = %d, want %d", sm.RaftLog.committed, commit) + } +} + +func TestProvideSnap2C(t *testing.T) { + // restore the state machine from a snapshot so it has a compacted log and a snapshot + s := pb.Snapshot{ + Metadata: &pb.SnapshotMetadata{ + Index: 11, // magic number + Term: 11, // magic number + ConfState: &pb.ConfState{Nodes: []uint64{1, 2}}, + }, + } + storage := NewMemoryStorage() + sm := newTestRaft(1, []uint64{1}, 10, 1, storage) + sm.handleSnapshot(pb.Message{Snapshot: &s}) + + sm.becomeCandidate() + sm.becomeLeader() + sm.readMessages() // clear message + + // force set the next of node 2, so that node 2 needs a snapshot + sm.Prs[2].Next = 0 + sm.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgAppendResponse, Index: sm.Prs[2].Next - 1, Reject: true}) + + msgs := sm.readMessages() + if len(msgs) != 1 { + t.Fatalf("len(msgs) = %d, want 1", len(msgs)) + } + m := msgs[0] + if m.MsgType != pb.MessageType_MsgSnapshot { + t.Errorf("m.MsgType = %v, want %v", m.MsgType, pb.MessageType_MsgSnapshot) + } +} + +func TestRestoreFromSnapMsg2C(t *testing.T) { + s := pb.Snapshot{ + Metadata: &pb.SnapshotMetadata{ + Index: 11, // magic number + Term: 11, // magic number + ConfState: &pb.ConfState{Nodes: []uint64{1, 2}}, + }, + } + m := pb.Message{MsgType: pb.MessageType_MsgSnapshot, From: 1, Term: 2, Snapshot: &s} + + sm := newTestRaft(2, []uint64{1, 2}, 10, 1, NewMemoryStorage()) + sm.Step(m) + + if sm.Lead != uint64(1) { + t.Errorf("sm.Lead = %d, want 1", sm.Lead) + } +} + +func TestSlowNodeRestore2C(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.isolate(3) + for j := 0; j <= 100; j++ { + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + } + lead := nt.peers[1].(*Raft) + nextEnts(lead, nt.storage[1]) + nt.storage[1].CreateSnapshot(lead.RaftLog.applied, &pb.ConfState{Nodes: nodes(lead)}, nil) + nt.storage[1].Compact(lead.RaftLog.applied) + + nt.recover() + + // send heartbeats so that the leader can learn everyone is active. + // node 3 will only be considered as active when node 1 receives a reply from it. 
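+	// By this point the leader's log has been compacted past node 3's progress,
+	// so bringing node 3 back up to date requires a snapshot rather than
+	// ordinary append messages (see "trigger a snapshot" below).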
+ nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgBeat}) + + // trigger a snapshot + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + + follower := nt.peers[3].(*Raft) + + // trigger a commit + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + if follower.RaftLog.committed != lead.RaftLog.committed { + t.Errorf("follower.committed = %d, want %d", follower.RaftLog.committed, lead.RaftLog.committed) + } +} + +// TestAddNode tests that addNode could update nodes correctly. +func TestAddNode3A(t *testing.T) { + r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage()) + r.addNode(2) + nodes := nodes(r) + wnodes := []uint64{1, 2} + if !reflect.DeepEqual(nodes, wnodes) { + t.Errorf("nodes = %v, want %v", nodes, wnodes) + } +} + +// TestRemoveNode tests that removeNode could update nodes and +// and removed list correctly. +func TestRemoveNode3A(t *testing.T) { + r := newTestRaft(1, []uint64{1, 2}, 10, 1, NewMemoryStorage()) + r.removeNode(2) + w := []uint64{1} + if g := nodes(r); !reflect.DeepEqual(g, w) { + t.Errorf("nodes = %v, want %v", g, w) + } + + // remove all nodes from cluster + r.removeNode(1) + w = []uint64{} + if g := nodes(r); !reflect.DeepEqual(g, w) { + t.Errorf("nodes = %v, want %v", g, w) + } +} + +func TestCampaignWhileLeader2AA(t *testing.T) { + cfg := newTestConfig(1, []uint64{1}, 5, 1, NewMemoryStorage()) + r := newRaft(cfg) + if r.State != StateFollower { + t.Errorf("expected new node to be follower but got %s", r.State) + } + // We don't call campaign() directly because it comes after the check + // for our current state. + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + if r.State != StateLeader { + t.Errorf("expected single-node election to become leader but got %s", r.State) + } + term := r.Term + r.Step(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + if r.State != StateLeader { + t.Errorf("expected to remain leader but got %s", r.State) + } + if r.Term != term { + t.Errorf("expected to remain in term %v but got %v", term, r.Term) + } +} + +// TestCommitAfterRemoveNode verifies that pending commands can become +// committed when a config change reduces the quorum requirements. +func TestCommitAfterRemoveNode3A(t *testing.T) { + // Create a cluster with two nodes. + s := NewMemoryStorage() + r := newTestRaft(1, []uint64{1, 2}, 5, 1, s) + r.becomeCandidate() + r.becomeLeader() + + // Begin to remove the second node. + cc := pb.ConfChange{ + ChangeType: pb.ConfChangeType_RemoveNode, + NodeId: 2, + } + ccData, err := cc.Marshal() + if err != nil { + t.Fatal(err) + } + r.Step(pb.Message{ + MsgType: pb.MessageType_MsgPropose, + Entries: []*pb.Entry{ + {EntryType: pb.EntryType_EntryConfChange, Data: ccData}, + }, + }) + // Stabilize the log and make sure nothing is committed yet. + if ents := nextEnts(r, s); len(ents) > 0 { + t.Fatalf("unexpected committed entries: %v", ents) + } + ccIndex := r.RaftLog.LastIndex() + + // While the config change is pending, make another proposal. + r.Step(pb.Message{ + MsgType: pb.MessageType_MsgPropose, + Entries: []*pb.Entry{ + {EntryType: pb.EntryType_EntryNormal, Data: []byte("hello")}, + }, + }) + + // Node 2 acknowledges the config change, committing it. 
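+	// In the current two-node configuration this single acknowledgement forms a
+	// quorum for entries up to ccIndex, but not for the "hello" entry that was
+	// proposed afterwards.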
+ r.Step(pb.Message{ + MsgType: pb.MessageType_MsgAppendResponse, + From: 2, + Index: ccIndex, + }) + ents := nextEnts(r, s) + if len(ents) != 2 { + t.Fatalf("expected two committed entries, got %v", ents) + } + if ents[0].EntryType != pb.EntryType_EntryNormal || ents[0].Data != nil { + t.Fatalf("expected ents[0] to be empty, but got %v", ents[0]) + } + if ents[1].EntryType != pb.EntryType_EntryConfChange { + t.Fatalf("expected ents[1] to be EntryType_EntryConfChange, got %v", ents[1]) + } + + // Apply the config change. This reduces quorum requirements so the + // pending command can now commit. + r.removeNode(2) + ents = nextEnts(r, s) + if len(ents) != 1 || ents[0].EntryType != pb.EntryType_EntryNormal || + string(ents[0].Data) != "hello" { + t.Fatalf("expected one committed EntryType_EntryNormal, got %v", ents) + } +} + +// TestLeaderTransferToUpToDateNode verifies transferring should succeed +// if the transferee has the most up-to-date log entries when transfer starts. +func TestLeaderTransferToUpToDateNode3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + lead := nt.peers[1].(*Raft) + + if lead.Lead != 1 { + t.Fatalf("after election leader is %d, want 1", lead.Lead) + } + + // Transfer leadership to 2. + nt.send(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateFollower, 2) + + // After some log replication, transfer leadership back to 1. + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + + nt.send(pb.Message{From: 1, To: 2, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateLeader, 1) +} + +// TestLeaderTransferToUpToDateNodeFromFollower verifies transferring should succeed +// if the transferee has the most up-to-date log entries when transfer starts. +// Not like TestLeaderTransferToUpToDateNode, where the leader transfer message +// is sent to the leader, in this test case every leader transfer message is sent +// to the follower. +func TestLeaderTransferToUpToDateNodeFromFollower3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + lead := nt.peers[1].(*Raft) + + if lead.Lead != 1 { + t.Fatalf("after election leader is %d, want 1", lead.Lead) + } + + // Transfer leadership to 2. + nt.send(pb.Message{From: 2, To: 2, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateFollower, 2) + + // After some log replication, transfer leadership back to 1. + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateLeader, 1) +} + +func TestLeaderTransferToSlowFollower3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.isolate(3) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + + nt.recover() + lead := nt.peers[1].(*Raft) + if lead.Prs[3].Match != 1 { + t.Fatalf("node 1 has match %d for node 3, want %d", lead.Prs[3].Match, 1) + } + + // Transfer leadership to 3 when node 3 is lack of log. 
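+	// The leader is expected to first replicate the missing entries to node 3
+	// and only then hand off leadership by sending MessageType_MsgTimeoutNow.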
+ nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateFollower, 3) +} + +func TestLeaderTransferAfterSnapshot3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.isolate(3) + + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{}}}) + lead := nt.peers[1].(*Raft) + nextEnts(lead, nt.storage[1]) + nt.storage[1].CreateSnapshot(lead.RaftLog.applied, &pb.ConfState{Nodes: nodes(lead)}, nil) + nt.storage[1].Compact(lead.RaftLog.applied) + + nt.recover() + if lead.Prs[3].Match != 1 { + t.Fatalf("node 1 has match %d for node 3, want %d", lead.Prs[3].Match, 1) + } + + // Transfer leadership to 3 when node 3 is lack of snapshot. + nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + // Send pb.MessageType_MsgHeartbeatResponse to leader to trigger a snapshot for node 3. + nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgHeartbeatResponse}) + + checkLeaderTransferState(t, lead, StateFollower, 3) +} + +func TestLeaderTransferToSelf3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + lead := nt.peers[1].(*Raft) + + // Transfer leadership to self, there will be noop. + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + checkLeaderTransferState(t, lead, StateLeader, 1) +} + +func TestLeaderTransferToNonExistingNode3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + lead := nt.peers[1].(*Raft) + // Transfer leadership to non-existing node, there will be noop. + nt.send(pb.Message{From: 4, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + checkLeaderTransferState(t, lead, StateLeader, 1) +} + +func TestLeaderTransferReceiveHigherTermVote3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.isolate(3) + + lead := nt.peers[1].(*Raft) + + // Transfer leadership to isolated node to let transfer pending. + nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + nt.send(pb.Message{From: 2, To: 2, MsgType: pb.MessageType_MsgHup, Index: 1, Term: 2}) + + checkLeaderTransferState(t, lead, StateFollower, 2) +} + +func TestLeaderTransferRemoveNode3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + lead := nt.peers[1].(*Raft) + lead.removeNode(3) + + nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateLeader, 1) +} + +// TestLeaderTransferBack verifies leadership can transfer back to self when last transfer is pending. +func TestLeaderTransferBack3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.isolate(3) + + lead := nt.peers[1].(*Raft) + + nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + // Transfer leadership back to self. + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateLeader, 1) +} + +// TestLeaderTransferSecondTransferToAnotherNode verifies leader can transfer to another node +// when last transfer is pending. 
+func TestLeaderTransferSecondTransferToAnotherNode3A(t *testing.T) { + nt := newNetwork(nil, nil, nil) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + nt.isolate(3) + + lead := nt.peers[1].(*Raft) + + nt.send(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + // Transfer leadership to another node. + nt.send(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgTransferLeader}) + + checkLeaderTransferState(t, lead, StateFollower, 2) +} + +func checkLeaderTransferState(t *testing.T, r *Raft, state StateType, lead uint64) { + if r.State != state || r.Lead != lead { + t.Fatalf("after transferring, node has state %v lead %v, want state %v lead %v", r.State, r.Lead, state, lead) + } +} + +// TestTransferNonMember verifies that when a MessageType_MsgTimeoutNow arrives at +// a node that has been removed from the group, nothing happens. +// (previously, if the node also got votes, it would panic as it +// transitioned to StateLeader) +func TestTransferNonMember3A(t *testing.T) { + r := newTestRaft(1, []uint64{2, 3, 4}, 5, 1, NewMemoryStorage()) + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgTimeoutNow}) + + r.Step(pb.Message{From: 2, To: 1, MsgType: pb.MessageType_MsgRequestVoteResponse}) + r.Step(pb.Message{From: 3, To: 1, MsgType: pb.MessageType_MsgRequestVoteResponse}) + if r.State != StateFollower { + t.Fatalf("state is %s, want StateFollower", r.State) + } +} + +// TestSplitVote verifies that after split vote, cluster can complete +// election in next round. +func TestSplitVote2AA(t *testing.T) { + n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + + n1.becomeFollower(1, None) + n2.becomeFollower(1, None) + n3.becomeFollower(1, None) + + nt := newNetwork(n1, n2, n3) + nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup}) + + // simulate leader down. followers start split vote. + nt.isolate(1) + nt.send([]pb.Message{ + {From: 2, To: 2, MsgType: pb.MessageType_MsgHup}, + {From: 3, To: 3, MsgType: pb.MessageType_MsgHup}, + }...) 
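+	// Both followers campaign in the same round and vote for themselves, so
+	// neither gathers a majority; the checks below confirm they remain
+	// candidates at term 3.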
+ + // check whether the term values are expected + // n2.Term == 3 + // n3.Term == 3 + sm := nt.peers[2].(*Raft) + if sm.Term != 3 { + t.Errorf("peer 2 term: %d, want %d", sm.Term, 3) + } + sm = nt.peers[3].(*Raft) + if sm.Term != 3 { + t.Errorf("peer 3 term: %d, want %d", sm.Term, 3) + } + + // check state + // n2 == candidate + // n3 == candidate + sm = nt.peers[2].(*Raft) + if sm.State != StateCandidate { + t.Errorf("peer 2 state: %s, want %s", sm.State, StateCandidate) + } + sm = nt.peers[3].(*Raft) + if sm.State != StateCandidate { + t.Errorf("peer 3 state: %s, want %s", sm.State, StateCandidate) + } + + // node 2 election timeout first + nt.send(pb.Message{From: 2, To: 2, MsgType: pb.MessageType_MsgHup}) + + // check whether the term values are expected + // n2.Term == 4 + // n3.Term == 4 + sm = nt.peers[2].(*Raft) + if sm.Term != 4 { + t.Errorf("peer 2 term: %d, want %d", sm.Term, 4) + } + sm = nt.peers[3].(*Raft) + if sm.Term != 4 { + t.Errorf("peer 3 term: %d, want %d", sm.Term, 4) + } + + // check state + // n2 == leader + // n3 == follower + sm = nt.peers[2].(*Raft) + if sm.State != StateLeader { + t.Errorf("peer 2 state: %s, want %s", sm.State, StateLeader) + } + sm = nt.peers[3].(*Raft) + if sm.State != StateFollower { + t.Errorf("peer 3 state: %s, want %s", sm.State, StateFollower) + } +} + +func entsWithConfig(configFunc func(*Config), terms ...uint64) *Raft { + storage := NewMemoryStorage() + for i, term := range terms { + storage.Append([]pb.Entry{{Index: uint64(i + 1), Term: term}}) + } + cfg := newTestConfig(1, []uint64{}, 5, 1, storage) + if configFunc != nil { + configFunc(cfg) + } + sm := newRaft(cfg) + sm.Term = terms[len(terms)-1] + return sm +} + +// votedWithConfig creates a raft state machine with Vote and Term set +// to the given value but no log entries (indicating that it voted in +// the given term but has not received any logs). +func votedWithConfig(configFunc func(*Config), vote, term uint64) *Raft { + storage := NewMemoryStorage() + storage.SetHardState(pb.HardState{Vote: vote, Term: term}) + cfg := newTestConfig(1, []uint64{}, 5, 1, storage) + if configFunc != nil { + configFunc(cfg) + } + sm := newRaft(cfg) + sm.Term = term + return sm +} + +type network struct { + peers map[uint64]stateMachine + storage map[uint64]*MemoryStorage + dropm map[connem]float64 + ignorem map[pb.MessageType]bool + + // msgHook is called for each message sent. It may inspect the + // message and return true to send it or false to drop it. + msgHook func(pb.Message) bool +} + +// newNetwork initializes a network from peers. +// A nil node will be replaced with a new *stateMachine. +// A *stateMachine will get its k, id. +// When using stateMachine, the address list is always [1, n]. +func newNetwork(peers ...stateMachine) *network { + return newNetworkWithConfig(nil, peers...) +} + +// newNetworkWithConfig is like newNetwork but calls the given func to +// modify the configuration of any state machines it creates. 
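+//
+// A typical use of the harness in these tests looks like:
+//
+//	nt := newNetwork(nil, nil, nil) // three default peers with ids 1, 2, 3
+//	nt.send(pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgHup})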
+func newNetworkWithConfig(configFunc func(*Config), peers ...stateMachine) *network { + size := len(peers) + peerAddrs := idsBySize(size) + + npeers := make(map[uint64]stateMachine, size) + nstorage := make(map[uint64]*MemoryStorage, size) + + for j, p := range peers { + id := peerAddrs[j] + switch v := p.(type) { + case nil: + nstorage[id] = NewMemoryStorage() + cfg := newTestConfig(id, peerAddrs, 10, 1, nstorage[id]) + if configFunc != nil { + configFunc(cfg) + } + sm := newRaft(cfg) + npeers[id] = sm + case *Raft: + v.id = id + v.Prs = make(map[uint64]*Progress) + for i := 0; i < size; i++ { + v.Prs[peerAddrs[i]] = &Progress{} + } + npeers[id] = v + case *blackHole: + npeers[id] = v + default: + panic(fmt.Sprintf("unexpected state machine type: %T", p)) + } + } + return &network{ + peers: npeers, + storage: nstorage, + dropm: make(map[connem]float64), + ignorem: make(map[pb.MessageType]bool), + } +} + +func (nw *network) send(msgs ...pb.Message) { + for len(msgs) > 0 { + m := msgs[0] + p := nw.peers[m.To] + p.Step(m) + msgs = append(msgs[1:], nw.filter(p.readMessages())...) + } +} + +func (nw *network) drop(from, to uint64, perc float64) { + nw.dropm[connem{from, to}] = perc +} + +func (nw *network) cut(one, other uint64) { + nw.drop(one, other, 2.0) // always drop + nw.drop(other, one, 2.0) // always drop +} + +func (nw *network) isolate(id uint64) { + for i := 0; i < len(nw.peers); i++ { + nid := uint64(i) + 1 + if nid != id { + nw.drop(id, nid, 1.0) // always drop + nw.drop(nid, id, 1.0) // always drop + } + } +} + +func (nw *network) ignore(t pb.MessageType) { + nw.ignorem[t] = true +} + +func (nw *network) recover() { + nw.dropm = make(map[connem]float64) + nw.ignorem = make(map[pb.MessageType]bool) +} + +func (nw *network) filter(msgs []pb.Message) []pb.Message { + mm := []pb.Message{} + for _, m := range msgs { + if nw.ignorem[m.MsgType] { + continue + } + switch m.MsgType { + case pb.MessageType_MsgHup: + // hups never go over the network, so don't drop them but panic + panic("unexpected MessageType_MsgHup") + default: + perc := nw.dropm[connem{m.From, m.To}] + if n := rand.Float64(); n < perc { + continue + } + } + if nw.msgHook != nil { + if !nw.msgHook(m) { + continue + } + } + mm = append(mm, m) + } + return mm +} + +type connem struct { + from, to uint64 +} + +type blackHole struct{} + +func (blackHole) Step(pb.Message) error { return nil } +func (blackHole) readMessages() []pb.Message { return nil } + +var nopStepper = &blackHole{} + +func idsBySize(size int) []uint64 { + ids := make([]uint64, size) + for i := 0; i < size; i++ { + ids[i] = 1 + uint64(i) + } + return ids +} + +func newTestConfig(id uint64, peers []uint64, election, heartbeat int, storage Storage) *Config { + return &Config{ + ID: id, + peers: peers, + ElectionTick: election, + HeartbeatTick: heartbeat, + Storage: storage, + } +} + +func newTestRaft(id uint64, peers []uint64, election, heartbeat int, storage Storage) *Raft { + return newRaft(newTestConfig(id, peers, election, heartbeat, storage)) +} diff --git a/raft/rawnode.go b/raft/rawnode.go new file mode 100644 index 00000000..ec939f36 --- /dev/null +++ b/raft/rawnode.go @@ -0,0 +1,177 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +// ErrStepLocalMsg is returned when try to step a local raft message +var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") + +// ErrStepPeerNotFound is returned when try to step a response message +// but there is no peer found in raft.Prs for that node. +var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") + +// SoftState provides state that is useful for logging and debugging. +// The state is volatile and does not need to be persisted to the WAL. +type SoftState struct { + Lead uint64 // must use atomic operations to access; keep 64-bit aligned. + RaftState StateType +} + +// Ready encapsulates the entries and messages that are ready to read, +// be saved to stable storage, committed or sent to other peers. +// All fields in Ready are read-only. +type Ready struct { + // The current volatile state of a Node. + // SoftState will be nil if there is no update. + // It is not required to consume or store SoftState. + *SoftState + + // The current state of a Node to be saved to stable storage BEFORE + // Messages are sent. + // HardState will be equal to empty state if there is no update. + pb.HardState + + // Entries specifies entries to be saved to stable storage BEFORE + // Messages are sent. + Entries []pb.Entry + + // Snapshot specifies the snapshot to be saved to stable storage. + Snapshot pb.Snapshot + + // CommittedEntries specifies entries to be committed to a + // store/state-machine. These have previously been committed to stable + // store. + CommittedEntries []pb.Entry + + // Messages specifies outbound messages to be sent AFTER Entries are + // committed to stable storage. + // If it contains a MessageType_MsgSnapshot message, the application MUST report back to raft + // when the snapshot has been received or has failed by calling ReportSnapshot. + Messages []pb.Message +} + +// RawNode is a wrapper of Raft. +type RawNode struct { + Raft *Raft + // Your Data Here (2A). +} + +// NewRawNode returns a new RawNode given configuration and a list of raft peers. +func NewRawNode(config *Config) (*RawNode, error) { + // Your Code Here (2A). + return nil, nil +} + +// Tick advances the internal logical clock by a single tick. +func (rn *RawNode) Tick() { + rn.Raft.tick() +} + +// Campaign causes this RawNode to transition to candidate state. +func (rn *RawNode) Campaign() error { + return rn.Raft.Step(pb.Message{ + MsgType: pb.MessageType_MsgHup, + }) +} + +// Propose proposes data be appended to the raft log. +func (rn *RawNode) Propose(data []byte) error { + ent := pb.Entry{Data: data} + return rn.Raft.Step(pb.Message{ + MsgType: pb.MessageType_MsgPropose, + From: rn.Raft.id, + Entries: []*pb.Entry{&ent}}) +} + +// ProposeConfChange proposes a config change. 
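+// The change only takes effect once the resulting entry is committed and the
+// caller passes it to ApplyConfChange, for example:
+//
+//	cc := pb.ConfChange{ChangeType: pb.ConfChangeType_AddNode, NodeId: 2}
+//	_ = rn.ProposeConfChange(cc)
+//	// ... after the entry is committed and surfaced in a Ready:
+//	rn.ApplyConfChange(cc)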
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { + data, err := cc.Marshal() + if err != nil { + return err + } + ent := pb.Entry{EntryType: pb.EntryType_EntryConfChange, Data: data} + return rn.Raft.Step(pb.Message{ + MsgType: pb.MessageType_MsgPropose, + Entries: []*pb.Entry{&ent}, + }) +} + +// ApplyConfChange applies a config change to the local node. +func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { + if cc.NodeId == None { + return &pb.ConfState{Nodes: nodes(rn.Raft)} + } + switch cc.ChangeType { + case pb.ConfChangeType_AddNode: + rn.Raft.addNode(cc.NodeId) + case pb.ConfChangeType_RemoveNode: + rn.Raft.removeNode(cc.NodeId) + default: + panic("unexpected conf type") + } + return &pb.ConfState{Nodes: nodes(rn.Raft)} +} + +// Step advances the state machine using the given message. +func (rn *RawNode) Step(m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.MsgType) { + return ErrStepLocalMsg + } + if pr := rn.Raft.Prs[m.From]; pr != nil || !IsResponseMsg(m.MsgType) { + return rn.Raft.Step(m) + } + return ErrStepPeerNotFound +} + +// Ready returns the current point-in-time state of this RawNode. +func (rn *RawNode) Ready() Ready { + // Your Code Here (2A). + return Ready{} +} + +// HasReady called when RawNode user need to check if any Ready pending. +func (rn *RawNode) HasReady() bool { + // Your Code Here (2A). + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +func (rn *RawNode) Advance(rd Ready) { + // Your Code Here (2A). +} + +// GetProgress return the the Progress of this node and its peers, if this +// node is leader. +func (rn *RawNode) GetProgress() map[uint64]Progress { + prs := make(map[uint64]Progress) + if rn.Raft.State == StateLeader { + for id, p := range rn.Raft.Prs { + prs[id] = *p + } + } + return prs +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.Raft.Step(pb.Message{MsgType: pb.MessageType_MsgTransferLeader, From: transferee}) +} diff --git a/raft/rawnode_test.go b/raft/rawnode_test.go new file mode 100644 index 00000000..973947b6 --- /dev/null +++ b/raft/rawnode_test.go @@ -0,0 +1,252 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "reflect" + "testing" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +type ignoreSizeHintMemStorage struct { + *MemoryStorage +} + +func (s *ignoreSizeHintMemStorage) Entries(lo, hi uint64, maxSize uint64) ([]pb.Entry, error) { + return s.MemoryStorage.Entries(lo, hi) +} + +// TestRawNodeProposeAndConfChange ensures that RawNode.Propose and RawNode.ProposeConfChange +// send the given proposal and ConfChange to the underlying raft. 
+func TestRawNodeProposeAndConfChange3A(t *testing.T) { + s := NewMemoryStorage() + var err error + rawNode, err := NewRawNode(newTestConfig(1, []uint64{1}, 10, 1, s)) + if err != nil { + t.Fatal(err) + } + rd := rawNode.Ready() + s.Append(rd.Entries) + rawNode.Advance(rd) + + if d := rawNode.Ready(); !IsEmptyHardState(d.HardState) || len(d.Entries) > 0 { + t.Fatalf("expected empty hard state: %#v", d) + } + + rawNode.Campaign() + rd = rawNode.Ready() + if rd.SoftState.Lead != rawNode.Raft.id { + t.Fatalf("expected become leader") + } + + // propose a command and a ConfChange. + rawNode.Propose([]byte("somedata")) + cc := pb.ConfChange{ChangeType: pb.ConfChangeType_AddNode, NodeId: 1} + ccdata, err := cc.Marshal() + if err != nil { + t.Fatal(err) + } + rawNode.ProposeConfChange(cc) + + entries := rawNode.Raft.RaftLog.entries + if l := len(entries); l < 2 { + t.Fatalf("len(entries) = %d, want >= 2", l) + } else { + entries = entries[l-2:] + } + if !bytes.Equal(entries[0].Data, []byte("somedata")) { + t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, []byte("somedata")) + } + if entries[1].EntryType != pb.EntryType_EntryConfChange { + t.Fatalf("type = %v, want %v", entries[1].EntryType, pb.EntryType_EntryConfChange) + } + if !bytes.Equal(entries[1].Data, ccdata) { + t.Errorf("data = %v, want %v", entries[1].Data, ccdata) + } +} + +// TestRawNodeProposeAddDuplicateNode ensures that two proposes to add the same node should +// not affect the later propose to add new node. +func TestRawNodeProposeAddDuplicateNode3A(t *testing.T) { + s := NewMemoryStorage() + rawNode, err := NewRawNode(newTestConfig(1, []uint64{1}, 10, 1, s)) + if err != nil { + t.Fatal(err) + } + rd := rawNode.Ready() + s.Append(rd.Entries) + rawNode.Advance(rd) + + rawNode.Campaign() + for { + rd = rawNode.Ready() + s.Append(rd.Entries) + if rd.SoftState.Lead == rawNode.Raft.id { + rawNode.Advance(rd) + break + } + rawNode.Advance(rd) + } + + proposeConfChangeAndApply := func(cc pb.ConfChange) { + rawNode.ProposeConfChange(cc) + rd = rawNode.Ready() + s.Append(rd.Entries) + for _, entry := range rd.CommittedEntries { + if entry.EntryType == pb.EntryType_EntryConfChange { + var cc pb.ConfChange + cc.Unmarshal(entry.Data) + rawNode.ApplyConfChange(cc) + } + } + rawNode.Advance(rd) + } + + cc1 := pb.ConfChange{ChangeType: pb.ConfChangeType_AddNode, NodeId: 1} + ccdata1, err := cc1.Marshal() + if err != nil { + t.Fatal(err) + } + proposeConfChangeAndApply(cc1) + + // try to add the same node again + proposeConfChangeAndApply(cc1) + + // the new node join should be ok + cc2 := pb.ConfChange{ChangeType: pb.ConfChangeType_AddNode, NodeId: 2} + ccdata2, err := cc2.Marshal() + if err != nil { + t.Fatal(err) + } + proposeConfChangeAndApply(cc2) + + lastIndex, err := s.LastIndex() + if err != nil { + t.Fatal(err) + } + + // the last three entries should be: ConfChange cc1, cc1, cc2 + entries, err := s.Entries(lastIndex-2, lastIndex+1) + if err != nil { + t.Fatal(err) + } + if len(entries) != 3 { + t.Fatalf("len(entries) = %d, want %d", len(entries), 3) + } + if !bytes.Equal(entries[0].Data, ccdata1) { + t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, ccdata1) + } + if !bytes.Equal(entries[2].Data, ccdata2) { + t.Errorf("entries[2].Data = %v, want %v", entries[2].Data, ccdata2) + } +} + +// TestRawNodeStart ensures that a node can be started correctly, and can accept and commit +// proposals. 
+func TestRawNodeStart2AC(t *testing.T) { + storage := NewMemoryStorage() + rawNode, err := NewRawNode(newTestConfig(1, []uint64{1}, 10, 1, storage)) + if err != nil { + t.Fatal(err) + } + rawNode.Campaign() + rd := rawNode.Ready() + storage.Append(rd.Entries) + rawNode.Advance(rd) + + rawNode.Propose([]byte("foo")) + rd = rawNode.Ready() + if el := len(rd.Entries); el != len(rd.CommittedEntries) || el != 1 { + t.Errorf("got len(Entries): %+v, len(CommittedEntries): %+v, want %+v", el, len(rd.CommittedEntries), 1) + } + if !reflect.DeepEqual(rd.Entries[0].Data, rd.CommittedEntries[0].Data) || !reflect.DeepEqual(rd.Entries[0].Data, []byte("foo")) { + t.Errorf("got %+v %+v , want %+v", rd.Entries[0].Data, rd.CommittedEntries[0].Data, []byte("foo")) + } + storage.Append(rd.Entries) + rawNode.Advance(rd) + + if rawNode.HasReady() { + t.Errorf("unexpected Ready: %+v", rawNode.Ready()) + } +} + +func TestRawNodeRestart2AC(t *testing.T) { + entries := []pb.Entry{ + {Term: 1, Index: 1}, + {Term: 1, Index: 2, Data: []byte("foo")}, + } + st := pb.HardState{Term: 1, Commit: 1} + + want := Ready{ + Entries: []pb.Entry{}, + // commit up to commit index in st + CommittedEntries: entries[:st.Commit], + } + + storage := NewMemoryStorage() + storage.SetHardState(st) + storage.Append(entries) + rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, storage)) + if err != nil { + t.Fatal(err) + } + rd := rawNode.Ready() + if !reflect.DeepEqual(rd, want) { + t.Errorf("g = %+v,\n w %+v", rd, want) + } + rawNode.Advance(rd) + if rawNode.HasReady() { + t.Errorf("unexpected Ready: %+v", rawNode.Ready()) + } +} + +func TestRawNodeRestartFromSnapshot2C(t *testing.T) { + snap := pb.Snapshot{ + Metadata: &pb.SnapshotMetadata{ + ConfState: &pb.ConfState{Nodes: []uint64{1, 2}}, + Index: 2, + Term: 1, + }, + } + entries := []pb.Entry{ + {Term: 1, Index: 3, Data: []byte("foo")}, + } + st := pb.HardState{Term: 1, Commit: 3} + + want := Ready{ + Entries: []pb.Entry{}, + // commit up to commit index in st + CommittedEntries: entries, + } + + s := NewMemoryStorage() + s.SetHardState(st) + s.ApplySnapshot(snap) + s.Append(entries) + rawNode, err := NewRawNode(newTestConfig(1, nil, 10, 1, s)) + if err != nil { + t.Fatal(err) + } + if rd := rawNode.Ready(); !reflect.DeepEqual(rd, want) { + t.Errorf("g = %+v,\n w %+v", rd, want) + } else { + rawNode.Advance(rd) + } + if rawNode.HasReady() { + t.Errorf("unexpected Ready: %+v", rawNode.HasReady()) + } +} diff --git a/raft/storage.go b/raft/storage.go new file mode 100644 index 00000000..6eafa3bf --- /dev/null +++ b/raft/storage.go @@ -0,0 +1,273 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "sync" + + "github.com/pingcap-incubator/tinykv/log" + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +// ErrCompacted is returned by Storage.Entries/Compact when a requested +// index is unavailable because it predates the last snapshot. 
+var ErrCompacted = errors.New("requested index is unavailable due to compaction") + +// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested +// index is older than the existing snapshot. +var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") + +// ErrUnavailable is returned by Storage interface when the requested log entries +// are unavailable. +var ErrUnavailable = errors.New("requested entry at index is unavailable") + +// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required +// snapshot is temporarily unavailable. +var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") + +// Storage is an interface that may be implemented by the application +// to retrieve log entries from storage. +// +// If any Storage method returns an error, the raft instance will +// become inoperable and refuse to participate in elections; the +// application is responsible for cleanup and recovery in this case. +type Storage interface { + // InitialState returns the saved HardState and ConfState information. + InitialState() (pb.HardState, pb.ConfState, error) + // Entries returns a slice of log entries in the range [lo,hi). + // MaxSize limits the total size of the log entries returned, but + // Entries returns at least one entry if any. + Entries(lo, hi uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range + // [FirstIndex()-1, LastIndex()]. The term of the entry before + // FirstIndex is retained for matching purposes even though the + // rest of that entry may not be available. + Term(i uint64) (uint64, error) + // LastIndex returns the index of the last entry in the log. + LastIndex() (uint64, error) + // FirstIndex returns the index of the first log entry that is + // possibly available via Entries (older entries have been incorporated + // into the latest Snapshot; if storage only contains the dummy entry the + // first log entry is not available). + FirstIndex() (uint64, error) + // Snapshot returns the most recent snapshot. + // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, + // so raft state machine could know that Storage needs some time to prepare + // snapshot and call Snapshot later. + Snapshot() (pb.Snapshot, error) +} + +// MemoryStorage implements the Storage interface backed by an +// in-memory array. +type MemoryStorage struct { + // Protects access to all fields. Most methods of MemoryStorage are + // run on the raft goroutine, but Append() is run on an application + // goroutine. + sync.Mutex + + hardState pb.HardState + snapshot pb.Snapshot + // ents[i] has raft log position i+snapshot.Metadata.Index + ents []pb.Entry +} + +// NewMemoryStorage creates an empty MemoryStorage. +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // When starting from scratch populate the list with a dummy entry at term zero. + ents: make([]pb.Entry, 1), + snapshot: pb.Snapshot{Metadata: &pb.SnapshotMetadata{ConfState: &pb.ConfState{}}}, + } +} + +// InitialState implements the Storage interface. +func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, *ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState saves the current HardState. +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// Entries implements the Storage interface. 
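+// It returns the entries in the range [lo, hi); lo must be greater than the
+// index of the dummy entry at ents[0], otherwise ErrCompacted is returned.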
+func (ms *MemoryStorage) Entries(lo, hi uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + log.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) + } + + ents := ms.ents[lo-offset : hi-offset] + if len(ms.ents) == 1 && len(ents) != 0 { + // only contains dummy entries. + return nil, ErrUnavailable + } + return ents, nil +} + +// Term implements the Storage interface. +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// LastIndex implements the Storage interface. +func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex implements the Storage interface. +func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Snapshot implements the Storage interface. +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// ApplySnapshot overwrites the contents of this Storage object with +// those of the given snapshot. +func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + //handle check for old snapshot being applied + msIndex := ms.snapshot.Metadata.Index + snapIndex := snap.Metadata.Index + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and +// can be used to reconstruct the state at that point. +// If any configuration changes have been made since the last compaction, +// the result of the last ApplyConfChange must be passed in. +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index + if i > ms.lastIndex() { + log.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term + if cs != nil { + ms.snapshot.Metadata.ConfState = cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// Compact discards all log entries prior to compactIndex. +// It is the application's responsibility to not attempt to compact an index +// greater than raftLog.applied. +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { + log.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + ents = append(ents, ms.ents[i+1:]...) 
+ ms.ents = ents + return nil +} + +// Append the new entries to storage. +// TODO (xiangli): ensure the entries are continuous and +// entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() + last := entries[0].Index + uint64(len(entries)) - 1 + + // shortcut if there is no new entry. + if last < first { + return nil + } + // truncate compacted entries + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + ms.ents = append(ms.ents, entries...) + default: + log.Panicf("missing log entry [last: %d, append at: %d]", + ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/raft/util.go b/raft/util.go new file mode 100644 index 00000000..e4cb4a9a --- /dev/null +++ b/raft/util.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "sort" + "strings" + + pb "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" +) + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// IsEmptyHardState returns true if the given HardState is empty. +func IsEmptyHardState(st pb.HardState) bool { + return isHardStateEqual(st, pb.HardState{}) +} + +// IsEmptySnap returns true if the given Snapshot is empty. 
+func IsEmptySnap(sp *pb.Snapshot) bool { + if sp == nil || sp.Metadata == nil { + return true + } + return sp.Metadata.Index == 0 +} + +func mustTerm(term uint64, err error) uint64 { + if err != nil { + panic(err) + } + return term +} + +func nodes(r *Raft) []uint64 { + nodes := make([]uint64, 0, len(r.Prs)) + for id := range r.Prs { + nodes = append(nodes, id) + } + sort.Sort(uint64Slice(nodes)) + return nodes +} + +func diffu(a, b string) string { + if a == b { + return "" + } + aname, bname := mustTemp("base", a), mustTemp("other", b) + defer os.Remove(aname) + defer os.Remove(bname) + cmd := exec.Command("diff", "-u", aname, bname) + buf, err := cmd.CombinedOutput() + if err != nil { + if _, ok := err.(*exec.ExitError); ok { + // do nothing + return string(buf) + } + panic(err) + } + return string(buf) +} + +func mustTemp(pre, body string) string { + f, err := ioutil.TempFile("", pre) + if err != nil { + panic(err) + } + _, err = io.Copy(f, strings.NewReader(body)) + if err != nil { + panic(err) + } + f.Close() + return f.Name() +} + +func ltoa(l *RaftLog) string { + s := fmt.Sprintf("committed: %d\n", l.committed) + s += fmt.Sprintf("applied: %d\n", l.applied) + for i, e := range l.entries { + s += fmt.Sprintf("#%d: %+v\n", i, e) + } + return s +} + +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MessageType_MsgHup || msgt == pb.MessageType_MsgBeat +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MessageType_MsgAppendResponse || msgt == pb.MessageType_MsgRequestVoteResponse || msgt == pb.MessageType_MsgHeartbeatResponse +} + +func isHardStateEqual(a, b pb.HardState) bool { + return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit +} diff --git a/scheduler/Makefile b/scheduler/Makefile new file mode 100644 index 00000000..3245de25 --- /dev/null +++ b/scheduler/Makefile @@ -0,0 +1,106 @@ +TEST_PKGS := $(shell find . -iname "*_test.go" -exec dirname {} \; | \ + sort -u | sed -e "s/^\./github.com\/pingcap\/pd/") +INTEGRATION_TEST_PKGS := $(shell find . -iname "*_test.go" -exec dirname {} \; | \ + sort -u | sed -e "s/^\./github.com\/pingcap\/pd/" | grep -E "tests") +BASIC_TEST_PKGS := $(filter-out $(INTEGRATION_TEST_PKGS),$(TEST_PKGS)) + +PACKAGES := go list ./... +PACKAGE_DIRECTORIES := $(PACKAGES) | sed 's|github.com/pingcap-incubator/tinykv/scheduler/||' +GOCHECKER := awk '{ print } END { if (NR > 0) { exit 1 } }' +RETOOL := ./scripts/retool +OVERALLS := overalls + +FAILPOINT_ENABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|\.retools)" | xargs ./scripts/retool do failpoint-ctl enable) +FAILPOINT_DISABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|\.retools)" | xargs ./scripts/retool do failpoint-ctl disable) + +GOVER_MAJOR := $(shell go version | sed -E -e "s/.*go([0-9]+)[.]([0-9]+).*/\1/") +GOVER_MINOR := $(shell go version | sed -E -e "s/.*go([0-9]+)[.]([0-9]+).*/\2/") +GO111 := $(shell [ $(GOVER_MAJOR) -gt 1 ] || [ $(GOVER_MAJOR) -eq 1 ] && [ $(GOVER_MINOR) -ge 11 ]; echo $$?) 
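+# GO111 holds the exit status of the version check above: a value of 1 means
+# the detected Go toolchain failed the "at least Go 1.11" test, so the build
+# is aborted below.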
+ifeq ($(GO111), 1) +$(error "go below 1.11 does not support modules") +endif + +default: build + +all: dev + +dev: build check test + +ci: build check basic-test + +build: pd-server +pd-server: export GO111MODULE=on +pd-server: +ifeq ("$(WITH_RACE)", "1") + CGO_ENABLED=1 go build -race -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o bin/pd-server cmd/pd-server/main.go +else + CGO_ENABLED=0 go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o bin/pd-server cmd/pd-server/main.go +endif + +test: retool-setup + # testing... + @$(DEADLOCK_ENABLE) + @$(FAILPOINT_ENABLE) + CGO_ENABLED=1 GO111MODULE=on go test -race -cover $(TEST_PKGS) || { $(FAILPOINT_DISABLE); $(DEADLOCK_DISABLE); exit 1; } + @$(FAILPOINT_DISABLE) + @$(DEADLOCK_DISABLE) + +basic-test: + @$(FAILPOINT_ENABLE) + GO111MODULE=on go test $(BASIC_TEST_PKGS) || { $(FAILPOINT_DISABLE); exit 1; } + @$(FAILPOINT_DISABLE) + +# These need to be fixed before they can be ran regularly +check-fail: + CGO_ENABLED=0 ./scripts/retool do gometalinter.v2 --disable-all \ + --enable errcheck \ + $$($(PACKAGE_DIRECTORIES)) + CGO_ENABLED=0 ./scripts/retool do gosec $$($(PACKAGE_DIRECTORIES)) + +check-all: static lint tidy + @echo "checking" + +retool-setup: export GO111MODULE=off +retool-setup: + @which retool >/dev/null 2>&1 || go get github.com/twitchtv/retool + @./scripts/retool sync + +check: retool-setup check-all + +static: export GO111MODULE=on +static: + @ # Not running vet and fmt through metalinter becauase it ends up looking at vendor + gofmt -s -l $$($(PACKAGE_DIRECTORIES)) 2>&1 | $(GOCHECKER) + ./scripts/retool do govet --shadow $$($(PACKAGE_DIRECTORIES)) 2>&1 | $(GOCHECKER) + + CGO_ENABLED=0 ./scripts/retool do golangci-lint run --disable-all --deadline 120s \ + --enable misspell \ + --enable staticcheck \ + --enable ineffassign \ + $$($(PACKAGE_DIRECTORIES)) + +lint: + @echo "linting" + CGO_ENABLED=0 ./scripts/retool do revive -formatter friendly -config revive.toml $$($(PACKAGES)) + +tidy: + @echo "go mod tidy" + GO111MODULE=on go mod tidy + git diff --quiet go.mod go.sum + +travis_coverage: export GO111MODULE=on +travis_coverage: +ifeq ("$(TRAVIS_COVERAGE)", "1") + @$(FAILPOINT_ENABLE) + CGO_ENABLED=1 ./scripts/retool do $(OVERALLS) -concurrency=8 -project=github.com/pingcap-incubator/tinykv/scheduler -covermode=count -ignore='.git,vendor' -- -coverpkg=./... || { $(FAILPOINT_DISABLE); exit 1; } + @$(FAILPOINT_DISABLE) +else + @echo "coverage only runs in travis." +endif + +clean-test: + rm -rf /tmp/test_pd* + rm -rf /tmp/pd-tests* + rm -rf /tmp/test_etcd* + +.PHONY: all ci vendor clean-test tidy diff --git a/scheduler/client/client.go b/scheduler/client/client.go new file mode 100644 index 00000000..a39e2812 --- /dev/null +++ b/scheduler/client/client.go @@ -0,0 +1,782 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pd + +import ( + "context" + "strings" + "sync" + "time" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/grpcutil" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +// Client is a PD (Placement Driver) client. +// It should not be used after calling Close(). +type Client interface { + // GetClusterID gets the cluster ID from PD. + GetClusterID(ctx context.Context) uint64 + // GetTS gets a timestamp from PD. + GetTS(ctx context.Context) (int64, int64, error) + // GetTSAsync gets a timestamp from PD, without block the caller. + GetTSAsync(ctx context.Context) TSFuture + // GetRegion gets a region and its leader Peer from PD by key. + // The region may expire after split. Caller is responsible for caching and + // taking care of region change. + // Also it may return nil if PD finds no Region for the key temporarily, + // client should retry later. + GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) + // GetPrevRegion gets the previous region and its leader Peer of the region where the key is located. + GetPrevRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) + // GetRegionByID gets a region and its leader Peer from PD by id. + GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) + // ScanRegion gets a list of regions, starts from the region that contains key. + // Limit limits the maximum number of regions returned. + // If a region has no leader, corresponding leader will be placed by a peer + // with empty value (PeerID is 0). + ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*metapb.Region, []*metapb.Peer, error) + // GetStore gets a store from PD by store id. + // The store may expire later. Caller is responsible for caching and taking care + // of store change. + GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) + // GetAllStores gets all stores from pd. + // The store may expire later. Caller is responsible for caching and taking care + // of store change. + GetAllStores(ctx context.Context, opts ...GetStoreOption) ([]*metapb.Store, error) + // Update GC safe point. TiKV will check it and do GC themselves if necessary. + // If the given safePoint is less than the current one, it will not be updated. + // Returns the new safePoint after updating. + UpdateGCSafePoint(ctx context.Context, safePoint uint64) (uint64, error) + // ScatterRegion scatters the specified region. Should use it for a batch of regions, + // and the distribution of these regions will be dispersed. + ScatterRegion(ctx context.Context, regionID uint64) error + // GetOperator gets the status of operator of the specified region. + GetOperator(ctx context.Context, regionID uint64) (*schedulerpb.GetOperatorResponse, error) + // Close closes the client. + Close() +} + +// GetStoreOp represents available options when getting stores. +type GetStoreOp struct { + excludeTombstone bool +} + +// GetStoreOption configures GetStoreOp. +type GetStoreOption func(*GetStoreOp) + +// WithExcludeTombstone excludes tombstone stores from the result. 
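+// For example: stores, err := client.GetAllStores(ctx, WithExcludeTombstone())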
+func WithExcludeTombstone() GetStoreOption { + return func(op *GetStoreOp) { op.excludeTombstone = true } +} + +type tsoRequest struct { + start time.Time + ctx context.Context + done chan error + physical int64 + logical int64 +} + +const ( + pdTimeout = 3 * time.Second + updateLeaderTimeout = time.Second // Use a shorter timeout to recover faster from network isolation. + maxMergeTSORequests = 10000 + maxInitClusterRetries = 100 +) + +var ( + // errFailInitClusterID is returned when failed to load clusterID from all supplied PD addresses. + errFailInitClusterID = errors.New("[pd] failed to get cluster id") + // errClosing is returned when request is canceled when client is closing. + errClosing = errors.New("[pd] closing") + // errTSOLength is returned when the number of response timestamps is inconsistent with request. + errTSOLength = errors.New("[pd] tso length in rpc response is incorrect") +) + +type client struct { + urls []string + clusterID uint64 + tsoRequests chan *tsoRequest + + connMu struct { + sync.RWMutex + clientConns map[string]*grpc.ClientConn + leader string + } + + tsDeadlineCh chan deadline + checkLeaderCh chan struct{} + + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + + security SecurityOption +} + +// SecurityOption records options about tls +type SecurityOption struct { + CAPath string + CertPath string + KeyPath string +} + +// NewClient creates a PD client. +func NewClient(pdAddrs []string, security SecurityOption) (Client, error) { + log.Info("[pd] create pd client with endpoints", zap.Strings("pd-address", pdAddrs)) + ctx, cancel := context.WithCancel(context.Background()) + c := &client{ + urls: addrsToUrls(pdAddrs), + tsoRequests: make(chan *tsoRequest, maxMergeTSORequests), + tsDeadlineCh: make(chan deadline, 1), + checkLeaderCh: make(chan struct{}, 1), + ctx: ctx, + cancel: cancel, + security: security, + } + c.connMu.clientConns = make(map[string]*grpc.ClientConn) + + if err := c.initRetry(c.initClusterID); err != nil { + return nil, err + } + if err := c.initRetry(c.updateLeader); err != nil { + return nil, err + } + log.Info("[pd] init cluster id", zap.Uint64("cluster-id", c.clusterID)) + + c.wg.Add(3) + go c.tsLoop() + go c.tsCancelLoop() + go c.leaderLoop() + + return c, nil +} + +func (c *client) updateURLs(members []*schedulerpb.Member) { + urls := make([]string, 0, len(members)) + for _, m := range members { + urls = append(urls, m.GetClientUrls()...) 
+ } + c.urls = urls +} + +func (c *client) initRetry(f func() error) error { + var err error + for i := 0; i < maxInitClusterRetries; i++ { + if err = f(); err == nil { + return nil + } + time.Sleep(time.Second) + } + return errors.WithStack(err) +} + +func (c *client) initClusterID() error { + ctx, cancel := context.WithCancel(c.ctx) + defer cancel() + for _, u := range c.urls { + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, pdTimeout) + members, err := c.getMembers(timeoutCtx, u) + timeoutCancel() + if err != nil || members.GetHeader() == nil { + log.Warn("[pd] failed to get cluster id", zap.String("url", u), zap.Error(err)) + continue + } + c.clusterID = members.GetHeader().GetClusterId() + return nil + } + return errors.WithStack(errFailInitClusterID) +} + +func (c *client) updateLeader() error { + for _, u := range c.urls { + ctx, cancel := context.WithTimeout(c.ctx, updateLeaderTimeout) + members, err := c.getMembers(ctx, u) + cancel() + if err != nil || members.GetLeader() == nil || len(members.GetLeader().GetClientUrls()) == 0 { + select { + case <-c.ctx.Done(): + return errors.WithStack(err) + default: + continue + } + } + c.updateURLs(members.GetMembers()) + return c.switchLeader(members.GetLeader().GetClientUrls()) + } + return errors.Errorf("failed to get leader from %v", c.urls) +} + +func (c *client) getMembers(ctx context.Context, url string) (*schedulerpb.GetMembersResponse, error) { + cc, err := c.getOrCreateGRPCConn(url) + if err != nil { + return nil, err + } + members, err := schedulerpb.NewSchedulerClient(cc).GetMembers(ctx, &schedulerpb.GetMembersRequest{}) + if err != nil { + return nil, errors.WithStack(err) + } + return members, nil +} + +func (c *client) switchLeader(addrs []string) error { + // FIXME: How to safely compare leader urls? For now, only allows one client url. 
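+	// Only the first client URL is considered. If it already matches the cached
+	// leader this call is a no-op; otherwise a gRPC connection to the new address
+	// is created (or reused) before the cached leader is swapped under the lock.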
+ addr := addrs[0] + + c.connMu.RLock() + oldLeader := c.connMu.leader + c.connMu.RUnlock() + + if addr == oldLeader { + return nil + } + + log.Info("[pd] switch leader", zap.String("new-leader", addr), zap.String("old-leader", oldLeader)) + if _, err := c.getOrCreateGRPCConn(addr); err != nil { + return err + } + + c.connMu.Lock() + defer c.connMu.Unlock() + c.connMu.leader = addr + return nil +} + +func (c *client) getOrCreateGRPCConn(addr string) (*grpc.ClientConn, error) { + c.connMu.RLock() + conn, ok := c.connMu.clientConns[addr] + c.connMu.RUnlock() + if ok { + return conn, nil + } + + cc, err := grpcutil.GetClientConn(addr, c.security.CAPath, c.security.CertPath, c.security.KeyPath) + if err != nil { + return nil, errors.WithStack(err) + } + c.connMu.Lock() + defer c.connMu.Unlock() + if old, ok := c.connMu.clientConns[addr]; ok { + cc.Close() + return old, nil + } + + c.connMu.clientConns[addr] = cc + return cc, nil +} + +func (c *client) leaderLoop() { + defer c.wg.Done() + + ctx, cancel := context.WithCancel(c.ctx) + defer cancel() + + for { + select { + case <-c.checkLeaderCh: + case <-time.After(time.Minute): + case <-ctx.Done(): + return + } + + if err := c.updateLeader(); err != nil { + log.Error("[pd] failed updateLeader", zap.Error(err)) + } + } +} + +type deadline struct { + timer <-chan time.Time + done chan struct{} + cancel context.CancelFunc +} + +func (c *client) tsCancelLoop() { + defer c.wg.Done() + + ctx, cancel := context.WithCancel(c.ctx) + defer cancel() + + for { + select { + case d := <-c.tsDeadlineCh: + select { + case <-d.timer: + log.Error("tso request is canceled due to timeout") + d.cancel() + case <-d.done: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } +} + +func (c *client) tsLoop() { + defer c.wg.Done() + + loopCtx, loopCancel := context.WithCancel(c.ctx) + defer loopCancel() + + var requests []*tsoRequest + var opts []opentracing.StartSpanOption + var stream schedulerpb.Scheduler_TsoClient + var cancel context.CancelFunc + + for { + var err error + + if stream == nil { + var ctx context.Context + ctx, cancel = context.WithCancel(loopCtx) + stream, err = c.leaderClient().Tso(ctx) + if err != nil { + select { + case <-loopCtx.Done(): + cancel() + return + default: + } + log.Error("[pd] create tso stream error", zap.Error(err)) + c.ScheduleCheckLeader() + cancel() + c.revokeTSORequest(errors.WithStack(err)) + select { + case <-time.After(time.Second): + case <-loopCtx.Done(): + return + } + continue + } + } + + select { + case first := <-c.tsoRequests: + requests = append(requests, first) + pending := len(c.tsoRequests) + for i := 0; i < pending; i++ { + requests = append(requests, <-c.tsoRequests) + } + done := make(chan struct{}) + dl := deadline{ + timer: time.After(pdTimeout), + done: done, + cancel: cancel, + } + select { + case c.tsDeadlineCh <- dl: + case <-loopCtx.Done(): + cancel() + return + } + opts = extractSpanReference(requests, opts[:0]) + err = c.processTSORequests(stream, requests, opts) + close(done) + requests = requests[:0] + case <-loopCtx.Done(): + cancel() + return + } + + if err != nil { + select { + case <-loopCtx.Done(): + cancel() + return + default: + } + log.Error("[pd] getTS error", zap.Error(err)) + c.ScheduleCheckLeader() + cancel() + stream, cancel = nil, nil + } + } +} + +func extractSpanReference(requests []*tsoRequest, opts []opentracing.StartSpanOption) []opentracing.StartSpanOption { + for _, req := range requests { + if span := opentracing.SpanFromContext(req.ctx); span != nil { + opts = 
append(opts, opentracing.ChildOf(span.Context())) + } + } + return opts +} + +func (c *client) processTSORequests(stream schedulerpb.Scheduler_TsoClient, requests []*tsoRequest, opts []opentracing.StartSpanOption) error { + if len(opts) > 0 { + span := opentracing.StartSpan("pdclient.processTSORequests", opts...) + defer span.Finish() + } + count := len(requests) + req := &schedulerpb.TsoRequest{ + Header: c.requestHeader(), + Count: uint32(count), + } + + if err := stream.Send(req); err != nil { + err = errors.WithStack(err) + c.finishTSORequest(requests, 0, 0, err) + return err + } + resp, err := stream.Recv() + if err != nil { + err = errors.WithStack(err) + c.finishTSORequest(requests, 0, 0, err) + return err + } + + if resp.GetCount() != uint32(len(requests)) { + err = errors.WithStack(errTSOLength) + c.finishTSORequest(requests, 0, 0, err) + return err + } + + physical, logical := resp.GetTimestamp().GetPhysical(), resp.GetTimestamp().GetLogical() + // Server returns the highest ts. + logical -= int64(resp.GetCount() - 1) + c.finishTSORequest(requests, physical, logical, nil) + return nil +} + +func (c *client) finishTSORequest(requests []*tsoRequest, physical, firstLogical int64, err error) { + for i := 0; i < len(requests); i++ { + if span := opentracing.SpanFromContext(requests[i].ctx); span != nil { + span.Finish() + } + requests[i].physical, requests[i].logical = physical, firstLogical+int64(i) + requests[i].done <- err + } +} + +func (c *client) revokeTSORequest(err error) { + n := len(c.tsoRequests) + for i := 0; i < n; i++ { + req := <-c.tsoRequests + req.done <- err + } +} + +func (c *client) Close() { + c.cancel() + c.wg.Wait() + + c.revokeTSORequest(errors.WithStack(errClosing)) + + c.connMu.Lock() + defer c.connMu.Unlock() + for _, cc := range c.connMu.clientConns { + if err := cc.Close(); err != nil { + log.Error("[pd] failed close grpc clientConn", zap.Error(err)) + } + } +} + +// leaderClient gets the client of current PD leader. +func (c *client) leaderClient() schedulerpb.SchedulerClient { + c.connMu.RLock() + defer c.connMu.RUnlock() + + return schedulerpb.NewSchedulerClient(c.connMu.clientConns[c.connMu.leader]) +} + +func (c *client) ScheduleCheckLeader() { + select { + case c.checkLeaderCh <- struct{}{}: + default: + } +} + +func (c *client) GetClusterID(context.Context) uint64 { + return c.clusterID +} + +// For testing use. +func (c *client) GetLeaderAddr() string { + c.connMu.RLock() + defer c.connMu.RUnlock() + return c.connMu.leader +} + +// For testing use. It should only be called when the client is closed. +func (c *client) GetURLs() []string { + return c.urls +} + +var tsoReqPool = sync.Pool{ + New: func() interface{} { + return &tsoRequest{ + done: make(chan error, 1), + } + }, +} + +func (c *client) GetTSAsync(ctx context.Context) TSFuture { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("GetTSAsync", opentracing.ChildOf(span.Context())) + ctx = opentracing.ContextWithSpan(ctx, span) + } + req := tsoReqPool.Get().(*tsoRequest) + req.start = time.Now() + req.ctx = ctx + req.physical = 0 + req.logical = 0 + c.tsoRequests <- req + + return req +} + +// TSFuture is a future which promises to return a TSO. +type TSFuture interface { + // Wait gets the physical and logical time, it would block caller if data is not available yet. 
+ Wait() (int64, int64, error) +} + +func (req *tsoRequest) Wait() (physical int64, logical int64, err error) { + // If tso command duration is observed very high, the reason could be it + // takes too long for Wait() be called. + select { + case err = <-req.done: + err = errors.WithStack(err) + defer tsoReqPool.Put(req) + if err != nil { + return 0, 0, err + } + physical, logical = req.physical, req.logical + return + case <-req.ctx.Done(): + return 0, 0, errors.WithStack(req.ctx.Err()) + } +} + +func (c *client) GetTS(ctx context.Context) (physical int64, logical int64, err error) { + resp := c.GetTSAsync(ctx) + return resp.Wait() +} + +func (c *client) GetRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.GetRegion", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().GetRegion(ctx, &schedulerpb.GetRegionRequest{ + Header: c.requestHeader(), + RegionKey: key, + }) + cancel() + + if err != nil { + c.ScheduleCheckLeader() + return nil, nil, errors.WithStack(err) + } + return resp.GetRegion(), resp.GetLeader(), nil +} + +func (c *client) GetPrevRegion(ctx context.Context, key []byte) (*metapb.Region, *metapb.Peer, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.GetPrevRegion", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().GetPrevRegion(ctx, &schedulerpb.GetRegionRequest{ + Header: c.requestHeader(), + RegionKey: key, + }) + cancel() + + if err != nil { + c.ScheduleCheckLeader() + return nil, nil, errors.WithStack(err) + } + return resp.GetRegion(), resp.GetLeader(), nil +} + +func (c *client) GetRegionByID(ctx context.Context, regionID uint64) (*metapb.Region, *metapb.Peer, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.GetRegionByID", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().GetRegionByID(ctx, &schedulerpb.GetRegionByIDRequest{ + Header: c.requestHeader(), + RegionId: regionID, + }) + cancel() + + if err != nil { + c.ScheduleCheckLeader() + return nil, nil, errors.WithStack(err) + } + return resp.GetRegion(), resp.GetLeader(), nil +} + +func (c *client) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*metapb.Region, []*metapb.Peer, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.ScanRegions", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().ScanRegions(ctx, &schedulerpb.ScanRegionsRequest{ + Header: c.requestHeader(), + StartKey: key, + EndKey: endKey, + Limit: int32(limit), + }) + cancel() + if err != nil { + c.ScheduleCheckLeader() + return nil, nil, errors.WithStack(err) + } + return resp.GetRegions(), resp.GetLeaders(), nil +} + +func (c *client) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.GetStore", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + 
resp, err := c.leaderClient().GetStore(ctx, &schedulerpb.GetStoreRequest{ + Header: c.requestHeader(), + StoreId: storeID, + }) + cancel() + + if err != nil { + c.ScheduleCheckLeader() + return nil, errors.WithStack(err) + } + store := resp.GetStore() + if store == nil { + return nil, errors.New("[pd] store field in rpc response not set") + } + if store.GetState() == metapb.StoreState_Tombstone { + return nil, nil + } + return store, nil +} + +func (c *client) GetAllStores(ctx context.Context, opts ...GetStoreOption) ([]*metapb.Store, error) { + // Applies options + options := &GetStoreOp{} + for _, opt := range opts { + opt(options) + } + + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.GetAllStores", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().GetAllStores(ctx, &schedulerpb.GetAllStoresRequest{ + Header: c.requestHeader(), + ExcludeTombstoneStores: options.excludeTombstone, + }) + cancel() + + if err != nil { + c.ScheduleCheckLeader() + return nil, errors.WithStack(err) + } + stores := resp.GetStores() + return stores, nil +} + +func (c *client) UpdateGCSafePoint(ctx context.Context, safePoint uint64) (uint64, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.UpdateGCSafePoint", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().UpdateGCSafePoint(ctx, &schedulerpb.UpdateGCSafePointRequest{ + Header: c.requestHeader(), + SafePoint: safePoint, + }) + cancel() + + if err != nil { + c.ScheduleCheckLeader() + return 0, errors.WithStack(err) + } + return resp.GetNewSafePoint(), nil +} + +func (c *client) ScatterRegion(ctx context.Context, regionID uint64) error { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.ScatterRegion", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + resp, err := c.leaderClient().ScatterRegion(ctx, &schedulerpb.ScatterRegionRequest{ + Header: c.requestHeader(), + RegionId: regionID, + }) + cancel() + if err != nil { + return err + } + if resp.Header.GetError() != nil { + return errors.Errorf("scatter region %d failed: %s", regionID, resp.Header.GetError().String()) + } + return nil +} + +func (c *client) GetOperator(ctx context.Context, regionID uint64) (*schedulerpb.GetOperatorResponse, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + span = opentracing.StartSpan("pdclient.GetOperator", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + + ctx, cancel := context.WithTimeout(ctx, pdTimeout) + defer cancel() + return c.leaderClient().GetOperator(ctx, &schedulerpb.GetOperatorRequest{ + Header: c.requestHeader(), + RegionId: regionID, + }) +} + +func (c *client) requestHeader() *schedulerpb.RequestHeader { + return &schedulerpb.RequestHeader{ + ClusterId: c.clusterID, + } +} + +func addrsToUrls(addrs []string) []string { + // Add default schema "http://" to addrs. 
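+	// Addresses that already contain a scheme separator ("://") are kept as-is;
+	// bare "host:port" addresses get the default "http://" prefix below.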
+ urls := make([]string, 0, len(addrs)) + for _, addr := range addrs { + if strings.Contains(addr, "://") { + urls = append(urls, addr) + } else { + urls = append(urls, "http://"+addr) + } + } + return urls +} diff --git a/scheduler/client/client_test.go b/scheduler/client/client_test.go new file mode 100644 index 00000000..da9cdda9 --- /dev/null +++ b/scheduler/client/client_test.go @@ -0,0 +1,420 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package pd + +import ( + "context" + "math" + "sync" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + . "github.com/pingcap/check" +) + +func TestClient(t *testing.T) { + server.EnableZap = true + TestingT(t) +} + +var _ = Suite(&testClientSuite{}) + +type idAllocator struct { + allocator *mockid.IDAllocator +} + +func (i *idAllocator) alloc() uint64 { + id, _ := i.allocator.Alloc() + return id +} + +var ( + regionIDAllocator = &idAllocator{allocator: &mockid.IDAllocator{}} + // Note: IDs below are entirely arbitrary. They are only for checking + // whether GetRegion/GetStore works. + // If we alloc ID in client in the future, these IDs must be updated. 
+ stores = []*metapb.Store{ + {Id: 1, + Address: "localhost:1", + }, + {Id: 2, + Address: "localhost:2", + }, + {Id: 3, + Address: "localhost:3", + }, + {Id: 4, + Address: "localhost:4", + }, + } + + peers = []*metapb.Peer{ + {Id: regionIDAllocator.alloc(), + StoreId: stores[0].GetId(), + }, + {Id: regionIDAllocator.alloc(), + StoreId: stores[1].GetId(), + }, + {Id: regionIDAllocator.alloc(), + StoreId: stores[2].GetId(), + }, + } +) + +type testClientSuite struct { + cleanup server.CleanupFunc + srv *server.Server + client Client + grpcSchedulerClient schedulerpb.SchedulerClient + regionHeartbeat schedulerpb.Scheduler_RegionHeartbeatClient +} + +func (s *testClientSuite) SetUpSuite(c *C) { + var err error + s.srv, s.cleanup, err = server.NewTestServer(c) + c.Assert(err, IsNil) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.srv.GetAddr()) + + mustWaitLeader(c, map[string]*server.Server{s.srv.GetAddr(): s.srv}) + bootstrapServer(c, newHeader(s.srv), s.grpcSchedulerClient) + + s.client, err = NewClient(s.srv.GetEndpoints(), SecurityOption{}) + c.Assert(err, IsNil) + s.regionHeartbeat, err = s.grpcSchedulerClient.RegionHeartbeat(context.Background()) + c.Assert(err, IsNil) + cluster := s.srv.GetRaftCluster() + c.Assert(cluster, NotNil) + for _, store := range stores { + s.srv.PutStore(context.Background(), &schedulerpb.PutStoreRequest{Header: newHeader(s.srv), Store: store}) + } +} + +func (s *testClientSuite) TearDownSuite(c *C) { + s.client.Close() + s.cleanup() +} + +func mustWaitLeader(c *C, svrs map[string]*server.Server) *server.Server { + for i := 0; i < 500; i++ { + for _, s := range svrs { + if !s.IsClosed() && s.GetMember().IsLeader() { + return s + } + } + time.Sleep(100 * time.Millisecond) + } + c.Fatal("no leader") + return nil +} + +func newHeader(srv *server.Server) *schedulerpb.RequestHeader { + return &schedulerpb.RequestHeader{ + ClusterId: srv.ClusterID(), + } +} + +func bootstrapServer(c *C, header *schedulerpb.RequestHeader, client schedulerpb.SchedulerClient) { + req := &schedulerpb.BootstrapRequest{ + Header: header, + Store: stores[0], + } + _, err := client.Bootstrap(context.Background(), req) + c.Assert(err, IsNil) +} + +func (s *testClientSuite) TestTSO(c *C) { + var tss []int64 + for i := 0; i < 100; i++ { + p, l, err := s.client.GetTS(context.Background()) + c.Assert(err, IsNil) + tss = append(tss, p<<18+l) + } + + var last int64 + for _, ts := range tss { + c.Assert(ts, Greater, last) + last = ts + } +} + +func (s *testClientSuite) TestTSORace(c *C) { + var wg sync.WaitGroup + begin := make(chan struct{}) + count := 10 + wg.Add(count) + for i := 0; i < count; i++ { + go func() { + <-begin + for i := 0; i < 100; i++ { + _, _, err := s.client.GetTS(context.Background()) + c.Assert(err, IsNil) + } + wg.Done() + }() + } + close(begin) + wg.Wait() +} + +func (s *testClientSuite) TestGetRegion(c *C) { + regionID := regionIDAllocator.alloc() + region := &metapb.Region{ + Id: regionID, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + Peers: peers, + } + req := &schedulerpb.RegionHeartbeatRequest{ + Header: newHeader(s.srv), + Region: region, + Leader: peers[0], + } + err := s.regionHeartbeat.Send(req) + c.Assert(err, IsNil) + + testutil.WaitUntil(c, func(c *C) bool { + r, leader, err := s.client.GetRegion(context.Background(), []byte("a")) + c.Assert(err, IsNil) + return c.Check(r, DeepEquals, region) && + c.Check(leader, DeepEquals, peers[0]) + }) + c.Succeed() +} + +func (s *testClientSuite) TestGetPrevRegion(c *C) { + regionLen := 10 + 
regions := make([]*metapb.Region, 0, regionLen) + for i := 0; i < regionLen; i++ { + regionID := regionIDAllocator.alloc() + r := &metapb.Region{ + Id: regionID, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + StartKey: []byte{byte(i)}, + EndKey: []byte{byte(i + 1)}, + Peers: peers, + } + regions = append(regions, r) + req := &schedulerpb.RegionHeartbeatRequest{ + Header: newHeader(s.srv), + Region: r, + Leader: peers[0], + } + err := s.regionHeartbeat.Send(req) + c.Assert(err, IsNil) + } + for i := 0; i < 20; i++ { + testutil.WaitUntil(c, func(c *C) bool { + r, leader, err := s.client.GetPrevRegion(context.Background(), []byte{byte(i)}) + c.Assert(err, IsNil) + if i > 0 && i < regionLen { + return c.Check(leader, DeepEquals, peers[0]) && + c.Check(r, DeepEquals, regions[i-1]) + } + return c.Check(leader, IsNil) && + c.Check(r, IsNil) + }) + } + c.Succeed() +} + +func (s *testClientSuite) TestScanRegions(c *C) { + regionLen := 10 + regions := make([]*metapb.Region, 0, regionLen) + for i := 0; i < regionLen; i++ { + regionID := regionIDAllocator.alloc() + r := &metapb.Region{ + Id: regionID, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + StartKey: []byte{byte(i)}, + EndKey: []byte{byte(i + 1)}, + Peers: peers, + } + regions = append(regions, r) + req := &schedulerpb.RegionHeartbeatRequest{ + Header: newHeader(s.srv), + Region: r, + Leader: peers[0], + } + err := s.regionHeartbeat.Send(req) + c.Assert(err, IsNil) + } + + // Wait for region heartbeats. + testutil.WaitUntil(c, func(c *C) bool { + scanRegions, _, err := s.client.ScanRegions(context.Background(), []byte{0}, nil, 10) + return err == nil && len(scanRegions) == 10 + }) + + // Set leader of region3 to nil. + region3 := core.NewRegionInfo(regions[3], nil) + s.srv.GetRaftCluster().HandleRegionHeartbeat(region3) + + check := func(start, end []byte, limit int, expect []*metapb.Region) { + scanRegions, leaders, err := s.client.ScanRegions(context.Background(), start, end, limit) + c.Assert(err, IsNil) + c.Assert(scanRegions, HasLen, len(expect)) + c.Assert(leaders, HasLen, len(expect)) + c.Log("scanRegions", scanRegions) + c.Log("expect", expect) + c.Log("scanLeaders", leaders) + for i := range expect { + c.Assert(scanRegions[i], DeepEquals, expect[i]) + if scanRegions[i].GetId() == region3.GetID() { + c.Assert(leaders[i], DeepEquals, &metapb.Peer{}) + } else { + c.Assert(leaders[i], DeepEquals, expect[i].Peers[0]) + } + } + } + + check([]byte{0}, nil, 10, regions) + check([]byte{1}, nil, 5, regions[1:6]) + check([]byte{100}, nil, 1, nil) + check([]byte{1}, []byte{6}, 0, regions[1:6]) + check([]byte{1}, []byte{6}, 2, regions[1:3]) +} + +func (s *testClientSuite) TestGetRegionByID(c *C) { + regionID := regionIDAllocator.alloc() + region := &metapb.Region{ + Id: regionID, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + Peers: peers, + } + req := &schedulerpb.RegionHeartbeatRequest{ + Header: newHeader(s.srv), + Region: region, + Leader: peers[0], + } + err := s.regionHeartbeat.Send(req) + c.Assert(err, IsNil) + + testutil.WaitUntil(c, func(c *C) bool { + r, leader, err := s.client.GetRegionByID(context.Background(), regionID) + c.Assert(err, IsNil) + return c.Check(r, DeepEquals, region) && + c.Check(leader, DeepEquals, peers[0]) + }) + c.Succeed() +} + +func (s *testClientSuite) TestGetStore(c *C) { + cluster := s.srv.GetRaftCluster() + c.Assert(cluster, NotNil) + store := stores[0] + + // Get an up store should be OK. 
+ n, err := s.client.GetStore(context.Background(), store.GetId()) + c.Assert(err, IsNil) + c.Assert(n, DeepEquals, store) + + stores, err := s.client.GetAllStores(context.Background()) + c.Assert(err, IsNil) + c.Assert(stores, DeepEquals, stores) + + // Mark the store as offline. + err = cluster.RemoveStore(store.GetId()) + c.Assert(err, IsNil) + offlineStore := proto.Clone(store).(*metapb.Store) + offlineStore.State = metapb.StoreState_Offline + + // Get an offline store should be OK. + n, err = s.client.GetStore(context.Background(), store.GetId()) + c.Assert(err, IsNil) + c.Assert(n, DeepEquals, offlineStore) + + // Should return offline stores. + contains := false + stores, err = s.client.GetAllStores(context.Background()) + c.Assert(err, IsNil) + for _, store := range stores { + if store.GetId() == offlineStore.GetId() { + contains = true + c.Assert(store, DeepEquals, offlineStore) + } + } + c.Assert(contains, IsTrue) + + // Mark the store as tombstone. + err = cluster.BuryStore(store.GetId(), true) + c.Assert(err, IsNil) + tombstoneStore := proto.Clone(store).(*metapb.Store) + tombstoneStore.State = metapb.StoreState_Tombstone + + // Get a tombstone store should fail. + n, err = s.client.GetStore(context.Background(), store.GetId()) + c.Assert(err, IsNil) + c.Assert(n, IsNil) + + // Should return tombstone stores. + contains = false + stores, err = s.client.GetAllStores(context.Background()) + c.Assert(err, IsNil) + for _, store := range stores { + if store.GetId() == tombstoneStore.GetId() { + contains = true + c.Assert(store, DeepEquals, tombstoneStore) + } + } + c.Assert(contains, IsTrue) + + // Should not return tombstone stores. + stores, err = s.client.GetAllStores(context.Background(), WithExcludeTombstone()) + c.Assert(err, IsNil) + for _, store := range stores { + c.Assert(store, Not(Equals), tombstoneStore) + } +} + +func (s *testClientSuite) checkGCSafePoint(c *C, expectedSafePoint uint64) { + req := &schedulerpb.GetGCSafePointRequest{ + Header: newHeader(s.srv), + } + resp, err := s.srv.GetGCSafePoint(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.SafePoint, Equals, expectedSafePoint) +} + +func (s *testClientSuite) TestUpdateGCSafePoint(c *C) { + s.checkGCSafePoint(c, 0) + for _, safePoint := range []uint64{0, 1, 2, 3, 233, 23333, 233333333333, math.MaxUint64} { + newSafePoint, err := s.client.UpdateGCSafePoint(context.Background(), safePoint) + c.Assert(err, IsNil) + c.Assert(newSafePoint, Equals, safePoint) + s.checkGCSafePoint(c, safePoint) + } + // If the new safe point is less than the old one, it should not be updated. + newSafePoint, err := s.client.UpdateGCSafePoint(context.Background(), 1) + c.Assert(newSafePoint, Equals, uint64(math.MaxUint64)) + c.Assert(err, IsNil) + s.checkGCSafePoint(c, math.MaxUint64) +} diff --git a/scheduler/conf/config.toml b/scheduler/conf/config.toml new file mode 100644 index 00000000..a3ad3eb0 --- /dev/null +++ b/scheduler/conf/config.toml @@ -0,0 +1,81 @@ +# PD Configuration. + +name = "pd" +data-dir = "default.pd" + +client-urls = "http://127.0.0.1:2379" +## if not set, use ${client-urls} +advertise-client-urls = "" + +peer-urls = "http://127.0.0.1:2380" +## if not set, use ${peer-urls} +advertise-peer-urls = "" + +initial-cluster = "pd=http://127.0.0.1:2380" +initial-cluster-state = "new" + +lease = 3 +tso-save-interval = "3s" + +[security] +## Path of file that contains list of trusted SSL CAs. 
if set, following four settings shouldn't be empty +cacert-path = "" +## Path of file that contains X509 certificate in PEM format. +cert-path = "" +## Path of file that contains X509 key in PEM format. +key-path = "" + +[log] +level = "info" + +## log format, one of json, text, console +# format = "text" + +## disable automatic timestamps in output +# disable-timestamp = false + +# file logging +[log.file] +# filename = "" +## max log file size in MB +# max-size = 300 +## max log file keep days +# max-days = 28 +## maximum number of old log files to retain +# max-backups = 7 +## rotate log by day +# log-rotate = true + +[schedule] +split-merge-interval = "1h" +max-snapshot-count = 3 +max-pending-peer-count = 16 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 2048 +replica-schedule-limit = 64 +## There are some strategics supported: ["count", "size"], default: "count" +# leader-schedule-strategy = "count" +## When the score difference between the leader or Region of the two stores is +## less than specified multiple times of the Region size, it is considered in balance by PD. +## If it equals 0.0, PD will automatically adjust it. +# tolerant-size-ratio = 0.0 + +## This three parameters control the merge scheduler behavior. +## If it is true, it means a region can only be merged into the next region of it. +# enable-one-way-merge = false +## There are some strategics supported: ["table", "raw", "txn"], default: "table" +# merge-strategy = "table" +## If it is true, it means two region within different tables can be merged. +## This option only works when merge strategy is "table". +# enable-cross-table-merge = false + +## customized schedulers, the format is as below +## if empty, it will use balance-leader, balance-region as default +# [[schedule.schedulers]] +# type = "evict-leader" +# args = ["1"] + +[replication] +## The number of replicas for each region. +max-replicas = 3 diff --git a/scheduler/conf/simconfig.toml b/scheduler/conf/simconfig.toml new file mode 100644 index 00000000..68ab11e6 --- /dev/null +++ b/scheduler/conf/simconfig.toml @@ -0,0 +1,32 @@ +# PD Simulator Configuration + +[tick] +## the tick interval when starting PD inside (default: "100ms") +sim-tick-interval = "100ms" + +[store] +## the capacity size of a new store in GB (default: 1024) +store-capacity = 1024 +## the available size of a new store in GB (default: 1024) +store-available = 1024 +## the io rate of a new store in MB/s (default: 40) +store-io-per-second = 40 +## the version of a new store (default: "2.1.0") +store-version = "2.1.0" + +## the meaning of these configurations below are similar with config.toml +[server] +lease = 1 +tso-save-interval = "200ms" +tick-interval = "100ms" +election-interval = "3s" +leader-priority-check-interval = "100ms" + +[server.schedule] +split-merge-interval = "1ms" +max-store-down-time = "30s" +leader-schedule-limit = 32 +region-schedule-limit = 128 +replica-schedule-limit = 32 +merge-schedule-limit = 32 +store-balance-rate = 512.0 diff --git a/scheduler/main.go b/scheduler/main.go new file mode 100644 index 00000000..831e1f1d --- /dev/null +++ b/scheduler/main.go @@ -0,0 +1,122 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "flag" + "os" + "os/signal" + "syscall" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil" + "github.com/pingcap-incubator/tinykv/scheduler/server" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" + + // Register schedulers. + _ "github.com/pingcap-incubator/tinykv/scheduler/server/schedulers" +) + +func main() { + cfg := config.NewConfig() + err := cfg.Parse(os.Args[1:]) + + if cfg.Version { + server.PrintPDInfo() + exit(0) + } + + defer logutil.LogPanic() + + switch errors.Cause(err) { + case nil: + case flag.ErrHelp: + exit(0) + default: + log.Fatal("parse cmd flags error", zap.Error(err)) + } + + if cfg.ConfigCheck { + server.PrintConfigCheckMsg(cfg) + exit(0) + } + + // New zap logger + err = cfg.SetupLogger() + if err == nil { + log.ReplaceGlobals(cfg.GetZapLogger(), cfg.GetZapLogProperties()) + } else { + log.Fatal("initialize logger error", zap.Error(err)) + } + // Flushing any buffered log entries + defer log.Sync() + + // The old logger + err = logutil.InitLogger(&cfg.Log) + if err != nil { + log.Fatal("initialize logger error", zap.Error(err)) + } + + server.LogPDInfo() + + for _, msg := range cfg.WarningMsgs { + log.Warn(msg) + } + + svr, err := server.CreateServer(cfg) + if err != nil { + log.Fatal("create server failed", zap.Error(err)) + } + + if err = server.InitHTTPClient(svr); err != nil { + log.Fatal("initial http client for api handler failed", zap.Error(err)) + } + + sc := make(chan os.Signal, 1) + signal.Notify(sc, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + + ctx, cancel := context.WithCancel(context.Background()) + var sig os.Signal + go func() { + sig = <-sc + cancel() + }() + + if err := svr.Run(ctx); err != nil { + log.Fatal("run server failed", zap.Error(err)) + } + + <-ctx.Done() + log.Info("Got signal to exit", zap.String("signal", sig.String())) + + svr.Close() + switch sig { + case syscall.SIGTERM: + exit(0) + default: + exit(1) + } +} + +func exit(code int) { + log.Sync() + os.Exit(code) +} diff --git a/scheduler/pkg/apiutil/apiutil.go b/scheduler/pkg/apiutil/apiutil.go new file mode 100644 index 00000000..18c2074e --- /dev/null +++ b/scheduler/pkg/apiutil/apiutil.go @@ -0,0 +1,29 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
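Putting the pieces above together, a minimal hypothetical caller of the scheduler client could look like the sketch below. It is not part of the patch itself: it assumes a bootstrapped `tinyscheduler-server` listening on the default `client-urls` from `scheduler/conf/config.toml` (`http://127.0.0.1:2379`), TLS disabled (an empty `SecurityOption`, matching the empty paths in `[security]`), and the package imported from its directory `scheduler/client` under the module path in `go.mod`. Note how `GetTS` rides on the batched TSO stream: `processTSORequests` sends one `TsoRequest` with `Count = n` for `n` pending callers, and because the server answers with the highest timestamp, the first caller receives `logical - (n - 1)` and the i-th caller `firstLogical + i`.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	pd "github.com/pingcap-incubator/tinykv/scheduler/client"
)

func main() {
	// Empty SecurityOption => plain gRPC, matching the empty TLS paths in config.toml.
	client, err := pd.NewClient([]string{"127.0.0.1:2379"}, pd.SecurityOption{})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	fmt.Println("cluster id:", client.GetClusterID(context.Background()))

	// GetTS blocks until the batched TSO response fills in this request's
	// physical/logical pair (see tsLoop / processTSORequests above).
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	physical, logical, err := client.GetTS(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tso: physical=%d logical=%d\n", physical, logical)

	// Region lookups only return something useful once the cluster has been
	// bootstrapped and regions have been reported via heartbeats.
	region, leader, err := client.GetRegion(ctx, []byte("some-key"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("region:", region.GetId(), "leader store:", leader.GetStoreId())
}
```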
+ +package apiutil + +// JSONError lets callers check for just one error type +type JSONError struct { + Err error +} + +func (e JSONError) Error() string { + return e.Err.Error() +} + +// FieldError connects an error to a particular field +type FieldError struct { + error + field string +} diff --git a/scheduler/pkg/btree/.travis.yml b/scheduler/pkg/btree/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/scheduler/pkg/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/scheduler/pkg/btree/LICENSE b/scheduler/pkg/btree/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/scheduler/pkg/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/scheduler/pkg/btree/README.md b/scheduler/pkg/btree/README.md new file mode 100644 index 00000000..6062a4da --- /dev/null +++ b/scheduler/pkg/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/scheduler/pkg/btree/btree.go b/scheduler/pkg/btree/btree.go new file mode 100644 index 00000000..2ab69bf9 --- /dev/null +++ b/scheduler/pkg/btree/btree.go @@ -0,0 +1,1088 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package is a modified version of google/btree. Change as follows: +// * Add `indices` array for `node`, and related codes to maintain it. +// This modification may affect performance of insertion and deletion. +// * Add `GetAt` and `GetWithIndex` method for `BTree`. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. 
+// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + // DefaultFreeListSize is the default size of free list. + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). 
+func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// indices stores indices of items in a node. +// If the node has any children, indices[i] is the index of items[i] in the subtree. 
+// We have following formulas: +// +// indices[i] = if i == 0 { children[0].length() } +// else { indices[i-1] + 1 + children[i].length() } +type indices []int + +func (s *indices) addAt(index int, delta int) { + for i := index; i < len(*s); i++ { + (*s)[i] += delta + } +} + +func (s *indices) insertAt(index int, sz int) { + *s = append(*s, -1) + for i := len(*s) - 1; i >= index && i > 0; i-- { + (*s)[i] = (*s)[i-1] + sz + 1 + } + if index == 0 { + (*s)[0] = sz + } +} + +func (s *indices) push(sz int) { + if len(*s) == 0 { + *s = append(*s, sz) + } else { + *s = append(*s, (*s)[len(*s)-1]+1+sz) + } +} + +// children[i] is splited. +func (s *indices) split(index, nextSize int) { + s.insertAt(index+1, -1) + (*s)[index] -= 1 + nextSize +} + +// children[i] and children[i+1] is merged +func (s *indices) merge(index int) { + for i := index; i < len(*s)-1; i++ { + (*s)[i] = (*s)[i+1] + } + *s = (*s)[:len(*s)-1] +} + +func (s *indices) removeAt(index int) int { + sz := (*s)[index] + if index > 0 { + sz = sz - (*s)[index-1] - 1 + } + for i := index + 1; i < len(*s); i++ { + (*s)[i-1] = (*s)[i] - sz - 1 + } + *s = (*s)[:len(*s)-1] + return sz +} + +func (s *indices) pop() int { + l := len(*s) + out := (*s)[l-1] + if l != 1 { + out -= (*s)[l-2] + 1 + } + *s = (*s)[:len(*s)-1] + return out +} + +func (s *indices) truncate(index int) { + *s = (*s)[:index] + // no need to clear +} + +func (s indices) find(k int) (index int, found bool) { + i := sort.SearchInts(s, k) + return i, s[i] == k +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + indices indices + cow *copyOnWriteContext +} + +func (n *node) length() int { + if len(n.indices) <= 0 { + return len(n.items) + } + return n.indices[len(n.indices)-1] +} + +func (n *node) initSize() { + l := len(n.children) + if l <= 0 { + n.indices.truncate(0) + return + } else if l <= cap(n.indices) { + n.indices = n.indices[:l] + } else { + n.indices = make([]int, l) + } + n.indices[0] = n.children[0].length() + for i := 1; i < l; i++ { + n.indices[i] = n.indices[i-1] + 1 + n.children[i].length() + } +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + // Copy indices + if cap(out.indices) >= len(n.indices) { + out.indices = out.indices[:len(n.indices)] + } else { + out.indices = make(indices, len(n.indices), cap(n.indices)) + } + copy(out.indices, n.indices) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) 
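+	// The current node keeps items[:i]; items[i] itself is returned so the
+	// caller can lift it into the parent node.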
+ n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + next.initSize() + n.children.truncate(i + 1) + n.indices.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + n.indices.split(i, second.length()) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + out := n.mutableChild(i).insert(item, maxItems) + if out == nil { + n.indices.addAt(i, 1) + } + return out +} + +func (n *node) getAt(k int) Item { + if k >= n.length() || k < 0 { + return nil + } + if len(n.children) == 0 { + return n.items[k] + } + i, found := n.indices.find(k) + if found { + return n.items[i] + } + if i == 0 { + return n.children[0].getAt(k) + } + return n.children[i].getAt(k - n.indices[i-1] - 1) +} + +// index is the number of items < key +func (n *node) getWithIndex(key Item) (Item, int) { + i, found := n.items.find(key) + if found { + rk := i + if len(n.indices) > 0 { + rk = n.indices[i] + } + return n.items[i], rk + } else if len(n.children) > 0 { + out, rk := n.children[i].getWithIndex(key) + if i > 0 { + rk += n.indices[i-1] + 1 + } + return out, rk + } + return nil, i +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. 
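+// typ selects what is removed: the given item, the subtree minimum, or the
+// subtree maximum. Whenever an item is actually removed, the indices along the
+// descent path are decremented so positional lookups stay consistent.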
+func (n *node) remove(item Item, minItems int, typ toRemove) (out Item) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out = n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + } else { + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + out = child.remove(item, minItems, typ) + } + if out != nil { + n.indices.addAt(i, -1) + } + return +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
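+//
+// In this implementation the per-node indices also have to be kept in sync,
+// so every steal/merge branch below adjusts n.indices (and the children's
+// indices) alongside the moved items.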
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + n.indices[i-1] -= 1 + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + stealSize := stealFrom.indices.pop() + n.indices[i-1] -= stealSize + child.indices.insertAt(0, stealSize) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + n.indices[i] += 1 + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + stealSize := stealFrom.indices.removeAt(0) + n.indices[i] += stealSize + child.indices.push(stealSize) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + for _, nn := range mergeChild.children { + child.indices.push(nn.length()) + } + n.indices.merge(i) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
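+//
+// The two boolean results are (hit, ok): hit records whether an item at or
+// after 'start' has already been handled (so an item equal to 'start' is only
+// skipped once when includeStart is false), and ok turns false as soon as
+// iteration should stop.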
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { // revive:disable-line:flag-parameter,confusing-results + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v, %v\n", strings.Repeat(" ", level), n.items, n.indices) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. 
Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.indices.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } + return ftFreelistFull + } + return ftNotOwned +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + t.root.initSize() + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// GetWithIndex gets the key and its index. +// If the key is not in the tree, the the index is the number of items < key. +func (t *BTree) GetWithIndex(key Item) (Item, int) { + if t.root == nil { + return nil, 0 + } + return t.root.getWithIndex(key) +} + +// GetAt returns the item with index k. If k < 0 or k >= t.Len(), returns nil. 
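+//
+// Indices are 0-based in ascending key order, so tr.GetAt(0) is tr.Min() and
+// tr.GetAt(tr.Len()-1) is tr.Max() (the package tests assert exactly this).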
+func (t *BTree) GetAt(k int) Item { + if t.root == nil { + return nil + } + return t.root.getAt(k) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// function for test. +func (t *BTree) getRootLength() int { + if t.root == nil { + return 0 + } + return t.root.length() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { // revive:disable-line:flag-parameter + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/scheduler/pkg/btree/btree_mem.go b/scheduler/pkg/btree/btree_mem.go new file mode 100644 index 00000000..cb95b7fa --- /dev/null +++ b/scheduler/pkg/btree/btree_mem.go @@ -0,0 +1,76 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +// This binary compares memory usage between btree and gollrb. 
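+//
+// It is excluded from regular builds by the ignore build tag above and is
+// meant to be run by hand; the -size, -degree and -llrb flags defined below
+// control the tree size and which implementation is exercised.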
+package main + +import ( + "flag" + "fmt" + "math/rand" + "runtime" + "time" + + "github.com/google/btree" + "github.com/petar/GoLLRB/llrb" +) + +var ( + size = flag.Int("size", 1000000, "size of the tree to build") + degree = flag.Int("degree", 8, "degree of btree") + gollrb = flag.Bool("llrb", false, "use llrb instead of btree") +) + +func main() { + flag.Parse() + vals := rand.Perm(*size) + var t, v interface{} + v = vals + var stats runtime.MemStats + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- BEFORE ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + start := time.Now() + if *gollrb { + tr := llrb.New() + for _, v := range vals { + tr.ReplaceOrInsert(llrb.Int(v)) + } + t = tr // keep it around + } else { + tr := btree.New(*degree) + for _, v := range vals { + tr.ReplaceOrInsert(btree.Int(v)) + } + t = tr // keep it around + } + fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) + fmt.Println("-------- AFTER ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- AFTER GC ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + if t == v { + fmt.Println("to make sure vals and tree aren't GC'd") + } +} diff --git a/scheduler/pkg/btree/btree_test.go b/scheduler/pkg/btree/btree_test.go new file mode 100644 index 00000000..cd27bdba --- /dev/null +++ b/scheduler/pkg/btree/btree_test.go @@ -0,0 +1,866 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package btree + +import ( + "flag" + "fmt" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "time" +) + +func init() { + seed := time.Now().Unix() + fmt.Println(seed) + rand.Seed(seed) +} + +// perm returns a random permutation of n Int items in the range [0, n). +func perm(n int) (out []Item) { + for _, v := range rand.Perm(n) { + out = append(out, Int(v)) + } + return +} + +// rang returns an ordered list of Int items in the range [0, n). +func rang(n int) (out []Item) { + for i := 0; i < n; i++ { + out = append(out, Int(i)) + } + return +} + +// all extracts all items from a tree in order as a slice. +func all(t *BTree) (out []Item) { + t.Ascend(func(a Item) bool { + out = append(out, a) + return true + }) + return +} + +// rangerev returns a reversed ordered list of Int items in the range [0, n). +func rangrev(n int) (out []Item) { + for i := n - 1; i >= 0; i-- { + out = append(out, Int(i)) + } + return +} + +// allrev extracts all items from a tree in reverse order as a slice. 
+func allrev(t *BTree) (out []Item) { + t.Descend(func(a Item) bool { + out = append(out, a) + return true + }) + return +} + +func assertEq(t *testing.T, desc string, got, need interface{}) { + if !reflect.DeepEqual(need, got) { + t.Fatalf("%s failed: need %T %v, but got %T %v", desc, need, need, got, got) + } +} + +func TestBTreeSizeInfo(t *testing.T) { + tr := New(*btreeDegree) + const maxElt = 10000 + elements := perm(maxElt) + // insert 0..maxElt + for _, elt := range elements { + tr.ReplaceOrInsert(elt) + assertEq(t, "root length", tr.getRootLength(), tr.Len()) + assertEq(t, "check min", tr.GetAt(0), tr.Min()) + assertEq(t, "check max", tr.GetAt(tr.Len()-1), tr.Max()) + } + for k := 0; k < maxElt; k++ { + assertEq(t, "get k-th", tr.GetAt(k), Int(k)) + } + for x := 0; x < maxElt; x++ { + y, rk := tr.GetWithIndex(Int(x)) + assertEq(t, "get", y, Int(x)) + assertEq(t, "get rank", rk, x) + } + + // get rank of maxElt + 1 + { + y, rk := tr.GetWithIndex(Int(maxElt + 1)) + assertEq(t, "get max+1", y, nil) + assertEq(t, "get max+1 rank", rk, tr.Len()) + } + + // delete x if x % 3 == 0 + for _, elt := range elements { + if int(elt.(Int))%3 != 0 { + tr.Delete(elt) + } + assertEq(t, "after delete root length", tr.getRootLength(), tr.Len()) + assertEq(t, "after delete check min", tr.GetAt(0), tr.Min()) + assertEq(t, "after delete check max", tr.GetAt(tr.Len()-1), tr.Max()) + } + for k := 0; k < maxElt/3; k++ { + assertEq(t, "after delete get k-th", tr.GetAt(k), Int(3*k)) + } + for x := 0; x < maxElt; x++ { + y, rk := tr.GetWithIndex(Int(x)) + if x%3 == 0 { + assertEq(t, "after delete get", y, Int(x)) + assertEq(t, "after delete get rank", rk, x/3) + } else { + assertEq(t, "after delete get nil", y, nil) + assertEq(t, "after delete get nil rank", rk, x/3+1) + } + } + + // delete max until tr.Len() <= 100 + for tr.Len() > 100 { + tr.DeleteMax() + assertEq(t, "delete max root length", tr.getRootLength(), tr.Len()) + assertEq(t, "delete max check min", tr.GetAt(0), tr.Min()) + assertEq(t, "delete max check max", tr.GetAt(tr.Len()-1), tr.Max()) + } + for k := 0; k < maxElt/3 && k < 100; k++ { + assertEq(t, "delete max get k-th", tr.GetAt(k), Int(3*k)) + } + for x := 0; x < maxElt && x < 300; x++ { + y, rk := tr.GetWithIndex(Int(x)) + if x%3 == 0 { + assertEq(t, "delete max get", y, Int(x)) + assertEq(t, "delete max get rank", rk, x/3) + } else { + assertEq(t, "delete max get nil", y, nil) + assertEq(t, "delete max get nil rank", rk, x/3+1) + } + } + +} + +var btreeDegree = flag.Int("degree", 32, "B-Tree degree") + +func TestBTree(t *testing.T) { + tr := New(*btreeDegree) + const treeSize = 10000 + for i := 0; i < 10; i++ { + if min := tr.Min(); min != nil { + t.Fatalf("empty min, got %+v", min) + } + if max := tr.Max(); max != nil { + t.Fatalf("empty max, got %+v", max) + } + for _, item := range perm(treeSize) { + if x := tr.ReplaceOrInsert(item); x != nil { + t.Fatal("insert found item", item) + } + } + for _, item := range perm(treeSize) { + if x := tr.ReplaceOrInsert(item); x == nil { + t.Fatal("insert didn't find item", item) + } + } + if min, want := tr.Min(), Item(Int(0)); min != want { + t.Fatalf("min: want %+v, got %+v", want, min) + } + if max, want := tr.Max(), Item(Int(treeSize-1)); max != want { + t.Fatalf("max: want %+v, got %+v", want, max) + } + got := all(tr) + want := rang(treeSize) + if !reflect.DeepEqual(got, want) { + t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) + } + + gotrev := allrev(tr) + wantrev := rangrev(treeSize) + if !reflect.DeepEqual(gotrev, wantrev) { + 
t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) + } + + for _, item := range perm(treeSize) { + if x := tr.Delete(item); x == nil { + t.Fatalf("didn't find %v", item) + } + } + if got = all(tr); len(got) > 0 { + t.Fatalf("some left!: %v", got) + } + } +} + +func ExampleBTree() { + tr := New(*btreeDegree) + for i := Int(0); i < 10; i++ { + tr.ReplaceOrInsert(i) + } + fmt.Println("len: ", tr.Len()) + fmt.Println("get3: ", tr.Get(Int(3))) + fmt.Println("get100: ", tr.Get(Int(100))) + fmt.Println("del4: ", tr.Delete(Int(4))) + fmt.Println("del100: ", tr.Delete(Int(100))) + fmt.Println("replace5: ", tr.ReplaceOrInsert(Int(5))) + fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100))) + fmt.Println("min: ", tr.Min()) + fmt.Println("delmin: ", tr.DeleteMin()) + fmt.Println("max: ", tr.Max()) + fmt.Println("delmax: ", tr.DeleteMax()) + fmt.Println("len: ", tr.Len()) + // Output: + // len: 10 + // get3: 3 + // get100: + // del4: 4 + // del100: + // replace5: 5 + // replace100: + // min: 0 + // delmin: 0 + // max: 100 + // delmax: 100 + // len: 8 +} + +func TestDeleteMin(t *testing.T) { + tr := New(3) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() { + got = append(got, v) + } + if want := rang(100); !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestDeleteMax(t *testing.T) { + tr := New(3) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() { + got = append(got, v) + } + // Reverse our list. + for i := 0; i < len(got)/2; i++ { + got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i] + } + if want := rang(100); !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendRange(t *testing.T) { + tr := New(2) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendRange(Int(40), Int(60), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendRange(Int(40), Int(60), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestDescendRange(t *testing.T) { + tr := New(2) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.DescendRange(Int(60), Int(40), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rangrev(100)[39:59]; !reflect.DeepEqual(got, want) { + t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.DescendRange(Int(60), Int(40), func(a Item) bool { + if a.(Int) < 50 { + return false + } + got = append(got, a) + return true + }) + if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { + t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) + } +} +func TestAscendLessThan(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendLessThan(Int(60), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[:60]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendLessThan(Int(60), func(a Item) bool { 
+ if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestDescendLessOrEqual(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.DescendLessOrEqual(Int(40), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rangrev(100)[59:]; !reflect.DeepEqual(got, want) { + t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.DescendLessOrEqual(Int(60), func(a Item) bool { + if a.(Int) < 50 { + return false + } + got = append(got, a) + return true + }) + if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { + t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) + } +} +func TestAscendGreaterOrEqual(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[40:]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestDescendGreaterThan(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.DescendGreaterThan(Int(40), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rangrev(100)[:59]; !reflect.DeepEqual(got, want) { + t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.DescendGreaterThan(Int(40), func(a Item) bool { + if a.(Int) < 50 { + return false + } + got = append(got, a) + return true + }) + if want := rangrev(100)[:50]; !reflect.DeepEqual(got, want) { + t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) + } +} + +const benchmarkTreeSize = 10000 + +func BenchmarkInsert(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + i++ + if i >= b.N { + return + } + } + } +} + +func BenchmarkSeek(b *testing.B) { + b.StopTimer() + size := 100000 + insertP := perm(size) + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + tr.AscendGreaterOrEqual(Int(i%size), func(i Item) bool { return false }) + } +} + +func BenchmarkDeleteInsert(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + tr.Delete(insertP[i%benchmarkTreeSize]) + tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) + } +} + +func BenchmarkDeleteInsertCloneOnce(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + } + tr = tr.Clone() + b.StartTimer() + for i := 0; i < b.N; i++ { + tr.Delete(insertP[i%benchmarkTreeSize]) + tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) + } +} + +func BenchmarkDeleteInsertCloneEachTime(b 
*testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + tr = tr.Clone() + tr.Delete(insertP[i%benchmarkTreeSize]) + tr.ReplaceOrInsert(insertP[i%benchmarkTreeSize]) + } +} + +func BenchmarkDelete(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr.Delete(item) + i++ + if i >= b.N { + return + } + } + if tr.Len() > 0 { + panic(tr.Len()) + } + } +} + +func BenchmarkGet(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr.Get(item) + i++ + if i >= b.N { + return + } + } + } +} + +func BenchmarkGetCloneEachTime(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr = tr.Clone() + tr.Get(item) + i++ + if i >= b.N { + return + } + } + } +} + +type byInts []Item + +func (a byInts) Len() int { + return len(a) +} + +func (a byInts) Less(i, j int) bool { + return a[i].(Int) < a[j].(Int) +} + +func (a byInts) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func BenchmarkAscend(b *testing.B) { + arr := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, v := range arr { + tr.ReplaceOrInsert(v) + } + sort.Sort(byInts(arr)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + j := 0 + tr.Ascend(func(item Item) bool { + if item.(Int) != arr[j].(Int) { + b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) + } + j++ + return true + }) + } +} + +func BenchmarkDescend(b *testing.B) { + arr := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, v := range arr { + tr.ReplaceOrInsert(v) + } + sort.Sort(byInts(arr)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + j := len(arr) - 1 + tr.Descend(func(item Item) bool { + if item.(Int) != arr[j].(Int) { + b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) + } + j-- + return true + }) + } +} +func BenchmarkAscendRange(b *testing.B) { + arr := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, v := range arr { + tr.ReplaceOrInsert(v) + } + sort.Sort(byInts(arr)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + j := 100 + tr.AscendRange(Int(100), arr[len(arr)-100], func(item Item) bool { + if item.(Int) != arr[j].(Int) { + b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) + } + j++ + return true + }) + if j != len(arr)-100 { + b.Fatalf("expected: %v, got %v", len(arr)-100, j) + } + } +} + +func BenchmarkDescendRange(b *testing.B) { + arr := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, v := range arr { + tr.ReplaceOrInsert(v) + } + sort.Sort(byInts(arr)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + j := len(arr) - 100 + tr.DescendRange(arr[len(arr)-100], Int(100), func(item Item) bool { + if item.(Int) != arr[j].(Int) { + b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) + } + j-- + 
return true + }) + if j != 100 { + b.Fatalf("expected: %v, got %v", len(arr)-100, j) + } + } +} +func BenchmarkAscendGreaterOrEqual(b *testing.B) { + arr := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, v := range arr { + tr.ReplaceOrInsert(v) + } + sort.Sort(byInts(arr)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + j := 100 + k := 0 + tr.AscendGreaterOrEqual(Int(100), func(item Item) bool { + if item.(Int) != arr[j].(Int) { + b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) + } + j++ + k++ + return true + }) + if j != len(arr) { + b.Fatalf("expected: %v, got %v", len(arr), j) + } + if k != len(arr)-100 { + b.Fatalf("expected: %v, got %v", len(arr)-100, k) + } + } +} +func BenchmarkDescendLessOrEqual(b *testing.B) { + arr := perm(benchmarkTreeSize) + tr := New(*btreeDegree) + for _, v := range arr { + tr.ReplaceOrInsert(v) + } + sort.Sort(byInts(arr)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + j := len(arr) - 100 + k := len(arr) + tr.DescendLessOrEqual(arr[len(arr)-100], func(item Item) bool { + if item.(Int) != arr[j].(Int) { + b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) + } + j-- + k-- + return true + }) + if j != -1 { + b.Fatalf("expected: %v, got %v", -1, j) + } + if k != 99 { + b.Fatalf("expected: %v, got %v", 99, k) + } + } +} + +const cloneTestSize = 10000 + +func cloneTest(t *testing.T, b *BTree, start int, p []Item, wg *sync.WaitGroup, trees *[]*BTree, lock *sync.Mutex) { + t.Logf("Starting new clone at %v", start) + lock.Lock() + *trees = append(*trees, b) + lock.Unlock() + for i := start; i < cloneTestSize; i++ { + b.ReplaceOrInsert(p[i]) + if i%(cloneTestSize/5) == 0 { + wg.Add(1) + go cloneTest(t, b.Clone(), i+1, p, wg, trees, lock) + } + } + wg.Done() +} + +func TestCloneConcurrentOperations(t *testing.T) { + b := New(*btreeDegree) + var trees []*BTree + p := perm(cloneTestSize) + var wg sync.WaitGroup + wg.Add(1) + go cloneTest(t, b, 0, p, &wg, &trees, &sync.Mutex{}) + wg.Wait() + want := rang(cloneTestSize) + t.Logf("Starting equality checks on %d trees", len(trees)) + for i, tree := range trees { + if !reflect.DeepEqual(want, all(tree)) { + t.Errorf("tree %v mismatch", i) + } + } + t.Log("Removing half from first half") + toRemove := rang(cloneTestSize)[cloneTestSize/2:] + for i := 0; i < len(trees)/2; i++ { + tree := trees[i] + wg.Add(1) + go func() { + for _, item := range toRemove { + tree.Delete(item) + } + wg.Done() + }() + } + wg.Wait() + t.Log("Checking all values again") + for i, tree := range trees { + var wantpart []Item + if i < len(trees)/2 { + wantpart = want[:cloneTestSize/2] + } else { + wantpart = want + } + if got := all(tree); !reflect.DeepEqual(wantpart, got) { + t.Errorf("tree %v mismatch, want %v got %v", i, len(want), len(got)) + } + } +} + +func BenchmarkDeleteAndRestore(b *testing.B) { + items := perm(16392) + b.ResetTimer() + b.Run(`CopyBigFreeList`, func(b *testing.B) { + fl := NewFreeList(16392) + tr := NewWithFreeList(*btreeDegree, fl) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + dels := make([]Item, 0, tr.Len()) + tr.Ascend(ItemIterator(func(b Item) bool { + dels = append(dels, b) + return true + })) + for _, del := range dels { + tr.Delete(del) + } + // tr is now empty, we make a new empty copy of it. 
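+			// Because fl is shared, the new tree can reuse nodes that the
+			// deletes above returned to the freelist.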
+ tr = NewWithFreeList(*btreeDegree, fl) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + } + }) + b.Run(`Copy`, func(b *testing.B) { + tr := New(*btreeDegree) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + dels := make([]Item, 0, tr.Len()) + tr.Ascend(ItemIterator(func(b Item) bool { + dels = append(dels, b) + return true + })) + for _, del := range dels { + tr.Delete(del) + } + // tr is now empty, we make a new empty copy of it. + tr = New(*btreeDegree) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + } + }) + b.Run(`ClearBigFreelist`, func(b *testing.B) { + fl := NewFreeList(16392) + tr := NewWithFreeList(*btreeDegree, fl) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + tr.Clear(true) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + } + }) + b.Run(`Clear`, func(b *testing.B) { + tr := New(*btreeDegree) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + tr.Clear(true) + for _, v := range items { + tr.ReplaceOrInsert(v) + } + } + }) +} diff --git a/scheduler/pkg/cache/cache.go b/scheduler/pkg/cache/cache.go new file mode 100644 index 00000000..674224e3 --- /dev/null +++ b/scheduler/pkg/cache/cache.go @@ -0,0 +1,36 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +// Item is the cache entry. +type Item struct { + Key uint64 + Value interface{} +} + +// Cache is an interface for cache system +type Cache interface { + // Put puts an item into cache. + Put(key uint64, value interface{}) + // Get retrives an item from cache. + Get(key uint64) (interface{}, bool) + // Peek reads an item from cache. The action is no considered 'Use'. + Peek(key uint64) (interface{}, bool) + // Remove eliminates an item from cache. + Remove(key uint64) + // Elems return all items in cache. + Elems() []*Item + // Len returns current cache size + Len() int +} diff --git a/scheduler/pkg/cache/cache_test.go b/scheduler/pkg/cache/cache_test.go new file mode 100644 index 00000000..abcf3946 --- /dev/null +++ b/scheduler/pkg/cache/cache_test.go @@ -0,0 +1,82 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import ( + "context" + "testing" + "time" + + . 
"github.com/pingcap/check" +) + +func TestCore(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testRegionCacheSuite{}) + +type testRegionCacheSuite struct { +} + +func (s *testRegionCacheSuite) TestExpireRegionCache(c *C) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cache := NewTTL(ctx, time.Second, 2*time.Second) + cache.PutWithTTL(1, 1, 1*time.Second) + cache.PutWithTTL(2, "v2", 5*time.Second) + cache.PutWithTTL(3, 3.0, 5*time.Second) + + value, ok := cache.Get(1) + c.Assert(ok, IsTrue) + c.Assert(value, Equals, 1) + + value, ok = cache.Get(2) + c.Assert(ok, IsTrue) + c.Assert(value, Equals, "v2") + + value, ok = cache.Get(3) + c.Assert(ok, IsTrue) + c.Assert(value, Equals, 3.0) + + c.Assert(cache.Len(), Equals, 3) + + time.Sleep(2 * time.Second) + + value, ok = cache.Get(1) + c.Assert(ok, IsFalse) + c.Assert(value, IsNil) + + value, ok = cache.Get(2) + c.Assert(ok, IsTrue) + c.Assert(value, Equals, "v2") + + value, ok = cache.Get(3) + c.Assert(ok, IsTrue) + c.Assert(value, Equals, 3.0) + + c.Assert(cache.Len(), Equals, 2) + + cache.Remove(2) + + value, ok = cache.Get(2) + c.Assert(ok, IsFalse) + c.Assert(value, IsNil) + + value, ok = cache.Get(3) + c.Assert(ok, IsTrue) + c.Assert(value, Equals, 3.0) + + c.Assert(cache.Len(), Equals, 1) +} diff --git a/scheduler/pkg/cache/ttl.go b/scheduler/pkg/cache/ttl.go new file mode 100644 index 00000000..49b4a9d0 --- /dev/null +++ b/scheduler/pkg/cache/ttl.go @@ -0,0 +1,152 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import ( + "context" + "sync" + "time" + + "github.com/pingcap/log" + "go.uber.org/zap" +) + +type ttlCacheItem struct { + value interface{} + expire time.Time +} + +// TTL is a cache that assigns TTL(Time-To-Live) for each items. +type TTL struct { + sync.RWMutex + ctx context.Context + + items map[uint64]ttlCacheItem + ttl time.Duration + gcInterval time.Duration +} + +// NewTTL returns a new TTL cache. +func NewTTL(ctx context.Context, gcInterval time.Duration, ttl time.Duration) *TTL { + c := &TTL{ + ctx: ctx, + items: make(map[uint64]ttlCacheItem), + ttl: ttl, + gcInterval: gcInterval, + } + + go c.doGC() + return c +} + +// Put puts an item into cache. +func (c *TTL) Put(key uint64, value interface{}) { + c.PutWithTTL(key, value, c.ttl) +} + +// PutWithTTL puts an item into cache with specified TTL. +func (c *TTL) PutWithTTL(key uint64, value interface{}, ttl time.Duration) { + c.Lock() + defer c.Unlock() + + c.items[key] = ttlCacheItem{ + value: value, + expire: time.Now().Add(ttl), + } +} + +// Get retrives an item from cache. +func (c *TTL) Get(key uint64) (interface{}, bool) { + c.RLock() + defer c.RUnlock() + + item, ok := c.items[key] + if !ok { + return nil, false + } + + if item.expire.Before(time.Now()) { + return nil, false + } + + return item.value, true +} + +// Remove eliminates an item from cache. +func (c *TTL) Remove(key uint64) { + c.Lock() + defer c.Unlock() + + delete(c.items, key) +} + +// Len returns current cache size. 
+func (c *TTL) Len() int { + c.RLock() + defer c.RUnlock() + + return len(c.items) +} + +// Clear removes all items in the ttl cache. +func (c *TTL) Clear() { + c.Lock() + defer c.Unlock() + + for k := range c.items { + delete(c.items, k) + } +} + +func (c *TTL) doGC() { + ticker := time.NewTicker(c.gcInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + count := 0 + now := time.Now() + c.Lock() + for key := range c.items { + if value, ok := c.items[key]; ok { + if value.expire.Before(now) { + count++ + delete(c.items, key) + } + } + } + c.Unlock() + log.Debug("TTL GC items", zap.Int("count", count)) + case <-c.ctx.Done(): + return + } + } +} + +// TTLUint64 is simple TTL saves only uint64s. +type TTLUint64 struct { + *TTL +} + +// Put saves an ID in cache. +func (c *TTLUint64) Put(id uint64) { + c.TTL.Put(id, nil) +} + +// Exists checks if an ID exists in cache. +func (c *TTLUint64) Exists(id uint64) bool { + _, ok := c.TTL.Get(id) + return ok +} diff --git a/scheduler/pkg/codec/codec.go b/scheduler/pkg/codec/codec.go new file mode 100644 index 00000000..e864299d --- /dev/null +++ b/scheduler/pkg/codec/codec.go @@ -0,0 +1,163 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "bytes" + "encoding/binary" + + "github.com/pkg/errors" +) + +var ( + tablePrefix = []byte{'t'} + metaPrefix = []byte{'m'} +) + +const ( + signMask uint64 = 0x8000000000000000 + + encGroupSize = 8 + encMarker = byte(0xFF) + encPad = byte(0x0) +) + +// Key represents high-level Key type. +type Key []byte + +// TableID returns the table ID of the key, if the key is not table key, returns 0. +func (k Key) TableID() int64 { + _, key, err := DecodeBytes(k) + if err != nil { + // should never happen + return 0 + } + if !bytes.HasPrefix(key, tablePrefix) { + return 0 + } + key = key[len(tablePrefix):] + + _, tableID, _ := DecodeInt(key) + return tableID +} + +// MetaOrTable checks if the key is a meta key or table key. +// If the key is a meta key, it returns true and 0. +// If the key is a table key, it returns false and table ID. +// Otherwise, it returns false and 0. +func (k Key) MetaOrTable() (bool, int64) { + _, key, err := DecodeBytes(k) + if err != nil { + return false, 0 + } + if bytes.HasPrefix(key, metaPrefix) { + return true, 0 + } + if bytes.HasPrefix(key, tablePrefix) { + key = key[len(tablePrefix):] + _, tableID, _ := DecodeInt(key) + return false, tableID + } + return false, 0 +} + +var pads = make([]byte, encGroupSize) + +// EncodeBytes guarantees the encoded value is in ascending order for comparison, +// encoding with the following rule: +// [group1][marker1]...[groupN][markerN] +// group is 8 bytes slice which is padding with 0. 
+// marker is `0xFF - padding 0 count` +// For example: +// [] -> [0, 0, 0, 0, 0, 0, 0, 0, 247] +// [1, 2, 3] -> [1, 2, 3, 0, 0, 0, 0, 0, 250] +// [1, 2, 3, 0] -> [1, 2, 3, 0, 0, 0, 0, 0, 251] +// [1, 2, 3, 4, 5, 6, 7, 8] -> [1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247] +// Refer: https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format#memcomparable-format +func EncodeBytes(data []byte) Key { + // Allocate more space to avoid unnecessary slice growing. + // Assume that the byte slice size is about `(len(data) / encGroupSize + 1) * (encGroupSize + 1)` bytes, + // that is `(len(data) / 8 + 1) * 9` in our implement. + dLen := len(data) + result := make([]byte, 0, (dLen/encGroupSize+1)*(encGroupSize+1)) + for idx := 0; idx <= dLen; idx += encGroupSize { + remain := dLen - idx + padCount := 0 + if remain >= encGroupSize { + result = append(result, data[idx:idx+encGroupSize]...) + } else { + padCount = encGroupSize - remain + result = append(result, data[idx:]...) + result = append(result, pads[:padCount]...) + } + + marker := encMarker - byte(padCount) + result = append(result, marker) + } + return result +} + +// DecodeInt decodes value encoded by EncodeInt before. +// It returns the leftover un-decoded slice, decoded value if no error. +func DecodeInt(b []byte) ([]byte, int64, error) { + if len(b) < 8 { + return nil, 0, errors.New("insufficient bytes to decode value") + } + + u := binary.BigEndian.Uint64(b[:8]) + v := decodeCmpUintToInt(u) + b = b[8:] + return b, v, nil +} + +func decodeCmpUintToInt(u uint64) int64 { + return int64(u ^ signMask) +} + +// DecodeBytes decodes bytes which is encoded by EncodeBytes before, +// returns the leftover bytes and decoded value if no error. +func DecodeBytes(b []byte) ([]byte, []byte, error) { + data := make([]byte, 0, len(b)) + for { + if len(b) < encGroupSize+1 { + return nil, nil, errors.New("insufficient bytes to decode value") + } + + groupBytes := b[:encGroupSize+1] + + group := groupBytes[:encGroupSize] + marker := groupBytes[encGroupSize] + + padCount := encMarker - marker + if padCount > encGroupSize { + return nil, nil, errors.Errorf("invalid marker byte, group bytes %q", groupBytes) + } + + realGroupSize := encGroupSize - padCount + data = append(data, group[:realGroupSize]...) + b = b[encGroupSize+1:] + + if padCount != 0 { + var padByte = encPad + // Check validity of padding bytes. + for _, v := range group[realGroupSize:] { + if v != padByte { + return nil, nil, errors.Errorf("invalid padding byte, group bytes %q", groupBytes) + } + } + break + } + } + return b, data, nil +} diff --git a/scheduler/pkg/codec/codec_test.go b/scheduler/pkg/codec/codec_test.go new file mode 100644 index 00000000..cf15f8ab --- /dev/null +++ b/scheduler/pkg/codec/codec_test.go @@ -0,0 +1,54 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package codec + +import ( + "testing" + + . 
"github.com/pingcap/check" +) + +func TestTable(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testCodecSuite{}) + +type testCodecSuite struct{} + +func (s *testCodecSuite) TestDecodeBytes(c *C) { + key := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(key); i++ { + _, k, err := DecodeBytes(EncodeBytes([]byte(key[:i]))) + c.Assert(err, IsNil) + c.Assert(string(k), Equals, key[:i]) + } +} + +func (s *testCodecSuite) TestTableID(c *C) { + key := EncodeBytes([]byte("t\x80\x00\x00\x00\x00\x00\x00\xff")) + c.Assert(key.TableID(), Equals, int64(0xff)) + + key = EncodeBytes([]byte("t\x80\x00\x00\x00\x00\x00\x00\xff_i\x01\x02")) + c.Assert(key.TableID(), Equals, int64(0xff)) + + key = []byte("t\x80\x00\x00\x00\x00\x00\x00\xff") + c.Assert(key.TableID(), Equals, int64(0)) + + key = EncodeBytes([]byte("T\x00\x00\x00\x00\x00\x00\x00\xff")) + c.Assert(key.TableID(), Equals, int64(0)) + + key = EncodeBytes([]byte("t\x80\x00\x00\x00\x00\x00\xff")) + c.Assert(key.TableID(), Equals, int64(0)) +} diff --git a/scheduler/pkg/etcdutil/etcdutil.go b/scheduler/pkg/etcdutil/etcdutil.go new file mode 100644 index 00000000..6c2265ad --- /dev/null +++ b/scheduler/pkg/etcdutil/etcdutil.go @@ -0,0 +1,141 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdutil + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/pkg/types" + "go.uber.org/zap" +) + +const ( + // DefaultDialTimeout is the maximum amount of time a dial will wait for a + // connection to setup. 30s is long enough for most of the network conditions. + DefaultDialTimeout = 30 * time.Second + + // DefaultRequestTimeout 10s is long enough for most of etcd clusters. + DefaultRequestTimeout = 10 * time.Second + + // DefaultSlowRequestTime 1s for the threshold for normal request, for those + // longer then 1s, they are considered as slow requests. + DefaultSlowRequestTime = 1 * time.Second +) + +// ListEtcdMembers returns a list of internal etcd members. +func ListEtcdMembers(client *clientv3.Client) (*clientv3.MemberListResponse, error) { + ctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout) + listResp, err := client.MemberList(ctx) + cancel() + return listResp, errors.WithStack(err) +} + +// CheckClusterID checks Etcd's cluster ID, returns an error if mismatch. +// This function will never block even quorum is not satisfied. +func CheckClusterID(localClusterID types.ID, um types.URLsMap, tlsConfig *tls.Config) error { + if len(um) == 0 { + return nil + } + + var peerURLs []string + for _, urls := range um { + peerURLs = append(peerURLs, urls.StringSlice()...) + } + + for _, u := range peerURLs { + trp := &http.Transport{ + TLSClientConfig: tlsConfig, + } + remoteCluster, gerr := etcdserver.GetClusterFromRemotePeers(nil, []string{u}, trp) + trp.CloseIdleConnections() + if gerr != nil { + // Do not return error, because other members may be not ready. 
+ log.Error("failed to get cluster from remote", zap.Error(gerr)) + continue + } + + remoteClusterID := remoteCluster.ID() + if remoteClusterID != localClusterID { + return errors.Errorf("Etcd cluster ID mismatch, expect %d, got %d", localClusterID, remoteClusterID) + } + } + return nil +} + +// EtcdKVGet returns the etcd GetResponse by given key or key prefix +func EtcdKVGet(c *clientv3.Client, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + ctx, cancel := context.WithTimeout(c.Ctx(), DefaultRequestTimeout) + defer cancel() + + start := time.Now() + resp, err := clientv3.NewKV(c).Get(ctx, key, opts...) + if err != nil { + log.Error("load from etcd meet error", zap.Error(err)) + } + if cost := time.Since(start); cost > DefaultSlowRequestTime { + log.Warn("kv gets too slow", zap.String("request-key", key), zap.Duration("cost", cost), zap.Error(err)) + } + + return resp, errors.WithStack(err) +} + +// GetValue gets value with key from etcd. +func GetValue(c *clientv3.Client, key string, opts ...clientv3.OpOption) ([]byte, error) { + resp, err := get(c, key, opts...) + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return resp.Kvs[0].Value, nil +} + +func get(c *clientv3.Client, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + resp, err := EtcdKVGet(c, key, opts...) + if err != nil { + return nil, err + } + + if n := len(resp.Kvs); n == 0 { + return nil, nil + } else if n > 1 { + return nil, errors.Errorf("invalid get value resp %v, must only one", resp.Kvs) + } + return resp, nil +} + +// GetProtoMsgWithModRev returns boolean to indicate whether the key exists or not. +func GetProtoMsgWithModRev(c *clientv3.Client, key string, msg proto.Message, opts ...clientv3.OpOption) (bool, int64, error) { + resp, err := get(c, key, opts...) + if err != nil { + return false, 0, err + } + if resp == nil { + return false, 0, nil + } + value := resp.Kvs[0].Value + if err = proto.Unmarshal(value, msg); err != nil { + return false, 0, errors.WithStack(err) + } + return true, resp.Kvs[0].ModRevision, nil +} diff --git a/scheduler/pkg/etcdutil/etcdutil_test.go b/scheduler/pkg/etcdutil/etcdutil_test.go new file mode 100644 index 00000000..c78f36d0 --- /dev/null +++ b/scheduler/pkg/etcdutil/etcdutil_test.go @@ -0,0 +1,110 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdutil + +import ( + "context" + "fmt" + "io/ioutil" + "net/url" + "os" + "testing" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tempurl" + . 
"github.com/pingcap/check" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testEtcdutilSuite{}) + +type testEtcdutilSuite struct { +} + +func newTestSingleConfig() *embed.Config { + cfg := embed.NewConfig() + cfg.Name = "test_etcd" + cfg.Dir, _ = ioutil.TempDir("/tmp", "test_etcd") + cfg.WalDir = "" + cfg.Logger = "zap" + cfg.LogOutputs = []string{"stdout"} + + pu, _ := url.Parse(tempurl.Alloc()) + cfg.LPUrls = []url.URL{*pu} + cfg.APUrls = cfg.LPUrls + cu, _ := url.Parse(tempurl.Alloc()) + cfg.LCUrls = []url.URL{*cu} + cfg.ACUrls = cfg.LCUrls + + cfg.StrictReconfigCheck = false + cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, &cfg.LPUrls[0]) + cfg.ClusterState = embed.ClusterStateFlagNew + return cfg +} + +func cleanConfig(cfg *embed.Config) { + // Clean data directory + os.RemoveAll(cfg.Dir) +} + +func (s *testEtcdutilSuite) TestEtcdKVGet(c *C) { + cfg := newTestSingleConfig() + etcd, err := embed.StartEtcd(cfg) + c.Assert(err, IsNil) + + ep := cfg.LCUrls[0].String() + client, err := clientv3.New(clientv3.Config{ + Endpoints: []string{ep}, + }) + c.Assert(err, IsNil) + + <-etcd.Server.ReadyNotify() + + keys := []string{"test/key1", "test/key2", "test/key3", "test/key4", "test/key5"} + vals := []string{"val1", "val2", "val3", "val4", "val5"} + + kv := clientv3.NewKV(client) + for i := range keys { + _, err = kv.Put(context.TODO(), keys[i], vals[i]) + c.Assert(err, IsNil) + } + + // Test simple point get + resp, err := EtcdKVGet(client, "test/key1") + c.Assert(err, IsNil) + c.Assert(string(resp.Kvs[0].Value), Equals, "val1") + + // Test range get + withRange := clientv3.WithRange("test/zzzz") + withLimit := clientv3.WithLimit(3) + resp, err = EtcdKVGet(client, "test/", withRange, withLimit, clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)) + c.Assert(err, IsNil) + c.Assert(len(resp.Kvs), Equals, 3) + + for i := range resp.Kvs { + c.Assert(string(resp.Kvs[i].Key), Equals, keys[i]) + c.Assert(string(resp.Kvs[i].Value), Equals, vals[i]) + } + + lastKey := string(resp.Kvs[len(resp.Kvs)-1].Key) + next := clientv3.GetPrefixRangeEnd(lastKey) + resp, err = EtcdKVGet(client, next, withRange, withLimit, clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)) + c.Assert(err, IsNil) + c.Assert(len(resp.Kvs), Equals, 2) + cleanConfig(cfg) +} diff --git a/scheduler/pkg/grpcutil/grpcutil.go b/scheduler/pkg/grpcutil/grpcutil.go new file mode 100644 index 00000000..05d90901 --- /dev/null +++ b/scheduler/pkg/grpcutil/grpcutil.go @@ -0,0 +1,69 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcutil + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// GetClientConn returns a gRPC client connection. 
+func GetClientConn(addr string, caPath string, certPath string, keyPath string) (*grpc.ClientConn, error) { + opt := grpc.WithInsecure() + if len(caPath) != 0 { + var certificates []tls.Certificate + if len(certPath) != 0 && len(keyPath) != 0 { + // Load the client certificates from disk + certificate, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, errors.Errorf("could not load client key pair: %s", err) + } + certificates = append(certificates, certificate) + } + + // Create a certificate pool from the certificate authority + certPool := x509.NewCertPool() + ca, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, errors.Errorf("could not read ca certificate: %s", err) + } + + // Append the certificates from the CA + if !certPool.AppendCertsFromPEM(ca) { + return nil, errors.New("failed to append ca certs") + } + + creds := credentials.NewTLS(&tls.Config{ + Certificates: certificates, + RootCAs: certPool, + }) + + opt = grpc.WithTransportCredentials(creds) + } + u, err := url.Parse(addr) + if err != nil { + return nil, errors.WithStack(err) + } + cc, err := grpc.Dial(u.Host, opt) + if err != nil { + return nil, errors.WithStack(err) + } + return cc, nil +} diff --git a/scheduler/pkg/logutil/log.go b/scheduler/pkg/logutil/log.go new file mode 100644 index 00000000..15141cff --- /dev/null +++ b/scheduler/pkg/logutil/log.go @@ -0,0 +1,296 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "bytes" + "fmt" + "os" + "path" + "runtime" + "strings" + "sync" + + "github.com/coreos/pkg/capnslog" + zaplog "github.com/pingcap/log" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "go.etcd.io/etcd/raft" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc/grpclog" + lumberjack "gopkg.in/natefinch/lumberjack.v2" +) + +const ( + defaultLogTimeFormat = "2006/01/02 15:04:05.000" + defaultLogMaxSize = 300 // MB + defaultLogFormat = "text" + defaultLogLevel = log.InfoLevel +) + +// FileLogConfig serializes file log related config in toml/json. +type FileLogConfig struct { + // Log filename, leave empty to disable file log. + Filename string `toml:"filename" json:"filename"` + // Is log rotate enabled. TODO. + LogRotate bool `toml:"log-rotate" json:"log-rotate"` + // Max size for a single file, in MB. + MaxSize int `toml:"max-size" json:"max-size"` + // Max log keep days, default is never deleting. + MaxDays int `toml:"max-days" json:"max-days"` + // Maximum number of old log files to retain. + MaxBackups int `toml:"max-backups" json:"max-backups"` +} + +// LogConfig serializes log related config in toml/json. +type LogConfig struct { + // Log level. + Level string `toml:"level" json:"level"` + // Log format. one of json, text, or console. + Format string `toml:"format" json:"format"` + // Disable automatic timestamps in output. + DisableTimestamp bool `toml:"disable-timestamp" json:"disable-timestamp"` + // File log config. + File FileLogConfig `toml:"file" json:"file"` +} + +// redirectFormatter will redirect etcd logs to logrus logs. 
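As an aside, here is a minimal usage sketch for `GetClientConn` from `scheduler/pkg/grpcutil` above. The endpoint is a hypothetical local scheduler address, and the empty TLS paths exercise the insecure fallback; this is illustrative only, not part of the commit.

```go
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/grpcutil"
)

func main() {
	// With an empty CA path the helper falls back to grpc.WithInsecure(); the
	// certificate and key paths are only consulted once a CA is given. The
	// address below is a placeholder local scheduler endpoint.
	cc, err := grpcutil.GetClientConn("http://127.0.0.1:2379", "", "", "")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer cc.Close()

	// grpc.Dial is non-blocking by default, so a bad endpoint usually surfaces
	// on the first RPC rather than here.
	fmt.Println("client connection created for", cc.Target())
}
```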
+type redirectFormatter struct{} + +// Format implements capnslog.Formatter hook. +func (rf *redirectFormatter) Format(pkg string, level capnslog.LogLevel, depth int, entries ...interface{}) { + if pkg != "" { + pkg = fmt.Sprint(pkg, ": ") + } + + logStr := fmt.Sprint(pkg, entries) + + switch level { + case capnslog.CRITICAL: + log.Fatalf(logStr) + case capnslog.ERROR: + log.Errorf(logStr) + case capnslog.WARNING: + log.Warningf(logStr) + case capnslog.NOTICE: + log.Infof(logStr) + case capnslog.INFO: + log.Infof(logStr) + case capnslog.DEBUG, capnslog.TRACE: + log.Debugf(logStr) + } +} + +// Flush only for implementing Formatter. +func (rf *redirectFormatter) Flush() {} + +// isSKippedPackageName tests wether path name is on log library calling stack. +func isSkippedPackageName(name string) bool { + return strings.Contains(name, "github.com/sirupsen/logrus") || + strings.Contains(name, "github.com/coreos/pkg/capnslog") +} + +// modifyHook injects file name and line pos into log entry. +type contextHook struct{} + +// Fire implements logrus.Hook interface +// https://github.com/sirupsen/logrus/issues/63 +func (hook *contextHook) Fire(entry *log.Entry) error { + pc := make([]uintptr, 4) + cnt := runtime.Callers(6, pc) + + for i := 0; i < cnt; i++ { + fu := runtime.FuncForPC(pc[i] - 1) + name := fu.Name() + if !isSkippedPackageName(name) { + file, line := fu.FileLine(pc[i] - 1) + entry.Data["file"] = path.Base(file) + entry.Data["line"] = line + break + } + } + return nil +} + +// Levels implements logrus.Hook interface. +func (hook *contextHook) Levels() []log.Level { + return log.AllLevels +} + +// StringToLogLevel translates log level string to log level. +func StringToLogLevel(level string) log.Level { + switch strings.ToLower(level) { + case "fatal": + return log.FatalLevel + case "error": + return log.ErrorLevel + case "warn", "warning": + return log.WarnLevel + case "debug": + return log.DebugLevel + case "info": + return log.InfoLevel + } + return defaultLogLevel +} + +// StringToZapLogLevel translates log level string to log level. +func StringToZapLogLevel(level string) zapcore.Level { + switch strings.ToLower(level) { + case "fatal": + return zapcore.FatalLevel + case "error": + return zapcore.ErrorLevel + case "warn", "warning": + return zapcore.WarnLevel + case "debug": + return zapcore.DebugLevel + case "info": + return zapcore.InfoLevel + } + return zapcore.InfoLevel +} + +// textFormatter is for compatibility with ngaut/log +type textFormatter struct { + DisableTimestamp bool +} + +// Format implements logrus.Formatter +func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + if !f.DisableTimestamp { + fmt.Fprintf(b, "%s ", entry.Time.Format(defaultLogTimeFormat)) + } + if file, ok := entry.Data["file"]; ok { + fmt.Fprintf(b, "%s:%v:", file, entry.Data["line"]) + } + fmt.Fprintf(b, " [%s] %s", entry.Level.String(), entry.Message) + for k, v := range entry.Data { + if k != "file" && k != "line" { + fmt.Fprintf(b, " %v=%v", k, v) + } + } + b.WriteByte('\n') + return b.Bytes(), nil +} + +// StringToLogFormatter uses the different log formatter according to a given format name. 
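For illustration, a small sketch of how the `LogConfig`/`FileLogConfig` structs above might be decoded from TOML and fed through `StringToZapLogLevel`. The `[file]` values are hypothetical, and the BurntSushi/toml dependency is assumed from go.mod (the duration tests later in this diff use it the same way).

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil"
)

func main() {
	// A hypothetical log section whose keys follow the toml tags declared on
	// LogConfig and FileLogConfig above.
	blob := `
level = "warn"
format = "text"

[file]
filename = "/tmp/scheduler.log"
max-size = 128
max-days = 7
max-backups = 3
`
	var cfg logutil.LogConfig
	if err := toml.Unmarshal([]byte(blob), &cfg); err != nil {
		panic(err)
	}

	// Unknown level strings fall back to the info level, as the switch above shows.
	fmt.Println(cfg.File.Filename, logutil.StringToZapLogLevel(cfg.Level))
}
```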
+func StringToLogFormatter(format string, disableTimestamp bool) log.Formatter { + switch strings.ToLower(format) { + case "text": + return &textFormatter{ + DisableTimestamp: disableTimestamp, + } + case "json": + return &log.JSONFormatter{ + TimestampFormat: defaultLogTimeFormat, + DisableTimestamp: disableTimestamp, + } + case "console": + return &log.TextFormatter{ + FullTimestamp: true, + TimestampFormat: defaultLogTimeFormat, + DisableTimestamp: disableTimestamp, + } + default: + return &textFormatter{} + } +} + +// InitFileLog initializes file based logging options. +func InitFileLog(cfg *zaplog.FileLogConfig) error { + if st, err := os.Stat(cfg.Filename); err == nil { + if st.IsDir() { + return errors.New("can't use directory as log file name") + } + } + if cfg.MaxSize == 0 { + cfg.MaxSize = defaultLogMaxSize + } + + // use lumberjack to logrotate + output := &lumberjack.Logger{ + Filename: cfg.Filename, + MaxSize: cfg.MaxSize, + MaxBackups: cfg.MaxBackups, + MaxAge: cfg.MaxDays, + LocalTime: true, + } + + log.SetOutput(output) + return nil +} + +type wrapLogrus struct { + *log.Logger +} + +// V provides the functionality that returns whether a particular log level is at +// least l - this is needed to meet the LoggerV2 interface. GRPC's logging levels +// are: https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go#L71 +// 0=info, 1=warning, 2=error, 3=fatal +// logrus's are: https://github.com/sirupsen/logrus/blob/master/logrus.go +// 0=panic, 1=fatal, 2=error, 3=warn, 4=info, 5=debug +func (lg *wrapLogrus) V(l int) bool { + // translate to logrus level + logrusLevel := 4 - l + return int(lg.Logger.Level) <= logrusLevel +} + +var once sync.Once + +// InitLogger initializes PD's logger. +func InitLogger(cfg *zaplog.Config) error { + var err error + + once.Do(func() { + log.SetLevel(StringToLogLevel(cfg.Level)) + log.AddHook(&contextHook{}) + + if cfg.Format == "" { + cfg.Format = defaultLogFormat + } + log.SetFormatter(StringToLogFormatter(cfg.Format, cfg.DisableTimestamp)) + + // etcd log + capnslog.SetFormatter(&redirectFormatter{}) + // grpc log + lg := &wrapLogrus{log.StandardLogger()} + grpclog.SetLoggerV2(lg) + // raft log + raft.SetLogger(lg) + + if len(cfg.File.Filename) == 0 { + return + } + + err = InitFileLog(&cfg.File) + }) + return err +} + +// LogPanic logs the panic reason and stack, then exit the process. +// Commonly used with a `defer`. +func LogPanic() { + if e := recover(); e != nil { + zaplog.Fatal("panic", zap.Reflect("recover", e)) + } +} diff --git a/scheduler/pkg/logutil/log_test.go b/scheduler/pkg/logutil/log_test.go new file mode 100644 index 00000000..f5625f55 --- /dev/null +++ b/scheduler/pkg/logutil/log_test.go @@ -0,0 +1,111 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "bytes" + "strings" + "testing" + + "github.com/coreos/pkg/capnslog" + . 
"github.com/pingcap/check" + zaplog "github.com/pingcap/log" + log "github.com/sirupsen/logrus" + "go.uber.org/zap/zapcore" +) + +const ( + logPattern = `\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d\.\d\d\d ([\w_%!$@.,+~-]+|\\.)+:\d+: \[(fatal|error|warning|info|debug)\] .*?\n` +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testLogSuite{}) + +type testLogSuite struct { + buf *bytes.Buffer +} + +func (s *testLogSuite) SetUpSuite(c *C) { + s.buf = &bytes.Buffer{} +} + +func (s *testLogSuite) TestStringToLogLevel(c *C) { + c.Assert(StringToLogLevel("fatal"), Equals, log.FatalLevel) + c.Assert(StringToLogLevel("ERROR"), Equals, log.ErrorLevel) + c.Assert(StringToLogLevel("warn"), Equals, log.WarnLevel) + c.Assert(StringToLogLevel("warning"), Equals, log.WarnLevel) + c.Assert(StringToLogLevel("debug"), Equals, log.DebugLevel) + c.Assert(StringToLogLevel("info"), Equals, log.InfoLevel) + c.Assert(StringToLogLevel("whatever"), Equals, log.InfoLevel) +} + +func (s *testLogSuite) TestStringToZapLogLevel(c *C) { + c.Assert(StringToZapLogLevel("fatal"), Equals, zapcore.FatalLevel) + c.Assert(StringToZapLogLevel("ERROR"), Equals, zapcore.ErrorLevel) + c.Assert(StringToZapLogLevel("warn"), Equals, zapcore.WarnLevel) + c.Assert(StringToZapLogLevel("warning"), Equals, zapcore.WarnLevel) + c.Assert(StringToZapLogLevel("debug"), Equals, zapcore.DebugLevel) + c.Assert(StringToZapLogLevel("info"), Equals, zapcore.InfoLevel) + c.Assert(StringToZapLogLevel("whatever"), Equals, zapcore.InfoLevel) +} + +func (s *testLogSuite) TestStringToLogFormatter(c *C) { + c.Assert(StringToLogFormatter("text", true), DeepEquals, &textFormatter{ + DisableTimestamp: true, + }) + c.Assert(StringToLogFormatter("json", true), DeepEquals, &log.JSONFormatter{ + DisableTimestamp: true, + TimestampFormat: defaultLogTimeFormat, + }) + c.Assert(StringToLogFormatter("console", true), DeepEquals, &log.TextFormatter{ + DisableTimestamp: true, + FullTimestamp: true, + TimestampFormat: defaultLogTimeFormat, + }) + c.Assert(StringToLogFormatter("", true), DeepEquals, &textFormatter{}) +} + +// TestLogging assure log format and log redirection works. +func (s *testLogSuite) TestLogging(c *C) { + conf := &zaplog.Config{Level: "warn", File: zaplog.FileLogConfig{}} + c.Assert(InitLogger(conf), IsNil) + + log.SetOutput(s.buf) + + tlog := capnslog.NewPackageLogger("github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil", "test") + + tlog.Infof("[this message should not be sent to buf]") + c.Assert(s.buf.Len(), Equals, 0) + + tlog.Warningf("[this message should be sent to buf]") + entry, err := s.buf.ReadString('\n') + c.Assert(err, IsNil) + c.Assert(entry, Matches, logPattern) + // All capnslog log will be trigered in logutil/log.go + c.Assert(strings.Contains(entry, "log.go"), IsTrue) + + log.Warnf("this message comes from logrus") + entry, err = s.buf.ReadString('\n') + c.Assert(err, IsNil) + c.Assert(entry, Matches, logPattern) + c.Assert(strings.Contains(entry, "log_test.go"), IsTrue) +} + +func (s *testLogSuite) TestFileLog(c *C) { + c.Assert(InitFileLog(&zaplog.FileLogConfig{Filename: "/tmp"}), NotNil) + c.Assert(InitFileLog(&zaplog.FileLogConfig{Filename: "/tmp/test_file_log", MaxSize: 0}), IsNil) +} diff --git a/scheduler/pkg/mock/mockcluster/mockcluster.go b/scheduler/pkg/mock/mockcluster/mockcluster.go new file mode 100644 index 00000000..1ce32990 --- /dev/null +++ b/scheduler/pkg/mock/mockcluster/mockcluster.go @@ -0,0 +1,353 @@ +// Copyright 2019 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockcluster + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +// Cluster is used to mock clusterInfo for test use. +type Cluster struct { + *core.BasicCluster + *mockid.IDAllocator + *mockoption.ScheduleOptions + ID uint64 +} + +// NewCluster creates a new Cluster +func NewCluster(opt *mockoption.ScheduleOptions) *Cluster { + return &Cluster{ + BasicCluster: core.NewBasicCluster(), + IDAllocator: mockid.NewIDAllocator(), + ScheduleOptions: opt, + } +} + +func (mc *Cluster) allocID() (uint64, error) { + return mc.Alloc() +} + +// ScanRegions scans region with start key, until number greater than limit. +func (mc *Cluster) ScanRegions(startKey, endKey []byte, limit int) []*core.RegionInfo { + return mc.Regions.ScanRange(startKey, endKey, limit) +} + +// LoadRegion puts region info without leader +func (mc *Cluster) LoadRegion(regionID uint64, followerIds ...uint64) { + // regions load from etcd will have no leader + r := mc.newMockRegionInfo(regionID, 0, followerIds...).Clone(core.WithLeader(nil)) + mc.PutRegion(r) +} + +// GetStoreRegionCount gets region count with a given store. +func (mc *Cluster) GetStoreRegionCount(storeID uint64) int { + return mc.Regions.GetStoreRegionCount(storeID) +} + +// GetStore gets a store with a given store ID. +func (mc *Cluster) GetStore(storeID uint64) *core.StoreInfo { + return mc.Stores.GetStore(storeID) +} + +// AllocPeer allocs a new peer on a store. +func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { + peerID, err := mc.allocID() + if err != nil { + log.Error("failed to alloc peer", zap.Error(err)) + return nil, err + } + peer := &metapb.Peer{ + Id: peerID, + StoreId: storeID, + } + return peer, nil +} + +// SetStoreUp sets store state to be up. +func (mc *Cluster) SetStoreUp(storeID uint64) { + store := mc.GetStore(storeID) + newStore := store.Clone( + core.SetStoreState(metapb.StoreState_Up), + core.SetLastHeartbeatTS(time.Now()), + ) + mc.PutStore(newStore) +} + +// SetStoreDisconnect changes a store's state to disconnected. +func (mc *Cluster) SetStoreDisconnect(storeID uint64) { + store := mc.GetStore(storeID) + newStore := store.Clone( + core.SetStoreState(metapb.StoreState_Up), + core.SetLastHeartbeatTS(time.Now().Add(-time.Second*30)), + ) + mc.PutStore(newStore) +} + +// SetStoreDown sets store down. +func (mc *Cluster) SetStoreDown(storeID uint64) { + store := mc.GetStore(storeID) + newStore := store.Clone( + core.SetStoreState(metapb.StoreState_Up), + core.SetLastHeartbeatTS(time.Time{}), + ) + mc.PutStore(newStore) +} + +// SetStoreOffline sets store state to be offline. 
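A brief sketch of the clone-with-options pattern these helpers rely on, using `core.NewStoreInfo` and the same option helpers as `SetStoreDown` above. The store ID is illustrative, and treating a zero heartbeat timestamp as "down" simply mirrors what `SetStoreDown` does here.

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
	"github.com/pingcap-incubator/tinykv/scheduler/server/core"
)

func main() {
	// Store state never changes in place: each helper above fetches the current
	// StoreInfo, Clones it with functional options, and puts the copy back.
	store := core.NewStoreInfo(&metapb.Store{Id: 1})
	down := store.Clone(
		core.SetStoreState(metapb.StoreState_Up),
		core.SetLastHeartbeatTS(time.Time{}), // zero heartbeat, the same marker SetStoreDown uses
	)
	fmt.Printf("before: %+v\nafter:  %+v\n", store, down)
}
```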
+func (mc *Cluster) SetStoreOffline(storeID uint64) { + store := mc.GetStore(storeID) + newStore := store.Clone(core.SetStoreState(metapb.StoreState_Offline)) + mc.PutStore(newStore) +} + +// SetStoreBusy sets store busy. +func (mc *Cluster) SetStoreBusy(storeID uint64, busy bool) { + store := mc.GetStore(storeID) + newStats := proto.Clone(store.GetStoreStats()).(*schedulerpb.StoreStats) + newStats.IsBusy = busy + newStore := store.Clone( + core.SetStoreStats(newStats), + core.SetLastHeartbeatTS(time.Now()), + ) + mc.PutStore(newStore) +} + +// AddLeaderStore adds store with specified count of leader. +func (mc *Cluster) AddLeaderStore(storeID uint64, leaderCount int, leaderSizes ...int64) { + stats := &schedulerpb.StoreStats{} + stats.Capacity = 1000 * (1 << 20) + stats.Available = stats.Capacity - uint64(leaderCount)*10 + var leaderSize int64 + if len(leaderSizes) != 0 { + leaderSize = leaderSizes[0] + } else { + leaderSize = int64(leaderCount) * 10 + } + + store := core.NewStoreInfo( + &metapb.Store{Id: storeID}, + core.SetStoreStats(stats), + core.SetLeaderCount(leaderCount), + core.SetLeaderSize(leaderSize), + core.SetLastHeartbeatTS(time.Now()), + ) + mc.PutStore(store) +} + +// AddRegionStore adds store with specified count of region. +func (mc *Cluster) AddRegionStore(storeID uint64, regionCount int) { + stats := &schedulerpb.StoreStats{} + stats.Capacity = 1000 * (1 << 20) + stats.Available = stats.Capacity - uint64(regionCount)*10 + store := core.NewStoreInfo( + &metapb.Store{Id: storeID}, + core.SetStoreStats(stats), + core.SetRegionCount(regionCount), + core.SetRegionSize(int64(regionCount)*10), + core.SetLastHeartbeatTS(time.Now()), + ) + mc.PutStore(store) +} + +// AddLeaderRegion adds region with specified leader and followers. +func (mc *Cluster) AddLeaderRegion(regionID uint64, leaderID uint64, followerIds ...uint64) { + origin := mc.newMockRegionInfo(regionID, leaderID, followerIds...) + region := origin.Clone(core.SetApproximateSize(10)) + mc.PutRegion(region) +} + +// AddLeaderRegionWithRange adds region with specified leader, followers and key range. +func (mc *Cluster) AddLeaderRegionWithRange(regionID uint64, startKey string, endKey string, leaderID uint64, followerIds ...uint64) { + o := mc.newMockRegionInfo(regionID, leaderID, followerIds...) + r := o.Clone( + core.WithStartKey([]byte(startKey)), + core.WithEndKey([]byte(endKey)), + ) + mc.PutRegion(r) +} + +// UpdateStoreLeaderWeight updates store leader weight. +func (mc *Cluster) UpdateStoreLeaderWeight(storeID uint64, weight float64) { + store := mc.GetStore(storeID) + newStore := store.Clone(core.SetLeaderWeight(weight)) + mc.PutStore(newStore) +} + +// UpdateStoreRegionWeight updates store region weight. +func (mc *Cluster) UpdateStoreRegionWeight(storeID uint64, weight float64) { + store := mc.GetStore(storeID) + newStore := store.Clone(core.SetRegionWeight(weight)) + mc.PutStore(newStore) +} + +// UpdateStoreLeaderSize updates store leader size. +func (mc *Cluster) UpdateStoreLeaderSize(storeID uint64, size int64) { + store := mc.GetStore(storeID) + newStats := proto.Clone(store.GetStoreStats()).(*schedulerpb.StoreStats) + newStats.Available = newStats.Capacity - uint64(store.GetLeaderSize()) + newStore := store.Clone( + core.SetStoreStats(newStats), + core.SetLeaderSize(size), + ) + mc.PutStore(newStore) +} + +// UpdateStoreRegionSize updates store region size. 
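To show how the helpers above typically fit together in a test, here is a hedged setup sketch. It assumes `mockoption.NewScheduleOptions` from later in this diff, and the store count, region ID, and key range are invented.

```go
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster"
	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption"
)

func main() {
	// Build a three-store cluster entirely in memory.
	mc := mockcluster.NewCluster(mockoption.NewScheduleOptions())
	for id := uint64(1); id <= 3; id++ {
		mc.AddRegionStore(id, 10)
	}
	// Region 1 covers ["a", "b") with its leader on store 1 and followers on 2 and 3.
	mc.AddLeaderRegionWithRange(1, "a", "b", 1, 2, 3)

	for id := uint64(1); id <= 3; id++ {
		fmt.Printf("store %d holds %d region peer(s)\n", id, mc.GetStoreRegionCount(id))
	}
}
```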
+func (mc *Cluster) UpdateStoreRegionSize(storeID uint64, size int64) { + store := mc.GetStore(storeID) + newStats := proto.Clone(store.GetStoreStats()).(*schedulerpb.StoreStats) + newStats.Available = newStats.Capacity - uint64(store.GetRegionSize()) + newStore := store.Clone( + core.SetStoreStats(newStats), + core.SetRegionSize(size), + ) + mc.PutStore(newStore) +} + +// UpdateLeaderCount updates store leader count. +func (mc *Cluster) UpdateLeaderCount(storeID uint64, leaderCount int) { + store := mc.GetStore(storeID) + newStore := store.Clone( + core.SetLeaderCount(leaderCount), + core.SetLeaderSize(int64(leaderCount)*10), + ) + mc.PutStore(newStore) +} + +// UpdateRegionCount updates store region count. +func (mc *Cluster) UpdateRegionCount(storeID uint64, regionCount int) { + store := mc.GetStore(storeID) + newStore := store.Clone( + core.SetRegionCount(regionCount), + core.SetRegionSize(int64(regionCount)*10), + ) + mc.PutStore(newStore) +} + +// UpdateSnapshotCount updates store snapshot count. +func (mc *Cluster) UpdateSnapshotCount(storeID uint64, snapshotCount int) { + store := mc.GetStore(storeID) + newStats := proto.Clone(store.GetStoreStats()).(*schedulerpb.StoreStats) + newStats.ApplyingSnapCount = uint32(snapshotCount) + newStore := store.Clone(core.SetStoreStats(newStats)) + mc.PutStore(newStore) +} + +// UpdatePendingPeerCount updates store pending peer count. +func (mc *Cluster) UpdatePendingPeerCount(storeID uint64, pendingPeerCount int) { + store := mc.GetStore(storeID) + newStore := store.Clone(core.SetPendingPeerCount(pendingPeerCount)) + mc.PutStore(newStore) +} + +// UpdateStorageRatio updates store storage ratio count. +func (mc *Cluster) UpdateStorageRatio(storeID uint64, usedRatio, availableRatio float64) { + store := mc.GetStore(storeID) + newStats := proto.Clone(store.GetStoreStats()).(*schedulerpb.StoreStats) + newStats.Capacity = 1000 * (1 << 20) + newStats.UsedSize = uint64(float64(newStats.Capacity) * usedRatio) + newStats.Available = uint64(float64(newStats.Capacity) * availableRatio) + newStore := store.Clone(core.SetStoreStats(newStats)) + mc.PutStore(newStore) +} + +// UpdateStoreStatus updates store status. +func (mc *Cluster) UpdateStoreStatus(id uint64) { + leaderCount := mc.Regions.GetStoreLeaderCount(id) + regionCount := mc.Regions.GetStoreRegionCount(id) + pendingPeerCount := mc.Regions.GetStorePendingPeerCount(id) + leaderSize := mc.Regions.GetStoreLeaderRegionSize(id) + regionSize := mc.Regions.GetStoreRegionSize(id) + store := mc.Stores.GetStore(id) + stats := &schedulerpb.StoreStats{} + stats.Capacity = 1000 * (1 << 20) + stats.Available = stats.Capacity - uint64(store.GetRegionSize()) + stats.UsedSize = uint64(store.GetRegionSize()) + newStore := store.Clone( + core.SetStoreStats(stats), + core.SetLeaderCount(leaderCount), + core.SetRegionCount(regionCount), + core.SetPendingPeerCount(pendingPeerCount), + core.SetLeaderSize(leaderSize), + core.SetRegionSize(regionSize), + ) + mc.PutStore(newStore) +} + +func (mc *Cluster) newMockRegionInfo(regionID uint64, leaderID uint64, followerIDs ...uint64) *core.RegionInfo { + return mc.MockRegionInfo(regionID, leaderID, followerIDs, nil) +} + +// GetOpt mocks method. +func (mc *Cluster) GetOpt() *mockoption.ScheduleOptions { + return mc.ScheduleOptions +} + +// GetLeaderScheduleLimit mocks method. +func (mc *Cluster) GetLeaderScheduleLimit() uint64 { + return mc.ScheduleOptions.GetLeaderScheduleLimit() +} + +// GetRegionScheduleLimit mocks method. 
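One more small sketch, this time of `UpdateStorageRatio` from above: the ratios are arbitrary, and the resulting numbers simply follow from the mock 1000 MiB capacity hard-coded in these helpers.

```go
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster"
	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption"
)

func main() {
	mc := mockcluster.NewCluster(mockoption.NewScheduleOptions())
	mc.AddRegionStore(1, 5)

	// UpdateStorageRatio rewrites the store's stats: capacity stays at the mock
	// 1000*(1<<20) figure while used/available are derived from the ratios.
	mc.UpdateStorageRatio(1, 0.6, 0.3)

	stats := mc.GetStore(1).GetStoreStats()
	fmt.Println(stats.Capacity, stats.UsedSize, stats.Available)
}
```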
+func (mc *Cluster) GetRegionScheduleLimit() uint64 { + return mc.ScheduleOptions.GetRegionScheduleLimit() +} + +// GetReplicaScheduleLimit mocks method. +func (mc *Cluster) GetReplicaScheduleLimit() uint64 { + return mc.ScheduleOptions.GetReplicaScheduleLimit() +} + +// GetMaxReplicas mocks method. +func (mc *Cluster) GetMaxReplicas() int { + return mc.ScheduleOptions.GetMaxReplicas() +} + +// PutRegionStores mocks method. +func (mc *Cluster) PutRegionStores(id uint64, stores ...uint64) { + meta := &metapb.Region{Id: id} + for _, s := range stores { + meta.Peers = append(meta.Peers, &metapb.Peer{StoreId: s}) + } + mc.PutRegion(core.NewRegionInfo(meta, &metapb.Peer{StoreId: stores[0]})) +} + +// MockRegionInfo returns a mock region +func (mc *Cluster) MockRegionInfo(regionID uint64, leaderID uint64, + followerIDs []uint64, epoch *metapb.RegionEpoch) *core.RegionInfo { + + region := &metapb.Region{ + Id: regionID, + StartKey: []byte(fmt.Sprintf("%20d", regionID)), + EndKey: []byte(fmt.Sprintf("%20d", regionID+1)), + RegionEpoch: epoch, + } + leader, _ := mc.AllocPeer(leaderID) + region.Peers = []*metapb.Peer{leader} + for _, id := range followerIDs { + peer, _ := mc.AllocPeer(id) + region.Peers = append(region.Peers, peer) + } + return core.NewRegionInfo(region, leader) +} diff --git a/scheduler/pkg/mock/mockhbstream/mockhbstream.go b/scheduler/pkg/mock/mockhbstream/mockhbstream.go new file mode 100644 index 00000000..4f48149a --- /dev/null +++ b/scheduler/pkg/mock/mockhbstream/mockhbstream.go @@ -0,0 +1,103 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockhbstream + +import ( + "context" + "errors" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" +) + +// HeartbeatStream is used to mock HeartbeatStream for test use. +type HeartbeatStream struct { + ch chan *schedulerpb.RegionHeartbeatResponse +} + +// NewHeartbeatStream creates a new HeartbeatStream. +func NewHeartbeatStream() HeartbeatStream { + return HeartbeatStream{ + ch: make(chan *schedulerpb.RegionHeartbeatResponse), + } +} + +// Send mocks method. +func (s HeartbeatStream) Send(m *schedulerpb.RegionHeartbeatResponse) error { + select { + case <-time.After(time.Second): + return errors.New("timeout") + case s.ch <- m: + } + return nil +} + +// SendMsg is used to send the message. +func (s HeartbeatStream) SendMsg(region *core.RegionInfo, msg *schedulerpb.RegionHeartbeatResponse) { + return +} + +// Recv mocks method. +func (s HeartbeatStream) Recv() *schedulerpb.RegionHeartbeatResponse { + select { + case <-time.After(time.Millisecond * 10): + return nil + case res := <-s.ch: + return res + } +} + +// HeartbeatStreams is used to mock heartbeatstreams for test use. +type HeartbeatStreams struct { + ctx context.Context + cancel context.CancelFunc + clusterID uint64 + msgCh chan *schedulerpb.RegionHeartbeatResponse +} + +// NewHeartbeatStreams creates a new HeartbeatStreams. 
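A short sketch of the `HeartbeatStream` rendezvous semantics shown above: `Send` blocks on the unbuffered channel with a one-second timeout, while `Recv` gives up after 10ms, so an idle stream yields nil instead of hanging a test. The region ID is made up.

```go
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb"
	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockhbstream"
)

func main() {
	s := mockhbstream.NewHeartbeatStream()

	// Nothing has been sent yet, so Recv returns nil after its 10ms window.
	fmt.Println("idle recv:", s.Recv())

	// Pair Send with a concurrent Recv; the rendezvous happens well within
	// Recv's timeout in practice.
	go func() {
		_ = s.Send(&schedulerpb.RegionHeartbeatResponse{RegionId: 42})
	}()
	if resp := s.Recv(); resp != nil {
		fmt.Println("got heartbeat for region", resp.RegionId)
	}
}
```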
+func NewHeartbeatStreams(clusterID uint64) *HeartbeatStreams { + ctx, cancel := context.WithCancel(context.Background()) + hs := &HeartbeatStreams{ + ctx: ctx, + cancel: cancel, + clusterID: clusterID, + msgCh: make(chan *schedulerpb.RegionHeartbeatResponse, 1024), + } + return hs +} + +// SendMsg is used to send the message. +func (mhs *HeartbeatStreams) SendMsg(region *core.RegionInfo, msg *schedulerpb.RegionHeartbeatResponse) { + if region.GetLeader() == nil { + return + } + + msg.Header = &schedulerpb.ResponseHeader{ClusterId: mhs.clusterID} + msg.RegionId = region.GetID() + msg.RegionEpoch = region.GetRegionEpoch() + msg.TargetPeer = region.GetLeader() + + select { + case mhs.msgCh <- msg: + case <-mhs.ctx.Done(): + } +} + +// MsgCh returns the internal channel which contains the heartbeat responses +// from PD. It can be used to inspect the content of a PD response +func (mhs *HeartbeatStreams) MsgCh() chan *schedulerpb.RegionHeartbeatResponse { + return mhs.msgCh +} diff --git a/scheduler/pkg/mock/mockid/mockid.go b/scheduler/pkg/mock/mockid/mockid.go new file mode 100644 index 00000000..6357bd9c --- /dev/null +++ b/scheduler/pkg/mock/mockid/mockid.go @@ -0,0 +1,31 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockid + +import "sync/atomic" + +// IDAllocator mocks IDAllocator and it is only used for test. +type IDAllocator struct { + base uint64 +} + +// NewIDAllocator creates a new IDAllocator. +func NewIDAllocator() *IDAllocator { + return &IDAllocator{base: 0} +} + +// Alloc returns a new id. +func (alloc *IDAllocator) Alloc() (uint64, error) { + return atomic.AddUint64(&alloc.base, 1), nil +} diff --git a/scheduler/pkg/mock/mockoption/mockoption.go b/scheduler/pkg/mock/mockoption/mockoption.go new file mode 100644 index 00000000..ebb42b3f --- /dev/null +++ b/scheduler/pkg/mock/mockoption/mockoption.go @@ -0,0 +1,99 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
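Before the mockoption package begins, a quick sketch of `mockid.IDAllocator` from the hunk above; the goroutine count is arbitrary.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid"
)

func main() {
	alloc := mockid.NewIDAllocator()

	// Alloc is a single atomic.AddUint64, so concurrent callers always receive
	// distinct, monotonically increasing IDs starting from 1.
	var wg sync.WaitGroup
	ids := make([]uint64, 4)
	for i := range ids {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			ids[i], _ = alloc.Alloc()
		}(i)
	}
	wg.Wait()
	fmt.Println(ids) // some permutation of [1 2 3 4]
}
```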
+ +package mockoption + +import ( + "time" +) + +const ( + defaultMaxReplicas = 3 + defaultMaxSnapshotCount = 3 + defaultMaxPendingPeerCount = 16 + defaultMaxMergeRegionSize = 0 + defaultMaxMergeRegionKeys = 0 + defaultMaxStoreDownTime = 30 * time.Minute + defaultLeaderScheduleLimit = 4 + defaultRegionScheduleLimit = 64 + defaultReplicaScheduleLimit = 64 +) + +// ScheduleOptions is a mock of ScheduleOptions +// which implements Options interface +type ScheduleOptions struct { + RegionScheduleLimit uint64 + LeaderScheduleLimit uint64 + ReplicaScheduleLimit uint64 + MaxSnapshotCount uint64 + MaxPendingPeerCount uint64 + MaxMergeRegionSize uint64 + MaxMergeRegionKeys uint64 + MaxStoreDownTime time.Duration + MaxReplicas int +} + +// NewScheduleOptions creates a mock schedule option. +func NewScheduleOptions() *ScheduleOptions { + mso := &ScheduleOptions{} + mso.RegionScheduleLimit = defaultRegionScheduleLimit + mso.LeaderScheduleLimit = defaultLeaderScheduleLimit + mso.ReplicaScheduleLimit = defaultReplicaScheduleLimit + mso.MaxSnapshotCount = defaultMaxSnapshotCount + mso.MaxMergeRegionSize = defaultMaxMergeRegionSize + mso.MaxMergeRegionKeys = defaultMaxMergeRegionKeys + mso.MaxStoreDownTime = defaultMaxStoreDownTime + mso.MaxReplicas = defaultMaxReplicas + mso.MaxPendingPeerCount = defaultMaxPendingPeerCount + return mso +} + +// GetLeaderScheduleLimit mocks method +func (mso *ScheduleOptions) GetLeaderScheduleLimit() uint64 { + return mso.LeaderScheduleLimit +} + +// GetRegionScheduleLimit mocks method +func (mso *ScheduleOptions) GetRegionScheduleLimit() uint64 { + return mso.RegionScheduleLimit +} + +// GetReplicaScheduleLimit mocks method +func (mso *ScheduleOptions) GetReplicaScheduleLimit() uint64 { + return mso.ReplicaScheduleLimit +} + +// GetMaxMergeRegionSize mocks method +func (mso *ScheduleOptions) GetMaxMergeRegionSize() uint64 { + return mso.MaxMergeRegionSize +} + +// GetMaxMergeRegionKeys mocks method +func (mso *ScheduleOptions) GetMaxMergeRegionKeys() uint64 { + return mso.MaxMergeRegionKeys +} + +// GetMaxStoreDownTime mocks method +func (mso *ScheduleOptions) GetMaxStoreDownTime() time.Duration { + return mso.MaxStoreDownTime +} + +// GetMaxReplicas mocks method +func (mso *ScheduleOptions) GetMaxReplicas() int { + return mso.MaxReplicas +} + +// SetMaxReplicas mocks method +func (mso *ScheduleOptions) SetMaxReplicas(replicas int) { + mso.MaxReplicas = replicas +} diff --git a/scheduler/pkg/slice/slice.go b/scheduler/pkg/slice/slice.go new file mode 100644 index 00000000..0e6f00fc --- /dev/null +++ b/scheduler/pkg/slice/slice.go @@ -0,0 +1,40 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package slice + +import "reflect" + +// AnyOf returns true if any element in the slice matches the predict func. +func AnyOf(s interface{}, p func(int) bool) bool { + l := reflect.ValueOf(s).Len() + for i := 0; i < l; i++ { + if p(i) { + return true + } + } + return false +} + +// NoneOf returns true if no element in the slice matches the predict func. 
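A small sketch of the index-based predicate helpers (`AnyOf` above, `NoneOf` and `AllOf` just below). The slice values are illustrative; the point is that the helpers take the slice as `interface{}` plus an index predicate, so a single closure captures the concrete slice without reflection at the call site.

```go
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/slice"
)

func main() {
	stores := []uint64{1, 3, 5, 8}
	even := func(i int) bool { return stores[i]%2 == 0 }

	fmt.Println("any even:", slice.AnyOf(stores, even))   // true (8)
	fmt.Println("all even:", slice.AllOf(stores, even))   // false
	fmt.Println("none even:", slice.NoneOf(stores, even)) // false
}
```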
+func NoneOf(s interface{}, p func(int) bool) bool { + return !AnyOf(s, p) +} + +// AllOf returns true if all elements in the slice match the predict func. +func AllOf(s interface{}, p func(int) bool) bool { + np := func(i int) bool { + return !p(i) + } + return NoneOf(s, np) +} diff --git a/scheduler/pkg/slice/slice_test.go b/scheduler/pkg/slice/slice_test.go new file mode 100644 index 00000000..b9a162b5 --- /dev/null +++ b/scheduler/pkg/slice/slice_test.go @@ -0,0 +1,51 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package slice_test + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/slice" + . "github.com/pingcap/check" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testSliceSuite{}) + +type testSliceSuite struct { +} + +func (s *testSliceSuite) Test(c *C) { + tests := []struct { + a []int + anyOf bool + noneOf bool + allOf bool + }{ + {[]int{}, false, true, true}, + {[]int{1, 2, 3}, true, false, false}, + {[]int{1, 3}, false, true, false}, + {[]int{2, 2, 4}, true, false, true}, + } + + for _, t := range tests { + even := func(i int) bool { return t.a[i]%2 == 0 } + c.Assert(slice.AnyOf(t.a, even), Equals, t.anyOf) + c.Assert(slice.NoneOf(t.a, even), Equals, t.noneOf) + c.Assert(slice.AllOf(t.a, even), Equals, t.allOf) + } +} diff --git a/scheduler/pkg/tempurl/tempurl.go b/scheduler/pkg/tempurl/tempurl.go new file mode 100644 index 00000000..41dc1d9c --- /dev/null +++ b/scheduler/pkg/tempurl/tempurl.go @@ -0,0 +1,61 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tempurl + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/pingcap/log" + "go.uber.org/zap" +) + +var ( + testAddrMutex sync.Mutex + testAddrMap = make(map[string]struct{}) +) + +// Alloc allocates a local URL for testing. 
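A minimal sketch of `tempurl.Alloc` shown above. Note the inherent (and accepted) race in this approach: another process could grab the port between the listener being closed and the test binding the address again; the package only guards against reuse within the same test process.

```go
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/tempurl"
)

func main() {
	// Each call binds 127.0.0.1:0 so the kernel picks a free port, closes the
	// listener, and remembers the address so repeated calls in one process
	// never hand out the same URL twice.
	a := tempurl.Alloc()
	b := tempurl.Alloc()
	fmt.Println(a, b, "distinct:", a != b)
}
```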
+func Alloc() string { + for i := 0; i < 10; i++ { + if u := tryAllocTestURL(); u != "" { + return u + } + time.Sleep(time.Second) + } + log.Fatal("failed to alloc test URL") + return "" +} + +func tryAllocTestURL() string { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + log.Fatal("listen failed", zap.Error(err)) + } + addr := fmt.Sprintf("http://%s", l.Addr()) + err = l.Close() + if err != nil { + log.Fatal("close failed", zap.Error(err)) + } + + testAddrMutex.Lock() + defer testAddrMutex.Unlock() + if _, ok := testAddrMap[addr]; ok { + return "" + } + testAddrMap[addr] = struct{}{} + return addr +} diff --git a/scheduler/pkg/testutil/operator_check.go b/scheduler/pkg/testutil/operator_check.go new file mode 100644 index 00000000..edf66fe5 --- /dev/null +++ b/scheduler/pkg/testutil/operator_check.go @@ -0,0 +1,74 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + check "github.com/pingcap/check" +) + +// CheckAddPeer checks if the operator is to add peer on specified store. +func CheckAddPeer(c *check.C, op *operator.Operator, kind operator.OpKind, storeID uint64) { + c.Assert(op, check.NotNil) + c.Assert(op.Len(), check.Equals, 1) + c.Assert(op.Step(0).(operator.AddPeer).ToStore, check.Equals, storeID) + kind |= operator.OpRegion + c.Assert(op.Kind()&kind, check.Equals, kind) +} + +// CheckRemovePeer checks if the operator is to remove peer on specified store. +func CheckRemovePeer(c *check.C, op *operator.Operator, storeID uint64) { + if op.Len() == 1 { + c.Assert(op.Step(0).(operator.RemovePeer).FromStore, check.Equals, storeID) + } else { + c.Assert(op.Len(), check.Equals, 2) + c.Assert(op.Step(0).(operator.TransferLeader).FromStore, check.Equals, storeID) + c.Assert(op.Step(1).(operator.RemovePeer).FromStore, check.Equals, storeID) + } +} + +// CheckTransferLeader checks if the operator is to transfer leader between the specified source and target stores. +func CheckTransferLeader(c *check.C, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { + c.Assert(op, check.NotNil) + c.Assert(op.Len(), check.Equals, 1) + c.Assert(op.Step(0), check.Equals, operator.TransferLeader{FromStore: sourceID, ToStore: targetID}) + kind |= operator.OpLeader + c.Assert(op.Kind()&kind, check.Equals, kind) +} + +// CheckTransferPeer checks if the operator is to transfer peer between the specified source and target stores. 
+func CheckTransferPeer(c *check.C, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { + c.Assert(op, check.NotNil) + if op.Len() == 2 { + c.Assert(op.Step(0).(operator.AddPeer).ToStore, check.Equals, targetID) + c.Assert(op.Step(1).(operator.RemovePeer).FromStore, check.Equals, sourceID) + } else { + c.Assert(op.Len(), check.Equals, 3) + c.Assert(op.Step(0).(operator.AddPeer).ToStore, check.Equals, targetID) + c.Assert(op.Step(1).(operator.TransferLeader).FromStore, check.Equals, sourceID) + c.Assert(op.Step(2).(operator.RemovePeer).FromStore, check.Equals, sourceID) + kind |= operator.OpLeader + } + kind |= operator.OpRegion + c.Assert(op.Kind()&kind, check.Equals, kind) +} + +// CheckTransferPeerWithLeaderTransfer checks if the operator is to transfer +// peer between the specified source and target stores and it meanwhile +// transfers the leader out of source store. +func CheckTransferPeerWithLeaderTransfer(c *check.C, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { + c.Assert(op, check.NotNil) + c.Assert(op.Len(), check.Equals, 3) + CheckTransferPeer(c, op, kind, sourceID, targetID) +} diff --git a/scheduler/pkg/testutil/testutil.go b/scheduler/pkg/testutil/testutil.go new file mode 100644 index 00000000..f16fdb62 --- /dev/null +++ b/scheduler/pkg/testutil/testutil.go @@ -0,0 +1,67 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "os" + "strings" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + check "github.com/pingcap/check" + "google.golang.org/grpc" +) + +const ( + waitMaxRetry = 200 + waitRetrySleep = time.Millisecond * 100 +) + +// CheckFunc is a condition checker that passed to WaitUntil. Its implementation +// may call c.Fatal() to abort the test, or c.Log() to add more information. +type CheckFunc func(c *check.C) bool + +// WaitUntil repeatly evaluates f() for a period of time, util it returns true. +func WaitUntil(c *check.C, f CheckFunc) { + c.Log("wait start") + for i := 0; i < waitMaxRetry; i++ { + if f(c) { + return + } + time.Sleep(waitRetrySleep) + } + c.Fatal("wait timeout") +} + +// NewRequestHeader creates a new request header. +func NewRequestHeader(clusterID uint64) *schedulerpb.RequestHeader { + return &schedulerpb.RequestHeader{ + ClusterId: clusterID, + } +} + +// MustNewGrpcClient must create a new grpc client. +func MustNewGrpcClient(c *check.C, addr string) schedulerpb.SchedulerClient { + conn, err := grpc.Dial(strings.TrimPrefix(addr, "http://"), grpc.WithInsecure()) + + c.Assert(err, check.IsNil) + return schedulerpb.NewSchedulerClient(conn) +} + +// CleanServer is used to clean data directory. +func CleanServer(cfg *config.Config) { + // Clean data directory + os.RemoveAll(cfg.DataDir) +} diff --git a/scheduler/pkg/tsoutil/tso.go b/scheduler/pkg/tsoutil/tso.go new file mode 100644 index 00000000..df3eb0c7 --- /dev/null +++ b/scheduler/pkg/tsoutil/tso.go @@ -0,0 +1,29 @@ +// Copyright 2019 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsoutil + +import "time" + +const ( + PhysicalShiftBits = 18 + LogicalBits = (1 << PhysicalShiftBits) - 1 +) + +// ParseTS parses the ts to (physical,logical). +func ParseTS(ts uint64) (time.Time, uint64) { + logical := ts & LogicalBits + physical := ts >> PhysicalShiftBits + physicalTime := time.Unix(int64(physical/1000), int64(physical)%1000*time.Millisecond.Nanoseconds()) + return physicalTime, logical +} diff --git a/scheduler/pkg/typeutil/convension.go b/scheduler/pkg/typeutil/convension.go new file mode 100644 index 00000000..80bb7b09 --- /dev/null +++ b/scheduler/pkg/typeutil/convension.go @@ -0,0 +1,36 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "encoding/binary" + + "github.com/pkg/errors" +) + +// BytesToUint64 converts a byte slice to uint64. +func BytesToUint64(b []byte) (uint64, error) { + if len(b) != 8 { + return 0, errors.Errorf("invalid data, must 8 bytes, but %d", len(b)) + } + + return binary.BigEndian.Uint64(b), nil +} + +// Uint64ToBytes converts uint64 to a byte slice. +func Uint64ToBytes(v uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b +} diff --git a/scheduler/pkg/typeutil/duration.go b/scheduler/pkg/typeutil/duration.go new file mode 100644 index 00000000..c29c8de8 --- /dev/null +++ b/scheduler/pkg/typeutil/duration.go @@ -0,0 +1,58 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "fmt" + "strconv" + "time" + + "github.com/pkg/errors" +) + +// Duration is a wrapper of time.Duration for TOML and JSON. +type Duration struct { + time.Duration +} + +// NewDuration creates a Duration from time.Duration. +func NewDuration(duration time.Duration) Duration { + return Duration{Duration: duration} +} + +// MarshalJSON returns the duration as a JSON string. +func (d *Duration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.String())), nil +} + +// UnmarshalJSON parses a JSON string into the duration. 
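Since `ParseTS` earlier in this hunk fixes the TSO layout (an 18-bit logical counter in the low bits, millisecond physical time above it), here is a short round-trip sketch; the wall-clock value and logical counter are arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/tsoutil"
)

func main() {
	// Composing a TSO is the inverse of ParseTS: shift the millisecond
	// timestamp left by PhysicalShiftBits and OR in the logical counter.
	physicalMs := uint64(time.Date(2020, 4, 30, 15, 0, 0, 0, time.UTC).UnixNano() / int64(time.Millisecond))
	logical := uint64(7)
	ts := physicalMs<<tsoutil.PhysicalShiftBits | logical

	t, l := tsoutil.ParseTS(ts)
	fmt.Println(t.UTC(), l) // 2020-04-30 15:00:00 +0000 UTC 7
}
```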
+func (d *Duration) UnmarshalJSON(text []byte) error { + s, err := strconv.Unquote(string(text)) + if err != nil { + return errors.WithStack(err) + } + duration, err := time.ParseDuration(s) + if err != nil { + return errors.WithStack(err) + } + d.Duration = duration + return nil +} + +// UnmarshalText parses a TOML string into the duration. +func (d *Duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return errors.WithStack(err) +} diff --git a/scheduler/pkg/typeutil/duration_test.go b/scheduler/pkg/typeutil/duration_test.go new file mode 100644 index 00000000..3772d3fe --- /dev/null +++ b/scheduler/pkg/typeutil/duration_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "encoding/json" + + "github.com/BurntSushi/toml" + . "github.com/pingcap/check" +) + +var _ = Suite(&testDurationSuite{}) + +type testDurationSuite struct{} + +type example struct { + Interval Duration `json:"interval" toml:"interval"` +} + +func (s *testDurationSuite) TestJSON(c *C) { + example := &example{} + + text := []byte(`{"interval":"1h1m1s"}`) + c.Assert(json.Unmarshal(text, example), IsNil) + c.Assert(example.Interval.Seconds(), Equals, float64(60*60+60+1)) + + b, err := json.Marshal(example) + c.Assert(err, IsNil) + c.Assert(string(b), Equals, string(text)) +} + +func (s *testDurationSuite) TestTOML(c *C) { + example := &example{} + + text := []byte(`interval = "1h1m1s"`) + c.Assert(toml.Unmarshal(text, example), IsNil) + c.Assert(example.Interval.Seconds(), Equals, float64(60*60+60+1)) +} diff --git a/scheduler/pkg/typeutil/size.go b/scheduler/pkg/typeutil/size.go new file mode 100644 index 00000000..efbc52db --- /dev/null +++ b/scheduler/pkg/typeutil/size.go @@ -0,0 +1,53 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "strconv" + + "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// ByteSize is a retype uint64 for TOML and JSON. +type ByteSize uint64 + +// MarshalJSON returns the size as a JSON string. +func (b ByteSize) MarshalJSON() ([]byte, error) { + return []byte(`"` + units.BytesSize(float64(b)) + `"`), nil +} + +// UnmarshalJSON parses a JSON string into the bytesize. +func (b *ByteSize) UnmarshalJSON(text []byte) error { + s, err := strconv.Unquote(string(text)) + if err != nil { + return errors.WithStack(err) + } + v, err := units.RAMInBytes(s) + if err != nil { + return errors.WithStack(err) + } + *b = ByteSize(v) + return nil +} + +// UnmarshalText parses a Toml string into the bytesize. 
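A short sketch of the human-readable `ByteSize` round trip backed by go-units; the sizes are arbitrary.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil"
)

func main() {
	// ByteSize marshals to a human-readable string and parses one back via
	// go-units, so "4GiB" and the raw number 4294967296 describe the same value.
	var size typeutil.ByteSize
	if err := json.Unmarshal([]byte(`"4GiB"`), &size); err != nil {
		panic(err)
	}
	fmt.Println(uint64(size)) // 4294967296

	out, _ := json.Marshal(typeutil.ByteSize(256 << 20))
	fmt.Println(string(out)) // "256MiB"
}
```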
+func (b *ByteSize) UnmarshalText(text []byte) error { + v, err := units.RAMInBytes(string(text)) + if err != nil { + return errors.WithStack(err) + } + *b = ByteSize(v) + return nil +} diff --git a/scheduler/pkg/typeutil/size_test.go b/scheduler/pkg/typeutil/size_test.go new file mode 100644 index 00000000..54be8893 --- /dev/null +++ b/scheduler/pkg/typeutil/size_test.go @@ -0,0 +1,45 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "encoding/json" + "testing" + + . "github.com/pingcap/check" +) + +func TestSize(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testSizeSuite{}) + +type testSizeSuite struct { +} + +func (s *testSizeSuite) TestJSON(c *C) { + b := ByteSize(265421587) + o, err := json.Marshal(b) + c.Assert(err, IsNil) + + var nb ByteSize + err = json.Unmarshal(o, &nb) + c.Assert(err, IsNil) + + b = ByteSize(1756821276000) + o, err = json.Marshal(b) + c.Assert(err, IsNil) + c.Assert(string(o), Equals, `"1.598TiB"`) +} diff --git a/scheduler/pkg/typeutil/string_slice.go b/scheduler/pkg/typeutil/string_slice.go new file mode 100644 index 00000000..e62df903 --- /dev/null +++ b/scheduler/pkg/typeutil/string_slice.go @@ -0,0 +1,43 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "strconv" + "strings" + + "github.com/pkg/errors" +) + +//StringSlice is more friendly to json encode/decode +type StringSlice []string + +// MarshalJSON returns the size as a JSON string. +func (s StringSlice) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(strings.Join(s, ","))), nil +} + +// UnmarshalJSON parses a JSON string into the bytesize. +func (s *StringSlice) UnmarshalJSON(text []byte) error { + data, err := strconv.Unquote(string(text)) + if err != nil { + return errors.WithStack(err) + } + if len(data) == 0 { + *s = nil + return nil + } + *s = strings.Split(data, ",") + return nil +} diff --git a/scheduler/pkg/typeutil/string_slice_test.go b/scheduler/pkg/typeutil/string_slice_test.go new file mode 100644 index 00000000..ce8e8ddb --- /dev/null +++ b/scheduler/pkg/typeutil/string_slice_test.go @@ -0,0 +1,48 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "encoding/json" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testStringSliceSuite{}) + +type testStringSliceSuite struct { +} + +func (s *testStringSliceSuite) TestJSON(c *C) { + b := StringSlice([]string{"zone", "rack"}) + o, err := json.Marshal(b) + c.Assert(err, IsNil) + c.Assert(string(o), Equals, "\"zone,rack\"") + + var nb StringSlice + err = json.Unmarshal(o, &nb) + c.Assert(err, IsNil) + c.Assert(nb, DeepEquals, b) +} + +func (s *testStringSliceSuite) TestEmpty(c *C) { + var ss StringSlice + b, err := json.Marshal(ss) + c.Assert(err, IsNil) + c.Assert(string(b), Equals, "\"\"") + + var ss2 StringSlice + c.Assert(ss2.UnmarshalJSON(b), IsNil) + c.Assert(ss2, DeepEquals, ss) +} diff --git a/scheduler/pkg/typeutil/time.go b/scheduler/pkg/typeutil/time.go new file mode 100644 index 00000000..e9e9631a --- /dev/null +++ b/scheduler/pkg/typeutil/time.go @@ -0,0 +1,34 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import "time" + +// ZeroTime is a zero time. +var ZeroTime = time.Time{} + +// ParseTimestamp returns a timestamp for a given byte slice. +func ParseTimestamp(data []byte) (time.Time, error) { + nano, err := BytesToUint64(data) + if err != nil { + return ZeroTime, err + } + + return time.Unix(0, int64(nano)), nil +} + +// SubTimeByWallClock returns the duration between two different timestamps. +func SubTimeByWallClock(after time.Time, before time.Time) time.Duration { + return time.Duration(after.UnixNano() - before.UnixNano()) +} diff --git a/scheduler/pkg/typeutil/time_test.go b/scheduler/pkg/typeutil/time_test.go new file mode 100644 index 00000000..a8a8c7de --- /dev/null +++ b/scheduler/pkg/typeutil/time_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package typeutil + +import ( + "math/rand" + "time" + + . 
"github.com/pingcap/check" +) + +var _ = Suite(&testTimeSuite{}) + +type testTimeSuite struct{} + +func (s *testTimeSuite) TestParseTimestap(c *C) { + for i := 0; i < 3; i++ { + t := time.Now().Add(time.Second * time.Duration(rand.Int31n(1000))) + data := Uint64ToBytes(uint64(t.UnixNano())) + nt, err := ParseTimestamp(data) + c.Assert(err, IsNil) + c.Assert(nt.Equal(t), IsTrue) + } + data := []byte("pd") + nt, err := ParseTimestamp(data) + c.Assert(err, NotNil) + c.Assert(nt.Equal(ZeroTime), IsTrue) +} + +func (s *testTimeSuite) TestSubTimeByWallClock(c *C) { + for i := 0; i < 3; i++ { + r := rand.Int31n(1000) + t1 := time.Now() + t2 := t1.Add(time.Second * time.Duration(r)) + duration := SubTimeByWallClock(t2, t1) + c.Assert(duration, Equals, time.Second*time.Duration(r)) + } +} diff --git a/scheduler/scripts/build-api.sh b/scheduler/scripts/build-api.sh new file mode 100755 index 00000000..eca8a3ff --- /dev/null +++ b/scheduler/scripts/build-api.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Make sure docker is installed before executing it. + +docker pull mattjtodd/raml2html:latest +docker run --rm -v $PWD:/raml mattjtodd/raml2html -i /raml/server/api/api.raml -o /raml/docs/api.html + diff --git a/scheduler/scripts/retool b/scheduler/scripts/retool new file mode 100755 index 00000000..2e6406df --- /dev/null +++ b/scheduler/scripts/retool @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# +# Add standard retool options +set -euo pipefail + + +cd $(dirname "$0")/.. +exec retool -base-dir=$PWD "$@" diff --git a/scheduler/scripts/retool-install.sh b/scheduler/scripts/retool-install.sh new file mode 100755 index 00000000..75287bab --- /dev/null +++ b/scheduler/scripts/retool-install.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -euo pipefail + +# This script generates tools.json +# It helps record what releases/branches are being used + +cd $(dirname "$0")/.. +which retool >/dev/null || go get github.com/twitchtv/retool + +# tool environment +# check runner +./scripts/retool add gopkg.in/alecthomas/gometalinter.v2 v2.0.5 +# check spelling +./scripts/retool add github.com/client9/misspell/cmd/misspell v0.3.4 +# checks correctness +./scripts/retool add github.com/gordonklaus/ineffassign 7bae11eba15a3285c75e388f77eb6357a2d73ee2 +./scripts/retool add honnef.co/go/tools/cmd/megacheck master +./scripts/retool add github.com/dnephin/govet 4a96d43e39d340b63daa8bc5576985aa599885f6 +# slow checks +./scripts/retool add github.com/kisielk/errcheck v1.1.0 +# linter +./scripts/retool add github.com/mgechev/revive 7773f47324c2bf1c8f7a5500aff2b6c01d3ed73b +./scripts/retool add github.com/securego/gosec/cmd/gosec 1.0.0 +# go fail +./scripts/retool add github.com/pingcap/failpoint/failpoint-ctl master +# deadlock detection +./scripts/retool add golang.org/x/tools/cmd/goimports 04b5d21e00f1f47bd824a6ade581e7189bacde87 diff --git a/scheduler/server/cluster.go b/scheduler/server/cluster.go new file mode 100644 index 00000000..df39f2c4 --- /dev/null +++ b/scheduler/server/cluster.go @@ -0,0 +1,857 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "path" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/id" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap/errcode" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +var ( + backgroundJobInterval = time.Minute + defaultChangedRegionsLimit = 10000 +) + +// RaftCluster is used for cluster config management. +// Raft cluster key format: +// cluster 1 -> /1/raft, value is metapb.Cluster +// cluster 2 -> /2/raft +// For cluster 1 +// store 1 -> /1/raft/s/1, value is metapb.Store +// region 1 -> /1/raft/r/1, value is metapb.Region +type RaftCluster struct { + sync.RWMutex + ctx context.Context + + s *Server + + running bool + + clusterID uint64 + clusterRoot string + + // cached cluster info + core *core.BasicCluster + meta *metapb.Cluster + opt *config.ScheduleOption + storage *core.Storage + id id.Allocator + + prepareChecker *prepareChecker + + coordinator *coordinator + + wg sync.WaitGroup + quit chan struct{} +} + +// ClusterStatus saves some state information +type ClusterStatus struct { + RaftBootstrapTime time.Time `json:"raft_bootstrap_time,omitempty"` + IsInitialized bool `json:"is_initialized"` +} + +func newRaftCluster(ctx context.Context, s *Server, clusterID uint64) *RaftCluster { + return &RaftCluster{ + ctx: ctx, + s: s, + running: false, + clusterID: clusterID, + clusterRoot: s.getClusterRootPath(), + } +} + +func (c *RaftCluster) loadClusterStatus() (*ClusterStatus, error) { + bootstrapTime, err := c.loadBootstrapTime() + if err != nil { + return nil, err + } + var isInitialized bool + if bootstrapTime != typeutil.ZeroTime { + isInitialized = c.isInitialized() + } + return &ClusterStatus{ + RaftBootstrapTime: bootstrapTime, + IsInitialized: isInitialized, + }, nil +} + +func (c *RaftCluster) isInitialized() bool { + if c.core.GetRegionCount() > 1 { + return true + } + region := c.core.SearchRegion(nil) + return region != nil && + len(region.GetVoters()) >= int(c.s.GetReplicationConfig().MaxReplicas) && + len(region.GetPendingPeers()) == 0 +} + +// loadBootstrapTime loads the saved bootstrap time from etcd. It returns zero +// value of time.Time when there is error or the cluster is not bootstrapped +// yet. 
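+// The stored value is expected to be the 8-byte big-endian encoding of the
+// bootstrap time in nanoseconds, so it round-trips through the typeutil
+// helpers, e.g. (illustrative sketch):
+//
+//	data := typeutil.Uint64ToBytes(uint64(bootstrapTime.UnixNano()))
+//	t, _ := typeutil.ParseTimestamp(data) // recovers bootstrapTime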
+func (c *RaftCluster) loadBootstrapTime() (time.Time, error) { + var t time.Time + data, err := c.s.storage.Load(c.s.storage.ClusterStatePath("raft_bootstrap_time")) + if err != nil { + return t, err + } + if data == "" { + return t, nil + } + return typeutil.ParseTimestamp([]byte(data)) +} + +func (c *RaftCluster) initCluster(id id.Allocator, opt *config.ScheduleOption, storage *core.Storage) { + c.core = core.NewBasicCluster() + c.opt = opt + c.storage = storage + c.id = id + c.prepareChecker = newPrepareChecker() +} + +func (c *RaftCluster) start() error { + c.Lock() + defer c.Unlock() + + if c.running { + log.Warn("raft cluster has already been started") + return nil + } + + c.initCluster(c.s.idAllocator, c.s.scheduleOpt, c.s.storage) + cluster, err := c.loadClusterInfo() + if err != nil { + return err + } + if cluster == nil { + return nil + } + + c.coordinator = newCoordinator(c.ctx, cluster, c.s.hbStreams) + c.quit = make(chan struct{}) + + c.wg.Add(2) + go c.runCoordinator() + go c.runBackgroundJobs(backgroundJobInterval) + c.running = true + + return nil +} + +// Return nil if cluster is not bootstrapped. +func (c *RaftCluster) loadClusterInfo() (*RaftCluster, error) { + c.meta = &metapb.Cluster{} + ok, err := c.storage.LoadMeta(c.meta) + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + + start := time.Now() + if err := c.storage.LoadStores(c.core.PutStore); err != nil { + return nil, err + } + log.Info("load stores", + zap.Int("count", c.getStoreCount()), + zap.Duration("cost", time.Since(start)), + ) + return c, nil +} + +func (c *RaftCluster) runBackgroundJobs(interval time.Duration) { + defer logutil.LogPanic() + defer c.wg.Done() + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-c.quit: + log.Info("background jobs has been stopped") + return + case <-ticker.C: + c.checkStores() + } + } +} + +func (c *RaftCluster) runCoordinator() { + defer logutil.LogPanic() + defer c.wg.Done() + defer func() { + c.coordinator.wg.Wait() + log.Info("coordinator has been stopped") + }() + c.coordinator.run() + <-c.coordinator.ctx.Done() + log.Info("coordinator is stopping") +} + +func (c *RaftCluster) stop() { + c.Lock() + + if !c.running { + c.Unlock() + return + } + + c.running = false + + close(c.quit) + c.coordinator.stop() + c.Unlock() + c.wg.Wait() +} + +func (c *RaftCluster) isRunning() bool { + c.RLock() + defer c.RUnlock() + return c.running +} + +// GetOperatorController returns the operator controller. +func (c *RaftCluster) GetOperatorController() *schedule.OperatorController { + c.RLock() + defer c.RUnlock() + return c.coordinator.opController +} + +// GetHeartbeatStreams returns the heartbeat streams. +func (c *RaftCluster) GetHeartbeatStreams() *heartbeatStreams { + c.RLock() + defer c.RUnlock() + return c.coordinator.hbStreams +} + +// GetCoordinator returns the coordinator. +func (c *RaftCluster) GetCoordinator() *coordinator { + c.RLock() + defer c.RUnlock() + return c.coordinator +} + +// handleStoreHeartbeat updates the store status. +func (c *RaftCluster) handleStoreHeartbeat(stats *schedulerpb.StoreStats) error { + c.Lock() + defer c.Unlock() + + storeID := stats.GetStoreId() + store := c.GetStore(storeID) + if store == nil { + return core.NewStoreNotFoundErr(storeID) + } + newStore := store.Clone(core.SetStoreStats(stats), core.SetLastHeartbeatTS(time.Now())) + c.core.PutStore(newStore) + return nil +} + +// processRegionHeartbeat updates the region information. 
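+// This is currently a stub; judging from the 3C cases in cluster_test.go, an
+// implementation is roughly expected to reject heartbeats whose RegionEpoch is
+// older than the cached region's, save the region (e.g. via c.putRegion) and
+// refresh the affected stores with c.updateStoreStatusLocked. This is a sketch
+// of the intent, not a prescribed solution.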
+func (c *RaftCluster) processRegionHeartbeat(region *core.RegionInfo) error { + return nil +} + +func (c *RaftCluster) updateStoreStatusLocked(id uint64) { + leaderCount := c.core.GetStoreLeaderCount(id) + regionCount := c.core.GetStoreRegionCount(id) + pendingPeerCount := c.core.GetStorePendingPeerCount(id) + leaderRegionSize := c.core.GetStoreLeaderRegionSize(id) + regionSize := c.core.GetStoreRegionSize(id) + c.core.UpdateStoreStatus(id, leaderCount, regionCount, pendingPeerCount, leaderRegionSize, regionSize) +} + +func makeStoreKey(clusterRootPath string, storeID uint64) string { + return path.Join(clusterRootPath, "s", fmt.Sprintf("%020d", storeID)) +} + +func makeRaftClusterStatusPrefix(clusterRootPath string) string { + return path.Join(clusterRootPath, "status") +} + +func makeBootstrapTimeKey(clusterRootPath string) string { + return path.Join(makeRaftClusterStatusPrefix(clusterRootPath), "raft_bootstrap_time") +} + +func checkBootstrapRequest(clusterID uint64, req *schedulerpb.BootstrapRequest) error { + // TODO: do more check for request fields validation. + + storeMeta := req.GetStore() + if storeMeta == nil { + return errors.Errorf("missing store meta for bootstrap %d", clusterID) + } else if storeMeta.GetId() == 0 { + return errors.New("invalid zero store id") + } + + return nil +} + +func (c *RaftCluster) getClusterID() uint64 { + c.RLock() + defer c.RUnlock() + return c.meta.GetId() +} + +func (c *RaftCluster) putMetaLocked(meta *metapb.Cluster) error { + if c.storage != nil { + if err := c.storage.SaveMeta(meta); err != nil { + return err + } + } + c.meta = meta + return nil +} + +// GetRegionByKey gets region and leader peer by region key from cluster. +func (c *RaftCluster) GetRegionByKey(regionKey []byte) (*metapb.Region, *metapb.Peer) { + region := c.core.SearchRegion(regionKey) + if region == nil { + return nil, nil + } + return region.GetMeta(), region.GetLeader() +} + +// GetPrevRegionByKey gets previous region and leader peer by the region key from cluster. +func (c *RaftCluster) GetPrevRegionByKey(regionKey []byte) (*metapb.Region, *metapb.Peer) { + region := c.core.SearchPrevRegion(regionKey) + if region == nil { + return nil, nil + } + return region.GetMeta(), region.GetLeader() +} + +// GetRegionInfoByKey gets regionInfo by region key from cluster. +func (c *RaftCluster) GetRegionInfoByKey(regionKey []byte) *core.RegionInfo { + return c.core.SearchRegion(regionKey) +} + +// ScanRegions scans region with start key, until the region contains endKey, or +// total number greater than limit. +func (c *RaftCluster) ScanRegions(startKey, endKey []byte, limit int) []*core.RegionInfo { + return c.core.ScanRange(startKey, endKey, limit) +} + +// GetRegionByID gets region and leader peer by regionID from cluster. +func (c *RaftCluster) GetRegionByID(regionID uint64) (*metapb.Region, *metapb.Peer) { + region := c.GetRegion(regionID) + if region == nil { + return nil, nil + } + return region.GetMeta(), region.GetLeader() +} + +// GetRegion searches for a region by ID. +func (c *RaftCluster) GetRegion(regionID uint64) *core.RegionInfo { + return c.core.GetRegion(regionID) +} + +// GetMetaRegions gets regions from cluster. +func (c *RaftCluster) GetMetaRegions() []*metapb.Region { + return c.core.GetMetaRegions() +} + +// GetRegions returns all regions' information in detail. 
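+// The accessors below are thin delegations to the in-memory core.BasicCluster
+// cache (c.core); they operate on cached state and do not touch persistent
+// storage.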
+func (c *RaftCluster) GetRegions() []*core.RegionInfo {
+	return c.core.GetRegions()
+}
+
+// GetRegionCount returns the total count of regions.
+func (c *RaftCluster) GetRegionCount() int {
+	return c.core.GetRegionCount()
+}
+
+// GetStoreRegions returns all regions' information with a given storeID.
+func (c *RaftCluster) GetStoreRegions(storeID uint64) []*core.RegionInfo {
+	return c.core.GetStoreRegions(storeID)
+}
+
+// RandLeaderRegion returns a random region that has a leader on the store.
+func (c *RaftCluster) RandLeaderRegion(storeID uint64, opts ...core.RegionOption) *core.RegionInfo {
+	return c.core.RandLeaderRegion(storeID, opts...)
+}
+
+// RandFollowerRegion returns a random region that has a follower on the store.
+func (c *RaftCluster) RandFollowerRegion(storeID uint64, opts ...core.RegionOption) *core.RegionInfo {
+	return c.core.RandFollowerRegion(storeID, opts...)
+}
+
+// RandPendingRegion returns a random region that has a pending peer on the store.
+func (c *RaftCluster) RandPendingRegion(storeID uint64, opts ...core.RegionOption) *core.RegionInfo {
+	return c.core.RandPendingRegion(storeID, opts...)
+}
+
+// GetPendingRegionsWithLock returns the pending regions subtree by storeID.
+func (c *RaftCluster) GetPendingRegionsWithLock(storeID uint64, callback func(core.RegionsContainer)) {
+	c.core.GetPendingRegionsWithLock(storeID, callback)
+}
+
+// GetLeadersWithLock returns the leaders subtree by storeID.
+func (c *RaftCluster) GetLeadersWithLock(storeID uint64, callback func(core.RegionsContainer)) {
+	c.core.GetLeadersWithLock(storeID, callback)
+}
+
+// GetFollowersWithLock returns the followers subtree by storeID.
+func (c *RaftCluster) GetFollowersWithLock(storeID uint64, callback func(core.RegionsContainer)) {
+	c.core.GetFollowersWithLock(storeID, callback)
+}
+
+// GetLeaderStore returns the store that contains the region's leader peer.
+func (c *RaftCluster) GetLeaderStore(region *core.RegionInfo) *core.StoreInfo {
+	return c.core.GetLeaderStore(region)
+}
+
+// GetFollowerStores returns all stores that contain the region's follower peers.
+func (c *RaftCluster) GetFollowerStores(region *core.RegionInfo) []*core.StoreInfo {
+	return c.core.GetFollowerStores(region)
+}
+
+// GetRegionStores returns all stores that contain the region's peers.
+func (c *RaftCluster) GetRegionStores(region *core.RegionInfo) []*core.StoreInfo {
+	return c.core.GetRegionStores(region)
+}
+
+func (c *RaftCluster) getStoreCount() int {
+	return c.core.GetStoreCount()
+}
+
+// GetStoreRegionCount returns the number of regions for a given store.
+func (c *RaftCluster) GetStoreRegionCount(storeID uint64) int {
+	return c.core.GetStoreRegionCount(storeID)
+}
+
+// GetAverageRegionSize returns the average approximate region size.
+func (c *RaftCluster) GetAverageRegionSize() int64 {
+	return c.core.GetAverageRegionSize()
+}
+
+// DropCacheRegion removes a region from the cache.
+func (c *RaftCluster) DropCacheRegion(id uint64) {
+	c.RLock()
+	defer c.RUnlock()
+	if region := c.GetRegion(id); region != nil {
+		c.core.RemoveRegion(region)
+	}
+}
+
+// GetMetaStores gets stores from cluster.
+func (c *RaftCluster) GetMetaStores() []*metapb.Store {
+	return c.core.GetMetaStores()
+}
+
+// GetStores returns all stores in the cluster.
+func (c *RaftCluster) GetStores() []*core.StoreInfo {
+	return c.core.GetStores()
+}
+
+// GetStore gets a store from the cluster.
+func (c *RaftCluster) GetStore(storeID uint64) *core.StoreInfo { + return c.core.GetStore(storeID) +} + +func (c *RaftCluster) putStore(store *metapb.Store) error { + c.Lock() + defer c.Unlock() + + if store.GetId() == 0 { + return errors.Errorf("invalid put store %v", store) + } + + // Store address can not be the same as other stores. + for _, s := range c.GetStores() { + // It's OK to start a new store on the same address if the old store has been removed. + if s.IsTombstone() { + continue + } + if s.GetID() != store.GetId() && s.GetAddress() == store.GetAddress() { + return errors.Errorf("duplicated store address: %v, already registered by %v", store, s.GetMeta()) + } + } + + s := c.GetStore(store.GetId()) + if s == nil { + // Add a new store. + s = core.NewStoreInfo(store) + } else { + // Update an existed store. + s = s.Clone( + core.SetStoreAddress(store.Address), + ) + } + return c.putStoreLocked(s) +} + +// RemoveStore marks a store as offline in cluster. +// State transition: Up -> Offline. +func (c *RaftCluster) RemoveStore(storeID uint64) error { + op := errcode.Op("store.remove") + c.Lock() + defer c.Unlock() + + store := c.GetStore(storeID) + if store == nil { + return op.AddTo(core.NewStoreNotFoundErr(storeID)) + } + + // Remove an offline store should be OK, nothing to do. + if store.IsOffline() { + return nil + } + + if store.IsTombstone() { + return op.AddTo(core.StoreTombstonedErr{StoreID: storeID}) + } + + newStore := store.Clone(core.SetStoreState(metapb.StoreState_Offline)) + log.Warn("store has been offline", + zap.Uint64("store-id", newStore.GetID()), + zap.String("store-address", newStore.GetAddress())) + return c.putStoreLocked(newStore) +} + +// BuryStore marks a store as tombstone in cluster. +// State transition: +// Case 1: Up -> Tombstone (if force is true); +// Case 2: Offline -> Tombstone. +func (c *RaftCluster) BuryStore(storeID uint64, force bool) error { // revive:disable-line:flag-parameter + c.Lock() + defer c.Unlock() + + store := c.GetStore(storeID) + if store == nil { + return core.NewStoreNotFoundErr(storeID) + } + + // Bury a tombstone store should be OK, nothing to do. + if store.IsTombstone() { + return nil + } + + if store.IsUp() { + if !force { + return errors.New("store is still up, please remove store gracefully") + } + log.Warn("forcedly bury store", zap.Stringer("store", store.GetMeta())) + } + + newStore := store.Clone(core.SetStoreState(metapb.StoreState_Tombstone)) + log.Warn("store has been Tombstone", + zap.Uint64("store-id", newStore.GetID()), + zap.String("store-address", newStore.GetAddress())) + return c.putStoreLocked(newStore) +} + +// BlockStore stops balancer from selecting the store. +func (c *RaftCluster) BlockStore(storeID uint64) error { + return c.core.BlockStore(storeID) +} + +// UnblockStore allows balancer to select the store. +func (c *RaftCluster) UnblockStore(storeID uint64) { + c.core.UnblockStore(storeID) +} + +// AttachAvailableFunc attaches an available function to a specific store. +func (c *RaftCluster) AttachAvailableFunc(storeID uint64, f func() bool) { + c.core.AttachAvailableFunc(storeID, f) +} + +// SetStoreState sets up a store's state. 
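+// For reference, the store state transitions implemented by RemoveStore and
+// BuryStore above are:
+//
+//	Up --RemoveStore--> Offline --BuryStore--> Tombstone
+//	Up --BuryStore(force=true)--> Tombstone
+//
+// SetStoreState below overrides the state directly, without those checks.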
+func (c *RaftCluster) SetStoreState(storeID uint64, state metapb.StoreState) error { + c.Lock() + defer c.Unlock() + + store := c.GetStore(storeID) + if store == nil { + return core.NewStoreNotFoundErr(storeID) + } + + newStore := store.Clone(core.SetStoreState(state)) + log.Warn("store update state", + zap.Uint64("store-id", storeID), + zap.Stringer("new-state", state)) + return c.putStoreLocked(newStore) +} + +// SetStoreWeight sets up a store's leader/region balance weight. +func (c *RaftCluster) SetStoreWeight(storeID uint64, leaderWeight, regionWeight float64) error { + c.Lock() + defer c.Unlock() + + store := c.GetStore(storeID) + if store == nil { + return core.NewStoreNotFoundErr(storeID) + } + + if err := c.s.storage.SaveStoreWeight(storeID, leaderWeight, regionWeight); err != nil { + return err + } + + newStore := store.Clone( + core.SetLeaderWeight(leaderWeight), + core.SetRegionWeight(regionWeight), + ) + + return c.putStoreLocked(newStore) +} + +func (c *RaftCluster) putStoreLocked(store *core.StoreInfo) error { + if c.storage != nil { + if err := c.storage.SaveStore(store.GetMeta()); err != nil { + return err + } + } + c.core.PutStore(store) + return nil +} + +func (c *RaftCluster) checkStores() { + var offlineStores []*metapb.Store + var upStoreCount int + stores := c.GetStores() + for _, store := range stores { + // the store has already been tombstone + if store.IsTombstone() { + continue + } + + if store.IsUp() { + upStoreCount++ + continue + } + + offlineStore := store.GetMeta() + // If the store is empty, it can be buried. + regionCount := c.core.GetStoreRegionCount(offlineStore.GetId()) + if regionCount == 0 { + if err := c.BuryStore(offlineStore.GetId(), false); err != nil { + log.Error("bury store failed", + zap.Stringer("store", offlineStore), + zap.Error(err)) + } + } else { + offlineStores = append(offlineStores, offlineStore) + } + } + + if len(offlineStores) == 0 { + return + } + + if upStoreCount < c.GetMaxReplicas() { + for _, offlineStore := range offlineStores { + log.Warn("store may not turn into Tombstone, there are no extra up store has enough space to accommodate the extra replica", zap.Stringer("store", offlineStore)) + } + } +} + +// RemoveTombStoneRecords removes the tombStone Records. 
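+// checkStores above buries an offline store once it no longer holds any
+// region; RemoveTombStoneRecords then removes such tombstoned stores from both
+// the persistent storage and the in-memory cache.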
+func (c *RaftCluster) RemoveTombStoneRecords() error { + c.Lock() + defer c.Unlock() + + for _, store := range c.GetStores() { + if store.IsTombstone() { + // the store has already been tombstone + err := c.deleteStoreLocked(store) + if err != nil { + log.Error("delete store failed", + zap.Stringer("store", store.GetMeta()), + zap.Error(err)) + return err + } + log.Info("delete store successed", + zap.Stringer("store", store.GetMeta())) + } + } + return nil +} + +func (c *RaftCluster) deleteStoreLocked(store *core.StoreInfo) error { + if c.storage != nil { + if err := c.storage.DeleteStore(store.GetMeta()); err != nil { + return err + } + } + c.core.DeleteStore(store) + return nil +} + +func (c *RaftCluster) collectHealthStatus() { + client := c.s.GetClient() + members, err := GetMembers(client) + if err != nil { + log.Error("get members error", zap.Error(err)) + } + unhealth := c.s.CheckHealth(members) + for _, member := range members { + if _, ok := unhealth[member.GetMemberId()]; ok { + continue + } + } +} + +func (c *RaftCluster) takeRegionStoresLocked(region *core.RegionInfo) []*core.StoreInfo { + stores := make([]*core.StoreInfo, 0, len(region.GetPeers())) + for _, p := range region.GetPeers() { + if store := c.core.TakeStore(p.StoreId); store != nil { + stores = append(stores, store) + } + } + return stores +} + +func (c *RaftCluster) allocID() (uint64, error) { + return c.id.Alloc() +} + +// AllocPeer allocs a new peer on a store. +func (c *RaftCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { + peerID, err := c.allocID() + if err != nil { + log.Error("failed to alloc peer", zap.Error(err)) + return nil, err + } + peer := &metapb.Peer{ + Id: peerID, + StoreId: storeID, + } + return peer, nil +} + +// GetConfig gets config from cluster. +func (c *RaftCluster) GetConfig() *metapb.Cluster { + c.RLock() + defer c.RUnlock() + return proto.Clone(c.meta).(*metapb.Cluster) +} + +func (c *RaftCluster) putConfig(meta *metapb.Cluster) error { + c.Lock() + defer c.Unlock() + if meta.GetId() != c.clusterID { + return errors.Errorf("invalid cluster %v, mismatch cluster id %d", meta, c.clusterID) + } + return c.putMetaLocked(proto.Clone(meta).(*metapb.Cluster)) +} + +// GetOpt returns the scheduling options. +func (c *RaftCluster) GetOpt() *config.ScheduleOption { + return c.opt +} + +// GetLeaderScheduleLimit returns the limit for leader schedule. +func (c *RaftCluster) GetLeaderScheduleLimit() uint64 { + return c.opt.GetLeaderScheduleLimit() +} + +// GetRegionScheduleLimit returns the limit for region schedule. +func (c *RaftCluster) GetRegionScheduleLimit() uint64 { + return c.opt.GetRegionScheduleLimit() +} + +// GetReplicaScheduleLimit returns the limit for replica schedule. +func (c *RaftCluster) GetReplicaScheduleLimit() uint64 { + return c.opt.GetReplicaScheduleLimit() +} + +// GetPatrolRegionInterval returns the interval of patroling region. +func (c *RaftCluster) GetPatrolRegionInterval() time.Duration { + return c.opt.GetPatrolRegionInterval() +} + +// GetMaxStoreDownTime returns the max down time of a store. +func (c *RaftCluster) GetMaxStoreDownTime() time.Duration { + return c.opt.GetMaxStoreDownTime() +} + +// GetMaxReplicas returns the number of replicas. 
+func (c *RaftCluster) GetMaxReplicas() int { + return c.opt.GetMaxReplicas() +} + +// isPrepared if the cluster information is collected +func (c *RaftCluster) isPrepared() bool { + c.RLock() + defer c.RUnlock() + return c.prepareChecker.check(c) +} + +func (c *RaftCluster) putRegion(region *core.RegionInfo) error { + c.Lock() + defer c.Unlock() + c.core.PutRegion(region) + return nil +} + +type prepareChecker struct { + reactiveRegions map[uint64]int + start time.Time + sum int + isPrepared bool +} + +func newPrepareChecker() *prepareChecker { + return &prepareChecker{ + start: time.Now(), + reactiveRegions: make(map[uint64]int), + } +} + +// Before starting up the scheduler, we need to take the proportion of the regions on each store into consideration. +func (checker *prepareChecker) check(c *RaftCluster) bool { + if checker.isPrepared || time.Since(checker.start) > collectTimeout { + return true + } + // The number of active regions should be more than total region of all stores * collectFactor + if float64(c.core.Length())*collectFactor > float64(checker.sum) { + return false + } + for _, store := range c.GetStores() { + if !store.IsUp() { + continue + } + storeID := store.GetID() + // For each store, the number of active regions should be more than total region of the store * collectFactor + if float64(c.core.GetStoreRegionCount(storeID))*collectFactor > float64(checker.reactiveRegions[storeID]) { + return false + } + } + checker.isPrepared = true + return true +} + +func (checker *prepareChecker) collect(region *core.RegionInfo) { + for _, p := range region.GetPeers() { + checker.reactiveRegions[p.GetStoreId()]++ + } + checker.sum++ +} diff --git a/scheduler/server/cluster_test.go b/scheduler/server/cluster_test.go new file mode 100644 index 00000000..e65b23f8 --- /dev/null +++ b/scheduler/server/cluster_test.go @@ -0,0 +1,1221 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "math/rand" + "sync" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + . 
"github.com/pingcap/check" +) + +const ( + initEpochVersion uint64 = 1 + initEpochConfVer uint64 = 1 +) + +var _ = Suite(&testClusterInfoSuite{}) + +type testClusterInfoSuite struct{} + +func (s *testClusterInfoSuite) setUpTestCluster(c *C) (*RaftCluster, []*core.RegionInfo) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + + n, np := uint64(3), uint64(3) + + stores := newTestStores(3) + regions := newTestRegions(n, np) + + for _, store := range stores { + c.Assert(cluster.putStoreLocked(store), IsNil) + } + + for i, region := range regions { + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions[:i+1]) + } + + return cluster, regions +} + +func (s *testClusterInfoSuite) TestRegionNotUpdate3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for _, region := range regions { + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionUpdateVersion3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + region = region.Clone(core.WithIncVersion()) + regions[i] = region + + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionWithStaleVersion3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + origin := region + region = origin.Clone(core.WithIncVersion()) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + + stale := origin.Clone(core.WithIncConfVer()) + c.Assert(cluster.processRegionHeartbeat(stale), NotNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionUpdateVersionAndConfver3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + region = region.Clone( + core.WithIncVersion(), + core.WithIncConfVer(), + ) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionWithStaleConfVer3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + origin := region + region = origin.Clone(core.WithIncConfVer()) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + + stale := origin.Clone() + c.Assert(cluster.processRegionHeartbeat(stale), NotNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionAddPendingPeer3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + pendingCounts := make([]int, 3) + for i, region := range regions { + pendingPeer := region.GetPeers()[rand.Intn(len(region.GetPeers()))] + pendingCounts[pendingPeer.StoreId]++ + + region := region.Clone(core.WithPendingPeers([]*metapb.Peer{pendingPeer})) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } + checkPendingPeerCount([]int{}, cluster, c) +} + +func (s *testClusterInfoSuite) TestRegionRemovePendingPeer3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + region = 
region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetPeers()[rand.Intn(len(region.GetPeers()))]})) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + + region = region.Clone(core.WithPendingPeers(nil)) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } + checkPendingPeerCount([]int{0, 0, 0}, cluster, c) +} + +func (s *testClusterInfoSuite) TestRegionRemovePeers3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + region = region.Clone(core.SetPeers(region.GetPeers()[:1])) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionAddBackPeers3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + origin := region + region = origin.Clone(core.SetPeers(region.GetPeers()[:1])) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + + region = origin + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionChangeLeader3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + region = region.Clone(core.WithLeader(region.GetPeers()[1])) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionChangeApproximateSize3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for i, region := range regions { + region = region.Clone(core.SetApproximateSize(144)) + regions[i] = region + c.Assert(cluster.processRegionHeartbeat(region), IsNil) + checkRegions(c, cluster.core.Regions, regions) + } +} + +func (s *testClusterInfoSuite) TestRegionCounts3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + regionCounts := make(map[uint64]int) + for _, region := range regions { + for _, peer := range region.GetPeers() { + regionCounts[peer.GetStoreId()]++ + } + } + for id, count := range regionCounts { + c.Assert(cluster.GetStoreRegionCount(id), Equals, count) + } +} + +func (s *testClusterInfoSuite) TestRegionGetRegions3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for _, region := range cluster.GetRegions() { + checkRegion(c, region, regions[region.GetID()]) + } + + for _, region := range cluster.GetMetaRegions() { + c.Assert(region, DeepEquals, regions[region.GetId()].GetMeta()) + } +} + +func (s *testClusterInfoSuite) TestRegionGetStores3C(c *C) { + cluster, regions := s.setUpTestCluster(c) + + for _, region := range regions { + for _, store := range cluster.GetRegionStores(region) { + c.Assert(region.GetStorePeer(store.GetID()), NotNil) + } + for _, store := range cluster.GetFollowerStores(region) { + peer := region.GetStorePeer(store.GetID()) + c.Assert(peer.GetId(), Not(Equals), region.GetLeader().GetId()) + } + } +} + +func (s *testClusterInfoSuite) TestRegionGetStoresInfo3C(c *C) { + cluster, _ := s.setUpTestCluster(c) + + for _, store := range cluster.core.Stores.GetStores() { + c.Assert(store.GetLeaderCount(), Equals, cluster.core.Regions.GetStoreLeaderCount(store.GetID())) + c.Assert(store.GetRegionCount(), Equals, cluster.core.Regions.GetStoreRegionCount(store.GetID())) 
+ c.Assert(store.GetLeaderSize(), Equals, cluster.core.Regions.GetStoreLeaderRegionSize(store.GetID())) + c.Assert(store.GetRegionSize(), Equals, cluster.core.Regions.GetStoreRegionSize(store.GetID())) + } +} + +func heartbeatRegions(c *C, cluster *RaftCluster, regions []*core.RegionInfo) { + // Heartbeat and check region one by one. + for _, r := range regions { + c.Assert(cluster.processRegionHeartbeat(r), IsNil) + + checkRegion(c, cluster.GetRegion(r.GetID()), r) + checkRegion(c, cluster.GetRegionInfoByKey(r.GetStartKey()), r) + + if len(r.GetEndKey()) > 0 { + end := r.GetEndKey()[0] + checkRegion(c, cluster.GetRegionInfoByKey([]byte{end - 1}), r) + } + } + + // Check all regions after handling all heartbeats. + for _, r := range regions { + checkRegion(c, cluster.GetRegion(r.GetID()), r) + checkRegion(c, cluster.GetRegionInfoByKey(r.GetStartKey()), r) + + if len(r.GetEndKey()) > 0 { + end := r.GetEndKey()[0] + checkRegion(c, cluster.GetRegionInfoByKey([]byte{end - 1}), r) + result := cluster.GetRegionInfoByKey([]byte{end + 1}) + c.Assert(result.GetID(), Not(Equals), r.GetID()) + } + } +} + +func (s *testClusterInfoSuite) TestHeartbeatSplit3C(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + + // 1: [nil, nil) + region1 := core.NewRegionInfo(&metapb.Region{Id: 1, RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}}, nil) + c.Assert(cluster.processRegionHeartbeat(region1), IsNil) + checkRegion(c, cluster.GetRegionInfoByKey([]byte("foo")), region1) + + // split 1 to 2: [nil, m) 1: [m, nil), sync 2 first. + region1 = region1.Clone( + core.WithStartKey([]byte("m")), + core.WithIncVersion(), + ) + region2 := core.NewRegionInfo(&metapb.Region{Id: 2, EndKey: []byte("m"), RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}}, nil) + c.Assert(cluster.processRegionHeartbeat(region2), IsNil) + checkRegion(c, cluster.GetRegionInfoByKey([]byte("a")), region2) + // [m, nil) is missing before r1's heartbeat. + c.Assert(cluster.GetRegionInfoByKey([]byte("z")), IsNil) + + c.Assert(cluster.processRegionHeartbeat(region1), IsNil) + checkRegion(c, cluster.GetRegionInfoByKey([]byte("z")), region1) + + // split 1 to 3: [m, q) 1: [q, nil), sync 1 first. + region1 = region1.Clone( + core.WithStartKey([]byte("q")), + core.WithIncVersion(), + ) + region3 := core.NewRegionInfo(&metapb.Region{Id: 3, StartKey: []byte("m"), EndKey: []byte("q"), RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}}, nil) + c.Assert(cluster.processRegionHeartbeat(region1), IsNil) + checkRegion(c, cluster.GetRegionInfoByKey([]byte("z")), region1) + checkRegion(c, cluster.GetRegionInfoByKey([]byte("a")), region2) + // [m, q) is missing before r3's heartbeat. + c.Assert(cluster.GetRegionInfoByKey([]byte("n")), IsNil) + c.Assert(cluster.processRegionHeartbeat(region3), IsNil) + checkRegion(c, cluster.GetRegionInfoByKey([]byte("n")), region3) +} + +func (s *testClusterInfoSuite) TestRegionSplitAndMerge3C(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + + regions := []*core.RegionInfo{core.NewTestRegionInfo([]byte{}, []byte{})} + + // Byte will underflow/overflow if n > 7. + n := 7 + + // Split. + for i := 0; i < n; i++ { + regions = core.SplitRegions(regions) + heartbeatRegions(c, cluster, regions) + } + + // Merge. 
+ for i := 0; i < n; i++ { + regions = core.MergeRegions(regions) + heartbeatRegions(c, cluster, regions) + } + + // Split twice and merge once. + for i := 0; i < n*2; i++ { + if (i+1)%3 == 0 { + regions = core.MergeRegions(regions) + } else { + regions = core.SplitRegions(regions) + } + heartbeatRegions(c, cluster, regions) + } +} + +func checkPendingPeerCount(expect []int, cluster *RaftCluster, c *C) { + for i, e := range expect { + s := cluster.core.Stores.GetStore(uint64(i + 1)) + c.Assert(s.GetPendingPeerCount(), Equals, e) + } +} + +type testClusterSuite struct { + baseCluster +} + +var _ = Suite(&testClusterSuite{}) + +func (s *testClusterSuite) TestConcurrentHandleRegion3C(c *C) { + var err error + var cleanup CleanupFunc + s.svr, cleanup, err = NewTestServer(c) + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{s.svr}) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.svr.GetAddr()) + defer cleanup() + storeAddrs := []string{"127.0.1.1:0", "127.0.1.1:1", "127.0.1.1:2"} + _, err = s.svr.bootstrapCluster(s.newBootstrapRequest(c, s.svr.clusterID, "127.0.0.1:0")) + c.Assert(err, IsNil) + s.svr.cluster.Lock() + s.svr.cluster.storage = core.NewStorage(kv.NewMemoryKV()) + s.svr.cluster.Unlock() + var stores []*metapb.Store + for _, addr := range storeAddrs { + store := s.newStore(c, 0, addr) + stores = append(stores, store) + _, err := putStore(c, s.grpcSchedulerClient, s.svr.clusterID, store) + c.Assert(err, IsNil) + } + + var wg sync.WaitGroup + // register store and bind stream + for i, store := range stores { + req := &schedulerpb.StoreHeartbeatRequest{ + Header: testutil.NewRequestHeader(s.svr.clusterID), + Stats: &schedulerpb.StoreStats{ + StoreId: store.GetId(), + Capacity: 1000 * (1 << 20), + Available: 1000 * (1 << 20), + }, + } + _, err := s.svr.StoreHeartbeat(context.TODO(), req) + c.Assert(err, IsNil) + stream, err := s.grpcSchedulerClient.RegionHeartbeat(context.Background()) + c.Assert(err, IsNil) + peer := &metapb.Peer{Id: s.allocID(c), StoreId: store.GetId()} + regionReq := &schedulerpb.RegionHeartbeatRequest{ + Header: testutil.NewRequestHeader(s.svr.clusterID), + Region: &metapb.Region{ + Id: s.allocID(c), + Peers: []*metapb.Peer{peer}, + }, + Leader: peer, + } + err = stream.Send(regionReq) + c.Assert(err, IsNil) + // make sure the first store can receive one response + if i == 0 { + wg.Add(1) + } + go func(isReciver bool) { + if isReciver { + _, err := stream.Recv() + c.Assert(err, IsNil) + wg.Done() + } + for { + stream.Recv() + } + }(i == 0) + } + concurrent := 2000 + for i := 0; i < concurrent; i++ { + region := &metapb.Region{ + Id: s.allocID(c), + StartKey: []byte(fmt.Sprintf("%5d", i)), + EndKey: []byte(fmt.Sprintf("%5d", i+1)), + Peers: []*metapb.Peer{{Id: s.allocID(c), StoreId: stores[0].GetId()}}, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: initEpochConfVer, + Version: initEpochVersion, + }, + } + if i == 0 { + region.StartKey = []byte("") + } else if i == concurrent-1 { + region.EndKey = []byte("") + } + + wg.Add(1) + go func() { + defer wg.Done() + err := s.svr.cluster.HandleRegionHeartbeat(core.NewRegionInfo(region, region.Peers[0])) + c.Assert(err, IsNil) + }() + } + wg.Wait() +} + +func (s *testClusterInfoSuite) TestLoadClusterInfo(c *C) { + server, cleanup := mustRunTestServer(c) + defer cleanup() + + storage := server.storage + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + + raftCluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + // Cluster is not bootstrapped. 
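+	// loadClusterInfo returns (nil, nil) when no cluster meta has been saved yet.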
+ cluster, err := raftCluster.loadClusterInfo() + c.Assert(err, IsNil) + c.Assert(cluster, IsNil) + + // Save meta, stores and regions. + n := 10 + meta := &metapb.Cluster{Id: 123} + c.Assert(storage.SaveMeta(meta), IsNil) + stores := mustSaveStores(c, storage, n) + + raftCluster = createTestRaftCluster(server.idAllocator, opt, storage) + cluster, err = raftCluster.loadClusterInfo() + c.Assert(err, IsNil) + c.Assert(cluster, NotNil) + + // Check meta, stores, and regions. + c.Assert(cluster.GetConfig(), DeepEquals, meta) + c.Assert(cluster.getStoreCount(), Equals, n) + for _, store := range cluster.GetMetaStores() { + c.Assert(store, DeepEquals, stores[store.GetId()]) + } +} + +func (s *testClusterInfoSuite) TestStoreHeartbeat(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + + n, np := uint64(3), uint64(3) + stores := newTestStores(n) + regions := newTestRegions(n, np) + + for _, region := range regions { + c.Assert(cluster.putRegion(region), IsNil) + } + c.Assert(cluster.core.Regions.GetRegionCount(), Equals, int(n)) + + for i, store := range stores { + storeStats := &schedulerpb.StoreStats{ + StoreId: store.GetID(), + Capacity: 100, + Available: 50, + RegionCount: 1, + } + c.Assert(cluster.handleStoreHeartbeat(storeStats), NotNil) + + c.Assert(cluster.putStoreLocked(store), IsNil) + c.Assert(cluster.getStoreCount(), Equals, i+1) + + c.Assert(store.GetLastHeartbeatTS().IsZero(), IsTrue) + + c.Assert(cluster.handleStoreHeartbeat(storeStats), IsNil) + + s := cluster.GetStore(store.GetID()) + c.Assert(s.GetLastHeartbeatTS().IsZero(), IsFalse) + c.Assert(s.GetStoreStats(), DeepEquals, storeStats) + } + + c.Assert(cluster.getStoreCount(), Equals, int(n)) + + for _, store := range stores { + tmp := &metapb.Store{} + ok, err := cluster.storage.LoadStore(store.GetID(), tmp) + c.Assert(ok, IsTrue) + c.Assert(err, IsNil) + c.Assert(tmp, DeepEquals, store.GetMeta()) + } +} + +type baseCluster struct { + svr *Server + grpcSchedulerClient schedulerpb.SchedulerClient +} + +func (s *baseCluster) allocID(c *C) uint64 { + id, err := s.svr.idAllocator.Alloc() + c.Assert(err, IsNil) + return id +} + +func (s *baseCluster) newPeer(c *C, storeID uint64, peerID uint64) *metapb.Peer { + c.Assert(storeID, Greater, uint64(0)) + + if peerID == 0 { + peerID = s.allocID(c) + } + + return &metapb.Peer{ + StoreId: storeID, + Id: peerID, + } +} + +func (s *baseCluster) newStore(c *C, storeID uint64, addr string) *metapb.Store { + if storeID == 0 { + storeID = s.allocID(c) + } + + return &metapb.Store{ + Id: storeID, + Address: addr, + } +} + +func (s *baseCluster) newRegion(c *C, regionID uint64, startKey []byte, + endKey []byte, peers []*metapb.Peer, epoch *metapb.RegionEpoch) *metapb.Region { + if regionID == 0 { + regionID = s.allocID(c) + } + + if epoch == nil { + epoch = &metapb.RegionEpoch{ + ConfVer: initEpochConfVer, + Version: initEpochVersion, + } + } + + for _, peer := range peers { + peerID := peer.GetId() + c.Assert(peerID, Greater, uint64(0)) + } + + return &metapb.Region{ + Id: regionID, + StartKey: startKey, + EndKey: endKey, + RegionEpoch: epoch, + Peers: peers, + } +} + +func (s *testClusterSuite) TestBootstrap(c *C) { + var err error + var cleanup func() + s.svr, cleanup, err = NewTestServer(c) + defer cleanup() + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{s.svr}) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.svr.GetAddr()) + clusterID := s.svr.clusterID + + 
// IsBootstrapped returns false. + req := s.newIsBootstrapRequest(clusterID) + resp, err := s.grpcSchedulerClient.IsBootstrapped(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp, NotNil) + c.Assert(resp.GetBootstrapped(), IsFalse) + + // Bootstrap the cluster. + storeAddr := "127.0.0.1:0" + s.bootstrapCluster(c, clusterID, storeAddr) + + // IsBootstrapped returns true. + req = s.newIsBootstrapRequest(clusterID) + resp, err = s.grpcSchedulerClient.IsBootstrapped(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetBootstrapped(), IsTrue) + + // check bootstrapped error. + reqBoot := s.newBootstrapRequest(c, clusterID, storeAddr) + respBoot, err := s.grpcSchedulerClient.Bootstrap(context.Background(), reqBoot) + c.Assert(err, IsNil) + c.Assert(respBoot.GetHeader().GetError(), NotNil) + c.Assert(respBoot.GetHeader().GetError().GetType(), Equals, schedulerpb.ErrorType_ALREADY_BOOTSTRAPPED) +} + +func (s *baseCluster) newIsBootstrapRequest(clusterID uint64) *schedulerpb.IsBootstrappedRequest { + req := &schedulerpb.IsBootstrappedRequest{ + Header: testutil.NewRequestHeader(clusterID), + } + + return req +} + +func (s *baseCluster) newBootstrapRequest(c *C, clusterID uint64, storeAddr string) *schedulerpb.BootstrapRequest { + store := s.newStore(c, 0, storeAddr) + + req := &schedulerpb.BootstrapRequest{ + Header: testutil.NewRequestHeader(clusterID), + Store: store, + } + + return req +} + +// helper function to check and bootstrap. +func (s *baseCluster) bootstrapCluster(c *C, clusterID uint64, storeAddr string) { + req := s.newBootstrapRequest(c, clusterID, storeAddr) + _, err := s.grpcSchedulerClient.Bootstrap(context.Background(), req) + c.Assert(err, IsNil) +} + +func (s *baseCluster) getStore(c *C, clusterID uint64, storeID uint64) *metapb.Store { + req := &schedulerpb.GetStoreRequest{ + Header: testutil.NewRequestHeader(clusterID), + StoreId: storeID, + } + resp, err := s.grpcSchedulerClient.GetStore(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetStore().GetId(), Equals, storeID) + + return resp.GetStore() +} + +func (s *baseCluster) getRegion(c *C, clusterID uint64, regionKey []byte) *metapb.Region { + req := &schedulerpb.GetRegionRequest{ + Header: testutil.NewRequestHeader(clusterID), + RegionKey: regionKey, + } + + resp, err := s.grpcSchedulerClient.GetRegion(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetRegion(), NotNil) + + return resp.GetRegion() +} + +func (s *baseCluster) getRegionByID(c *C, clusterID uint64, regionID uint64) *metapb.Region { + req := &schedulerpb.GetRegionByIDRequest{ + Header: testutil.NewRequestHeader(clusterID), + RegionId: regionID, + } + + resp, err := s.grpcSchedulerClient.GetRegionByID(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetRegion(), NotNil) + + return resp.GetRegion() +} + +func (s *baseCluster) getRaftCluster(c *C) *RaftCluster { + cluster := s.svr.GetRaftCluster() + c.Assert(cluster, NotNil) + return cluster +} + +func (s *baseCluster) getClusterConfig(c *C, clusterID uint64) *metapb.Cluster { + req := &schedulerpb.GetClusterConfigRequest{ + Header: testutil.NewRequestHeader(clusterID), + } + + resp, err := s.grpcSchedulerClient.GetClusterConfig(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetCluster(), NotNil) + + return resp.GetCluster() +} + +func (s *testClusterSuite) TestGetPutConfig(c *C) { + var err error + var cleanup func() + s.svr, cleanup, err = NewTestServer(c) + defer cleanup() + c.Assert(err, IsNil) + 
mustWaitLeader(c, []*Server{s.svr}) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.svr.GetAddr()) + clusterID := s.svr.clusterID + + storeAddr := "127.0.0.1:0" + bootstrapRequest := s.newBootstrapRequest(c, s.svr.clusterID, storeAddr) + _, err = s.svr.bootstrapCluster(bootstrapRequest) + c.Assert(err, IsNil) + + store := bootstrapRequest.Store + peer := s.newPeer(c, store.GetId(), 0) + region := s.newRegion(c, 0, []byte{}, []byte{}, []*metapb.Peer{peer}, nil) + err = s.svr.cluster.processRegionHeartbeat(core.NewRegionInfo(region, nil)) + c.Assert(err, IsNil) + // Get region. + region = s.getRegion(c, clusterID, []byte("abc")) + c.Assert(region.GetPeers(), HasLen, 1) + peer = region.GetPeers()[0] + + // Get region by id. + regionByID := s.getRegionByID(c, clusterID, region.GetId()) + c.Assert(region, DeepEquals, regionByID) + + // Get store. + storeID := peer.GetStoreId() + store = s.getStore(c, clusterID, storeID) + + // Update store. + store.Address = "127.0.0.1:1" + s.testPutStore(c, clusterID, store) + + // Remove store. + s.testRemoveStore(c, clusterID, store) + + // Update cluster config. + req := &schedulerpb.PutClusterConfigRequest{ + Header: testutil.NewRequestHeader(clusterID), + Cluster: &metapb.Cluster{ + Id: clusterID, + MaxPeerCount: 5, + }, + } + resp, err := s.grpcSchedulerClient.PutClusterConfig(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp, NotNil) + meta := s.getClusterConfig(c, clusterID) + c.Assert(meta.GetMaxPeerCount(), Equals, uint32(5)) +} + +func putStore(c *C, grpcSchedulerClient schedulerpb.SchedulerClient, clusterID uint64, store *metapb.Store) (*schedulerpb.PutStoreResponse, error) { + req := &schedulerpb.PutStoreRequest{ + Header: testutil.NewRequestHeader(clusterID), + Store: store, + } + resp, err := grpcSchedulerClient.PutStore(context.Background(), req) + return resp, err +} + +func (s *baseCluster) testPutStore(c *C, clusterID uint64, store *metapb.Store) { + // Update store. + _, err := putStore(c, s.grpcSchedulerClient, clusterID, store) + c.Assert(err, IsNil) + updatedStore := s.getStore(c, clusterID, store.GetId()) + c.Assert(updatedStore, DeepEquals, store) + + // Update store again. + _, err = putStore(c, s.grpcSchedulerClient, clusterID, store) + c.Assert(err, IsNil) + + // Put new store with a duplicated address when old store is up will fail. + _, err = putStore(c, s.grpcSchedulerClient, clusterID, s.newStore(c, 0, store.GetAddress())) + c.Assert(err, NotNil) + + // Put new store with a duplicated address when old store is offline will fail. + s.resetStoreState(c, store.GetId(), metapb.StoreState_Offline) + _, err = putStore(c, s.grpcSchedulerClient, clusterID, s.newStore(c, 0, store.GetAddress())) + c.Assert(err, NotNil) + + // Put new store with a duplicated address when old store is tombstone is OK. + s.resetStoreState(c, store.GetId(), metapb.StoreState_Tombstone) + _, err = putStore(c, s.grpcSchedulerClient, clusterID, s.newStore(c, 0, store.GetAddress())) + c.Assert(err, IsNil) + + // Put a new store. + _, err = putStore(c, s.grpcSchedulerClient, clusterID, s.newStore(c, 0, "127.0.0.1:12345")) + c.Assert(err, IsNil) + + // Put an existed store with duplicated address with other old stores. 
+ s.resetStoreState(c, store.GetId(), metapb.StoreState_Up) + _, err = putStore(c, s.grpcSchedulerClient, clusterID, s.newStore(c, store.GetId(), "127.0.0.1:12345")) + c.Assert(err, NotNil) +} + +func (s *baseCluster) resetStoreState(c *C, storeID uint64, state metapb.StoreState) { + cluster := s.svr.GetRaftCluster() + c.Assert(cluster, NotNil) + store := cluster.GetStore(storeID) + c.Assert(store, NotNil) + newStore := store.Clone(core.SetStoreState(state)) + cluster.Lock() + err := cluster.putStoreLocked(newStore) + cluster.Unlock() + c.Assert(err, IsNil) +} + +func (s *baseCluster) testRemoveStore(c *C, clusterID uint64, store *metapb.Store) { + cluster := s.getRaftCluster(c) + + // When store is up: + { + // Case 1: RemoveStore should be OK; + s.resetStoreState(c, store.GetId(), metapb.StoreState_Up) + err := cluster.RemoveStore(store.GetId()) + c.Assert(err, IsNil) + removedStore := s.getStore(c, clusterID, store.GetId()) + c.Assert(removedStore.GetState(), Equals, metapb.StoreState_Offline) + // Case 2: BuryStore w/ force should be OK; + s.resetStoreState(c, store.GetId(), metapb.StoreState_Up) + err = cluster.BuryStore(store.GetId(), true) + c.Assert(err, IsNil) + buriedStore := s.getStore(c, clusterID, store.GetId()) + c.Assert(buriedStore.GetState(), Equals, metapb.StoreState_Tombstone) + // Case 3: BuryStore w/o force should fail. + s.resetStoreState(c, store.GetId(), metapb.StoreState_Up) + err = cluster.BuryStore(store.GetId(), false) + c.Assert(err, NotNil) + } + + // When store is offline: + { + // Case 1: RemoveStore should be OK; + s.resetStoreState(c, store.GetId(), metapb.StoreState_Offline) + err := cluster.RemoveStore(store.GetId()) + c.Assert(err, IsNil) + removedStore := s.getStore(c, clusterID, store.GetId()) + c.Assert(removedStore.GetState(), Equals, metapb.StoreState_Offline) + // Case 2: BuryStore w/ or w/o force should be OK. + s.resetStoreState(c, store.GetId(), metapb.StoreState_Offline) + err = cluster.BuryStore(store.GetId(), false) + c.Assert(err, IsNil) + buriedStore := s.getStore(c, clusterID, store.GetId()) + c.Assert(buriedStore.GetState(), Equals, metapb.StoreState_Tombstone) + } + + // When store is tombstone: + { + // Case 1: RemoveStore should should fail; + s.resetStoreState(c, store.GetId(), metapb.StoreState_Tombstone) + err := cluster.RemoveStore(store.GetId()) + c.Assert(err, NotNil) + // Case 2: BuryStore w/ or w/o force should be OK. + s.resetStoreState(c, store.GetId(), metapb.StoreState_Tombstone) + err = cluster.BuryStore(store.GetId(), false) + c.Assert(err, IsNil) + buriedStore := s.getStore(c, clusterID, store.GetId()) + c.Assert(buriedStore.GetState(), Equals, metapb.StoreState_Tombstone) + } + + { + // Put after removed should return tombstone error. + resp, err := putStore(c, s.grpcSchedulerClient, clusterID, store) + c.Assert(err, IsNil) + c.Assert(resp.GetHeader().GetError().GetType(), Equals, schedulerpb.ErrorType_STORE_TOMBSTONE) + } + { + // Update after removed should return tombstone error. + req := &schedulerpb.StoreHeartbeatRequest{ + Header: testutil.NewRequestHeader(clusterID), + Stats: &schedulerpb.StoreStats{StoreId: store.GetId()}, + } + resp, err := s.grpcSchedulerClient.StoreHeartbeat(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetHeader().GetError().GetType(), Equals, schedulerpb.ErrorType_STORE_TOMBSTONE) + } +} + +// Make sure PD will not panic if it start and stop again and again. 
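+// For reference, testRemoveStore above walks the store lifecycle:
+//
+//	Up --RemoveStore--> Offline --BuryStore--> Tombstone
+//	Up --BuryStore(force)--------------------> Tombstone
+//
+// and once a store is Tombstone, RemoveStore fails while PutStore and
+// StoreHeartbeat answer with ErrorType_STORE_TOMBSTONE in the response header.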
+func (s *testClusterSuite) TestRaftClusterRestart(c *C) { + var err error + var cleanup func() + s.svr, cleanup, err = NewTestServer(c) + defer cleanup() + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{s.svr}) + _, err = s.svr.bootstrapCluster(s.newBootstrapRequest(c, s.svr.clusterID, "127.0.0.1:0")) + c.Assert(err, IsNil) + + cluster := s.svr.GetRaftCluster() + c.Assert(cluster, NotNil) + cluster.stop() + + err = s.svr.createRaftCluster() + c.Assert(err, IsNil) + + cluster = s.svr.GetRaftCluster() + c.Assert(cluster, NotNil) + cluster.stop() +} + +func (s *testClusterSuite) TestGetPDMembers(c *C) { + var err error + var cleanup func() + s.svr, cleanup, err = NewTestServer(c) + defer cleanup() + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{s.svr}) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.svr.GetAddr()) + req := &schedulerpb.GetMembersRequest{ + Header: testutil.NewRequestHeader(s.svr.ClusterID()), + } + + resp, err := s.grpcSchedulerClient.GetMembers(context.Background(), req) + c.Assert(err, IsNil) + // A more strict test can be found at api/member_test.go + c.Assert(len(resp.GetMembers()), Not(Equals), 0) +} + +var _ = Suite(&testGetStoresSuite{}) + +type testGetStoresSuite struct { + cluster *RaftCluster +} + +func (s *testGetStoresSuite) SetUpSuite(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + s.cluster = cluster + + stores := newTestStores(200) + + for _, store := range stores { + c.Assert(s.cluster.putStoreLocked(store), IsNil) + } +} + +func (s *testGetStoresSuite) BenchmarkGetStores(c *C) { + for i := 0; i < c.N; i++ { + // Logic to benchmark + s.cluster.core.Stores.GetStores() + } +} + +var _ = Suite(&testStoresInfoSuite{}) + +type testStoresInfoSuite struct{} + +func checkStaleRegion(origin *metapb.Region, region *metapb.Region) error { + o := origin.GetRegionEpoch() + e := region.GetRegionEpoch() + + if e.GetVersion() < o.GetVersion() || e.GetConfVer() < o.GetConfVer() { + return ErrRegionIsStale(region, origin) + } + + return nil +} + +// Create n stores (0..n). +func newTestStores(n uint64) []*core.StoreInfo { + stores := make([]*core.StoreInfo, 0, n) + for i := uint64(1); i <= n; i++ { + store := &metapb.Store{ + Id: i, + } + stores = append(stores, core.NewStoreInfo(store)) + } + return stores +} + +func (s *testStoresInfoSuite) TestStores(c *C) { + n := uint64(10) + cache := core.NewStoresInfo() + stores := newTestStores(n) + + for i, store := range stores { + id := store.GetID() + c.Assert(cache.GetStore(id), IsNil) + c.Assert(cache.BlockStore(id), NotNil) + cache.SetStore(store) + c.Assert(cache.GetStore(id), DeepEquals, store) + c.Assert(cache.GetStoreCount(), Equals, i+1) + c.Assert(cache.BlockStore(id), IsNil) + c.Assert(cache.GetStore(id).IsBlocked(), IsTrue) + c.Assert(cache.BlockStore(id), NotNil) + cache.UnblockStore(id) + c.Assert(cache.GetStore(id).IsBlocked(), IsFalse) + } + c.Assert(cache.GetStoreCount(), Equals, int(n)) + + for _, store := range cache.GetStores() { + c.Assert(store, DeepEquals, stores[store.GetID()-1]) + } + for _, store := range cache.GetMetaStores() { + c.Assert(store, DeepEquals, stores[store.GetId()-1].GetMeta()) + } + + c.Assert(cache.GetStoreCount(), Equals, int(n)) +} + +var _ = Suite(&testRegionsInfoSuite{}) + +type testRegionsInfoSuite struct{} + +// Create n regions (0..n) of n stores (0..n). +// Each region contains np peers, the first peer is the leader. 
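+// Peer j of region i is placed on store (i+j) mod n, so with np <= n every
+// store leads exactly one region and hosts one peer from each of np regions.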
+func newTestRegions(n, np uint64) []*core.RegionInfo { + regions := make([]*core.RegionInfo, 0, n) + for i := uint64(0); i < n; i++ { + peers := make([]*metapb.Peer, 0, np) + for j := uint64(0); j < np; j++ { + peer := &metapb.Peer{ + Id: i*np + j, + } + peer.StoreId = (i + j) % n + peers = append(peers, peer) + } + region := &metapb.Region{ + Id: i, + Peers: peers, + StartKey: []byte{byte(i)}, + EndKey: []byte{byte(i + 1)}, + RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 2}, + } + regions = append(regions, core.NewRegionInfo(region, peers[0])) + } + return regions +} + +func (s *testRegionsInfoSuite) Test(c *C) { + n, np := uint64(10), uint64(3) + cache := core.NewRegionsInfo() + regions := newTestRegions(n, np) + + for i := uint64(0); i < n; i++ { + region := regions[i] + regionKey := []byte{byte(i)} + + c.Assert(cache.GetRegion(i), IsNil) + c.Assert(cache.SearchRegion(regionKey), IsNil) + checkRegions(c, cache, regions[0:i]) + + cache.AddRegion(region) + checkRegion(c, cache.GetRegion(i), region) + checkRegion(c, cache.SearchRegion(regionKey), region) + checkRegions(c, cache, regions[0:(i+1)]) + // previous region + if i == 0 { + c.Assert(cache.SearchPrevRegion(regionKey), IsNil) + } else { + checkRegion(c, cache.SearchPrevRegion(regionKey), regions[i-1]) + } + // Update leader to peer np-1. + newRegion := region.Clone(core.WithLeader(region.GetPeers()[np-1])) + regions[i] = newRegion + cache.SetRegion(newRegion) + checkRegion(c, cache.GetRegion(i), newRegion) + checkRegion(c, cache.SearchRegion(regionKey), newRegion) + checkRegions(c, cache, regions[0:(i+1)]) + + cache.RemoveRegion(region) + c.Assert(cache.GetRegion(i), IsNil) + c.Assert(cache.SearchRegion(regionKey), IsNil) + checkRegions(c, cache, regions[0:i]) + + // Reset leader to peer 0. + newRegion = region.Clone(core.WithLeader(region.GetPeers()[0])) + regions[i] = newRegion + cache.AddRegion(newRegion) + checkRegion(c, cache.GetRegion(i), newRegion) + checkRegions(c, cache, regions[0:(i+1)]) + checkRegion(c, cache.SearchRegion(regionKey), newRegion) + } + + for i := uint64(0); i < n; i++ { + region := cache.RandLeaderRegion(i, core.HealthRegion()) + c.Assert(region.GetLeader().GetStoreId(), Equals, i) + + region = cache.RandFollowerRegion(i, core.HealthRegion()) + c.Assert(region.GetLeader().GetStoreId(), Not(Equals), i) + + c.Assert(region.GetStorePeer(i), NotNil) + } + + // check overlaps + // clone it otherwise there are two items with the same key in the tree + overlapRegion := regions[n-1].Clone(core.WithStartKey(regions[n-2].GetStartKey())) + cache.AddRegion(overlapRegion) + c.Assert(cache.GetRegion(n-2), IsNil) + c.Assert(cache.GetRegion(n-1), NotNil) + + // All regions will be filtered out if they have pending peers. 
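+	// The HealthRegion() option used in these lookups rejects regions that
+	// carry pending peers, so after every leader region of a store is cloned
+	// with WithPendingPeers below, both RandLeaderRegion and RandFollowerRegion
+	// are expected to return nil.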
+ for i := uint64(0); i < n; i++ { + for j := 0; j < cache.GetStoreLeaderCount(i); j++ { + region := cache.RandLeaderRegion(i, core.HealthRegion()) + newRegion := region.Clone(core.WithPendingPeers(region.GetPeers())) + cache.SetRegion(newRegion) + } + c.Assert(cache.RandLeaderRegion(i, core.HealthRegion()), IsNil) + } + for i := uint64(0); i < n; i++ { + c.Assert(cache.RandFollowerRegion(i, core.HealthRegion()), IsNil) + } +} + +func checkRegion(c *C, a *core.RegionInfo, b *core.RegionInfo) { + c.Assert(a, DeepEquals, b) + c.Assert(a.GetMeta(), DeepEquals, b.GetMeta()) + c.Assert(a.GetLeader(), DeepEquals, b.GetLeader()) + c.Assert(a.GetPeers(), DeepEquals, b.GetPeers()) + if len(a.GetPendingPeers()) > 0 || len(b.GetPendingPeers()) > 0 { + c.Assert(a.GetPendingPeers(), DeepEquals, b.GetPendingPeers()) + } +} + +func checkRegions(c *C, cache *core.RegionsInfo, regions []*core.RegionInfo) { + regionCount := make(map[uint64]int) + leaderCount := make(map[uint64]int) + followerCount := make(map[uint64]int) + for _, region := range regions { + for _, peer := range region.GetPeers() { + regionCount[peer.StoreId]++ + if peer.Id == region.GetLeader().Id { + leaderCount[peer.StoreId]++ + checkRegion(c, cache.GetLeader(peer.StoreId, region), region) + } else { + followerCount[peer.StoreId]++ + checkRegion(c, cache.GetFollower(peer.StoreId, region), region) + } + } + } + + c.Assert(cache.GetRegionCount(), Equals, len(regions)) + for id, count := range regionCount { + c.Assert(cache.GetStoreRegionCount(id), Equals, count) + } + for id, count := range leaderCount { + c.Assert(cache.GetStoreLeaderCount(id), Equals, count) + } + for id, count := range followerCount { + c.Assert(cache.GetStoreFollowerCount(id), Equals, count) + } + + for _, region := range cache.GetRegions() { + checkRegion(c, region, regions[region.GetID()]) + } + for _, region := range cache.GetMetaRegions() { + c.Assert(region, DeepEquals, regions[region.GetId()].GetMeta()) + } +} + +var _ = Suite(&testClusterUtilSuite{}) + +type testClusterUtilSuite struct{} + +func (s *testClusterUtilSuite) TestCheckStaleRegion(c *C) { + // (0, 0) v.s. (0, 0) + region := core.NewTestRegionInfo([]byte{}, []byte{}) + origin := core.NewTestRegionInfo([]byte{}, []byte{}) + c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), IsNil) + c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) + + // (1, 0) v.s. (0, 0) + region.GetRegionEpoch().Version++ + c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) + c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), NotNil) + + // (1, 1) v.s. (0, 0) + region.GetRegionEpoch().ConfVer++ + c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) + c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), NotNil) + + // (0, 1) v.s. 
(0, 0) + region.GetRegionEpoch().Version-- + c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) + c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), NotNil) +} + +func mustSaveStores(c *C, s *core.Storage, n int) []*metapb.Store { + stores := make([]*metapb.Store, 0, n) + for i := 0; i < n; i++ { + store := &metapb.Store{Id: uint64(i)} + stores = append(stores, store) + } + + for _, store := range stores { + c.Assert(s.SaveStore(store), IsNil) + } + + return stores +} diff --git a/scheduler/server/cluster_worker.go b/scheduler/server/cluster_worker.go new file mode 100644 index 00000000..4fec8bba --- /dev/null +++ b/scheduler/server/cluster_worker.go @@ -0,0 +1,146 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "bytes" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +// HandleRegionHeartbeat processes RegionInfo reports from client. +func (c *RaftCluster) HandleRegionHeartbeat(region *core.RegionInfo) error { + if err := c.processRegionHeartbeat(region); err != nil { + return err + } + + // If the region peer count is 0, then we should not handle this. + if len(region.GetPeers()) == 0 { + log.Warn("invalid region, zero region peer count", zap.Stringer("region-meta", core.RegionToHexMeta(region.GetMeta()))) + return errors.Errorf("invalid region, zero region peer count: %v", core.RegionToHexMeta(region.GetMeta())) + } + + c.RLock() + co := c.coordinator + c.RUnlock() + co.opController.Dispatch(region, schedule.DispatchFromHeartBeat) + return nil +} + +func (c *RaftCluster) handleAskSplit(request *schedulerpb.AskSplitRequest) (*schedulerpb.AskSplitResponse, error) { + reqRegion := request.GetRegion() + err := c.validRequestRegion(reqRegion) + if err != nil { + return nil, err + } + + newRegionID, err := c.s.idAllocator.Alloc() + if err != nil { + return nil, err + } + + peerIDs := make([]uint64, len(request.Region.Peers)) + for i := 0; i < len(peerIDs); i++ { + if peerIDs[i], err = c.s.idAllocator.Alloc(); err != nil { + return nil, err + } + } + + split := &schedulerpb.AskSplitResponse{ + NewRegionId: newRegionID, + NewPeerIds: peerIDs, + } + + return split, nil +} + +func (c *RaftCluster) validRequestRegion(reqRegion *metapb.Region) error { + startKey := reqRegion.GetStartKey() + region, _ := c.GetRegionByKey(startKey) + if region == nil { + return errors.Errorf("region not found, request region: %v", core.RegionToHexMeta(reqRegion)) + } + // If the request epoch is less than current region epoch, then returns an error. 
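+	// For example, with a cached epoch of (ConfVer=2, Version=2), a request
+	// carrying (2, 2) is accepted while (1, 2) or (2, 1) is rejected as stale;
+	// TestValidRequestRegion in cluster_worker_test.go exercises exactly this.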
+ reqRegionEpoch := reqRegion.GetRegionEpoch() + regionEpoch := region.GetRegionEpoch() + if reqRegionEpoch.GetVersion() < regionEpoch.GetVersion() || + reqRegionEpoch.GetConfVer() < regionEpoch.GetConfVer() { + return errors.Errorf("invalid region epoch, request: %v, currenrt: %v", reqRegionEpoch, regionEpoch) + } + return nil +} + +func (c *RaftCluster) checkSplitRegion(left *metapb.Region, right *metapb.Region) error { + if left == nil || right == nil { + return errors.New("invalid split region") + } + + if !bytes.Equal(left.GetEndKey(), right.GetStartKey()) { + return errors.New("invalid split region") + } + + if len(right.GetEndKey()) == 0 || bytes.Compare(left.GetStartKey(), right.GetEndKey()) < 0 { + return nil + } + + return errors.New("invalid split region") +} + +func (c *RaftCluster) checkSplitRegions(regions []*metapb.Region) error { + if len(regions) <= 1 { + return errors.New("invalid split region") + } + + for i := 1; i < len(regions); i++ { + left := regions[i-1] + right := regions[i] + if !bytes.Equal(left.GetEndKey(), right.GetStartKey()) { + return errors.New("invalid split region") + } + if len(right.GetEndKey()) != 0 && bytes.Compare(left.GetStartKey(), right.GetEndKey()) >= 0 { + return errors.New("invalid split region") + } + } + return nil +} + +func (c *RaftCluster) handleReportSplit(request *schedulerpb.ReportSplitRequest) (*schedulerpb.ReportSplitResponse, error) { + left := request.GetLeft() + right := request.GetRight() + + err := c.checkSplitRegion(left, right) + if err != nil { + log.Warn("report split region is invalid", + zap.Stringer("left-region", core.RegionToHexMeta(left)), + zap.Stringer("right-region", core.RegionToHexMeta(right)), + zap.Error(err)) + return nil, err + } + + // Build origin region by using left and right. + originRegion := proto.Clone(right).(*metapb.Region) + originRegion.RegionEpoch = nil + originRegion.StartKey = left.GetStartKey() + log.Info("region split, generate new region", + zap.Uint64("region-id", originRegion.GetId()), + zap.Stringer("region-meta", core.RegionToHexMeta(left))) + return &schedulerpb.ReportSplitResponse{}, nil +} diff --git a/scheduler/server/cluster_worker_test.go b/scheduler/server/cluster_worker_test.go new file mode 100644 index 00000000..212e6455 --- /dev/null +++ b/scheduler/server/cluster_worker_test.go @@ -0,0 +1,121 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + . 
"github.com/pingcap/check" +) + +var _ = Suite(&testClusterWorkerSuite{}) + +type testClusterWorkerSuite struct { + baseCluster +} + +func (s *testClusterWorkerSuite) TestReportSplit(c *C) { + var cluster RaftCluster + left := &metapb.Region{Id: 1, StartKey: []byte("a"), EndKey: []byte("b")} + right := &metapb.Region{Id: 2, StartKey: []byte("b"), EndKey: []byte("c")} + _, err := cluster.handleReportSplit(&schedulerpb.ReportSplitRequest{Left: left, Right: right}) + c.Assert(err, IsNil) + _, err = cluster.handleReportSplit(&schedulerpb.ReportSplitRequest{Left: right, Right: left}) + c.Assert(err, NotNil) +} + +func (s *testClusterWorkerSuite) TestValidRequestRegion(c *C) { + var err error + var cleanup func() + s.svr, cleanup, err = NewTestServer(c) + defer cleanup() + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{s.svr}) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.svr.GetAddr()) + _, err = s.svr.bootstrapCluster(s.newBootstrapRequest(c, s.svr.clusterID, "127.0.0.1:0")) + c.Assert(err, IsNil) + + cluster := s.svr.GetRaftCluster() + c.Assert(cluster, NotNil) + + r1 := core.NewRegionInfo(&metapb.Region{ + Id: 1, + StartKey: []byte(""), + EndKey: []byte("a"), + Peers: []*metapb.Peer{{ + Id: 1, + StoreId: 1, + }}, + RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 2}, + }, &metapb.Peer{ + Id: 1, + StoreId: 1, + }) + err = cluster.HandleRegionHeartbeat(r1) + c.Assert(err, IsNil) + r2 := &metapb.Region{Id: 2, StartKey: []byte("a"), EndKey: []byte("b")} + c.Assert(cluster.validRequestRegion(r2), NotNil) + r3 := &metapb.Region{Id: 1, StartKey: []byte(""), EndKey: []byte("a"), RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 2}} + c.Assert(cluster.validRequestRegion(r3), NotNil) + r4 := &metapb.Region{Id: 1, StartKey: []byte(""), EndKey: []byte("a"), RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 1}} + c.Assert(cluster.validRequestRegion(r4), NotNil) + r5 := &metapb.Region{Id: 1, StartKey: []byte(""), EndKey: []byte("a"), RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 2}} + c.Assert(cluster.validRequestRegion(r5), IsNil) + cluster.stop() +} + +func (s *testClusterWorkerSuite) TestAskSplit(c *C) { + var err error + var cleanup func() + s.svr, cleanup, err = NewTestServer(c) + defer cleanup() + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{s.svr}) + s.grpcSchedulerClient = testutil.MustNewGrpcClient(c, s.svr.GetAddr()) + bootstrapRequest := s.newBootstrapRequest(c, s.svr.clusterID, "127.0.0.1:0") + _, err = s.svr.bootstrapCluster(bootstrapRequest) + c.Assert(err, IsNil) + + store := bootstrapRequest.Store + peer := s.newPeer(c, store.GetId(), 0) + region := s.newRegion(c, 0, []byte{}, []byte{}, []*metapb.Peer{peer}, nil) + err = s.svr.cluster.processRegionHeartbeat(core.NewRegionInfo(region, nil)) + c.Assert(err, IsNil) + + cluster := s.svr.GetRaftCluster() + c.Assert(cluster, NotNil) + regions := cluster.GetRegions() + + req := &schedulerpb.AskSplitRequest{ + Header: &schedulerpb.RequestHeader{ + ClusterId: s.svr.ClusterID(), + }, + Region: regions[0].GetMeta(), + } + + _, err = cluster.handleAskSplit(req) + c.Assert(err, IsNil) + + req1 := &schedulerpb.AskSplitRequest{ + Header: &schedulerpb.RequestHeader{ + ClusterId: s.svr.ClusterID(), + }, + Region: regions[0].GetMeta(), + } + + _, err = cluster.handleAskSplit(req1) + c.Assert(err, IsNil) +} diff --git a/scheduler/server/config/config.go b/scheduler/server/config/config.go new file mode 100644 index 00000000..91b0779d --- /dev/null +++ b/scheduler/server/config/config.go @@ -0,0 +1,656 @@ +// 
Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/tls" + "encoding/json" + "flag" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "github.com/BurntSushi/toml" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/embed" + "go.etcd.io/etcd/pkg/transport" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// Config is the pd server configuration. +type Config struct { + *flag.FlagSet `json:"-"` + + Version bool `json:"-"` + + ConfigCheck bool `json:"-"` + + ClientUrls string `toml:"client-urls" json:"client-urls"` + PeerUrls string `toml:"peer-urls" json:"peer-urls"` + AdvertiseClientUrls string `toml:"advertise-client-urls" json:"advertise-client-urls"` + AdvertisePeerUrls string `toml:"advertise-peer-urls" json:"advertise-peer-urls"` + + Name string `toml:"name" json:"name"` + DataDir string `toml:"data-dir" json:"data-dir"` + ForceNewCluster bool `json:"force-new-cluster"` + EnableGRPCGateway bool `json:"enable-grpc-gateway"` + + InitialCluster string `toml:"initial-cluster" json:"initial-cluster"` + InitialClusterState string `toml:"initial-cluster-state" json:"initial-cluster-state"` + + // LeaderLease time, if leader doesn't update its TTL + // in etcd after lease time, etcd will expire the leader key + // and other servers can campaign the leader again. + // Etcd only supports seconds TTL, so here is second too. + LeaderLease int64 `toml:"lease" json:"lease"` + + // Log related config. + Log log.Config `toml:"log" json:"log"` + + // Backward compatibility. + LogFileDeprecated string `toml:"log-file" json:"log-file"` + LogLevelDeprecated string `toml:"log-level" json:"log-level"` + + // TsoSaveInterval is the interval to save timestamp. + TsoSaveInterval typeutil.Duration `toml:"tso-save-interval" json:"tso-save-interval"` + + Schedule ScheduleConfig `toml:"schedule" json:"schedule"` + + Replication ReplicationConfig `toml:"replication" json:"replication"` + + PDServerCfg PDServerConfig `toml:"pd-server" json:"pd-server"` + + // QuotaBackendBytes Raise alarms when backend size exceeds the given quota. 0 means use the default quota. + // the default size is 2GB, the maximum is 8GB. + QuotaBackendBytes typeutil.ByteSize `toml:"quota-backend-bytes" json:"quota-backend-bytes"` + // AutoCompactionMode is either 'periodic' or 'revision'. The default value is 'periodic'. + AutoCompactionMode string `toml:"auto-compaction-mode" json:"auto-compaction-mode"` + // AutoCompactionRetention is either duration string with time unit + // (e.g. '5m' for 5-minute), or revision unit (e.g. '5000'). + // If no time unit is provided and compaction mode is 'periodic', + // the unit defaults to hour. For example, '5' translates into 5-hour. + // The default retention is 1 hour. + // Before etcd v3.3.x, the type of retention is int. We add 'v2' suffix to make it backward compatible. 
+ AutoCompactionRetention string `toml:"auto-compaction-retention" json:"auto-compaction-retention-v2"` + + // TickInterval is the interval for etcd Raft tick. + TickInterval typeutil.Duration `toml:"tick-interval"` + // ElectionInterval is the interval for etcd Raft election. + ElectionInterval typeutil.Duration `toml:"election-interval"` + + Security SecurityConfig `toml:"security" json:"security"` + + configFile string + + // For all warnings during parsing. + WarningMsgs []string + + // Only test can change them. + nextRetryDelay time.Duration + DisableStrictReconfigCheck bool + + HeartbeatStreamBindInterval typeutil.Duration + + LeaderPriorityCheckInterval typeutil.Duration + + logger *zap.Logger + logProps *log.ZapProperties +} + +// NewConfig creates a new config. +func NewConfig() *Config { + cfg := &Config{} + cfg.FlagSet = flag.NewFlagSet("pd", flag.ContinueOnError) + fs := cfg.FlagSet + + fs.BoolVar(&cfg.Version, "V", false, "print version information and exit") + fs.BoolVar(&cfg.Version, "version", false, "print version information and exit") + fs.StringVar(&cfg.configFile, "config", "", "Config file") + fs.BoolVar(&cfg.ConfigCheck, "config-check", false, "check config file validity and exit") + + fs.StringVar(&cfg.Name, "name", "", "human-readable name for this pd member") + + fs.StringVar(&cfg.DataDir, "data-dir", "", "path to the data directory (default 'default.${name}')") + fs.StringVar(&cfg.ClientUrls, "client-urls", defaultClientUrls, "url for client traffic") + fs.StringVar(&cfg.AdvertiseClientUrls, "advertise-client-urls", "", "advertise url for client traffic (default '${client-urls}')") + fs.StringVar(&cfg.PeerUrls, "peer-urls", defaultPeerUrls, "url for peer traffic") + fs.StringVar(&cfg.AdvertisePeerUrls, "advertise-peer-urls", "", "advertise url for peer traffic (default '${peer-urls}')") + fs.StringVar(&cfg.InitialCluster, "initial-cluster", "", "initial cluster configuration for bootstrapping, e,g. pd=http://127.0.0.1:2380") + + fs.StringVar(&cfg.Log.Level, "L", "", "log level: debug, info, warn, error, fatal (default 'info')") + fs.StringVar(&cfg.Log.File.Filename, "log-file", "", "log file path") + fs.BoolVar(&cfg.Log.File.LogRotate, "log-rotate", true, "rotate log") + + fs.StringVar(&cfg.Security.CAPath, "cacert", "", "Path of file that contains list of trusted TLS CAs") + fs.StringVar(&cfg.Security.CertPath, "cert", "", "Path of file that contains X509 certificate in PEM format") + fs.StringVar(&cfg.Security.KeyPath, "key", "", "Path of file that contains X509 key in PEM format") + fs.BoolVar(&cfg.ForceNewCluster, "force-new-cluster", false, "Force to create a new one-member cluster") + + return cfg +} + +const ( + defaultLeaderLease = int64(3) + defaultNextRetryDelay = time.Second + defaultCompactionMode = "periodic" + defaultAutoCompactionRetention = "1h" + + defaultName = "pd" + defaultClientUrls = "http://127.0.0.1:2379" + defaultPeerUrls = "http://127.0.0.1:2380" + defaultInitialClusterState = embed.ClusterStateFlagNew + + // etcd use 100ms for heartbeat and 1s for election timeout. + // We can enlarge both a little to reduce the network aggression. + // now embed etcd use TickMs for heartbeat, we will update + // after embed etcd decouples tick and heartbeat. 
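+	// With the defaults below, the election interval (3000ms) is six times the
+	// tick interval (500ms), which keeps the relationship between the two well
+	// within what the embedded etcd accepts.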
+ defaultTickInterval = 500 * time.Millisecond + // embed etcd has a check that `5 * tick > election` + defaultElectionInterval = 3000 * time.Millisecond + + defaultHeartbeatStreamRebindInterval = time.Minute + + defaultLeaderPriorityCheckInterval = time.Minute + + defaultMaxResetTsGap = 24 * time.Hour + + defaultEnableGRPCGateway = true +) + +func adjustString(v *string, defValue string) { + if len(*v) == 0 { + *v = defValue + } +} + +func adjustUint64(v *uint64, defValue uint64) { + if *v == 0 { + *v = defValue + } +} + +func adjustInt64(v *int64, defValue int64) { + if *v == 0 { + *v = defValue + } +} + +func adjustDuration(v *typeutil.Duration, defValue time.Duration) { + if v.Duration == 0 { + v.Duration = defValue + } +} + +func adjustSchedulers(v *SchedulerConfigs, defValue SchedulerConfigs) { + if len(*v) == 0 { + *v = defValue + } +} + +// Parse parses flag definitions from the argument list. +func (c *Config) Parse(arguments []string) error { + // Parse first to get config file. + err := c.FlagSet.Parse(arguments) + if err != nil { + return errors.WithStack(err) + } + + // Load config file if specified. + var meta *toml.MetaData + if c.configFile != "" { + meta, err = c.configFromFile(c.configFile) + if err != nil { + return err + } + + // Backward compatibility for toml config + if c.LogFileDeprecated != "" && c.Log.File.Filename == "" { + c.Log.File.Filename = c.LogFileDeprecated + msg := fmt.Sprintf("log-file in %s is deprecated, use [log.file] instead", c.configFile) + c.WarningMsgs = append(c.WarningMsgs, msg) + } + if c.LogLevelDeprecated != "" && c.Log.Level == "" { + c.Log.Level = c.LogLevelDeprecated + msg := fmt.Sprintf("log-level in %s is deprecated, use [log] instead", c.configFile) + c.WarningMsgs = append(c.WarningMsgs, msg) + } + if meta.IsDefined("schedule", "disable-raft-learner") { + msg := fmt.Sprintf("disable-raft-learner in %s is deprecated", c.configFile) + c.WarningMsgs = append(c.WarningMsgs, msg) + } + } + + // Parse again to replace with command line options. + err = c.FlagSet.Parse(arguments) + if err != nil { + return errors.WithStack(err) + } + + if len(c.FlagSet.Args()) != 0 { + return errors.Errorf("'%s' is an invalid flag", c.FlagSet.Arg(0)) + } + + err = c.Adjust(meta) + return err +} + +// Validate is used to validate if some configurations are right. +func (c *Config) Validate() error { + dataDir, err := filepath.Abs(c.DataDir) + if err != nil { + return errors.WithStack(err) + } + logFile, err := filepath.Abs(c.Log.File.Filename) + if err != nil { + return errors.WithStack(err) + } + rel, err := filepath.Rel(dataDir, filepath.Dir(logFile)) + if err != nil { + return errors.WithStack(err) + } + if !strings.HasPrefix(rel, "..") { + return errors.New("log directory shouldn't be the subdirectory of data directory") + } + + return nil +} + +// Utility to test if a configuration is defined. +type configMetaData struct { + meta *toml.MetaData + path []string +} + +func newConfigMetadata(meta *toml.MetaData) *configMetaData { + return &configMetaData{meta: meta} +} + +func (m *configMetaData) IsDefined(key string) bool { + if m.meta == nil { + return false + } + keys := append([]string(nil), m.path...) + keys = append(keys, key) + return m.meta.IsDefined(keys...) +} + +func (m *configMetaData) Child(path ...string) *configMetaData { + newPath := append([]string(nil), m.path...) + newPath = append(newPath, path...) 
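+	// newPath starts from a fresh slice (append onto a nil slice above), so a
+	// child metadata view never shares a backing array with its parent's path.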
+ return &configMetaData{ + meta: m.meta, + path: newPath, + } +} + +func (m *configMetaData) CheckUndecoded() error { + if m.meta == nil { + return nil + } + undecoded := m.meta.Undecoded() + if len(undecoded) == 0 { + return nil + } + errInfo := "Config contains undefined item: " + for _, key := range undecoded { + errInfo += key.String() + ", " + } + return errors.New(errInfo[:len(errInfo)-2]) +} + +// Adjust is used to adjust the PD configurations. +func (c *Config) Adjust(meta *toml.MetaData) error { + configMetaData := newConfigMetadata(meta) + if err := configMetaData.CheckUndecoded(); err != nil { + c.WarningMsgs = append(c.WarningMsgs, err.Error()) + } + + if c.Name == "" { + hostname, err := os.Hostname() + if err != nil { + return err + } + adjustString(&c.Name, fmt.Sprintf("%s-%s", defaultName, hostname)) + } + adjustString(&c.DataDir, fmt.Sprintf("default.%s", c.Name)) + + if err := c.Validate(); err != nil { + return err + } + + adjustString(&c.ClientUrls, defaultClientUrls) + adjustString(&c.AdvertiseClientUrls, c.ClientUrls) + adjustString(&c.PeerUrls, defaultPeerUrls) + adjustString(&c.AdvertisePeerUrls, c.PeerUrls) + + if len(c.InitialCluster) == 0 { + // The advertise peer urls may be http://127.0.0.1:2380,http://127.0.0.1:2381 + // so the initial cluster is pd=http://127.0.0.1:2380,pd=http://127.0.0.1:2381 + items := strings.Split(c.AdvertisePeerUrls, ",") + + sep := "" + for _, item := range items { + c.InitialCluster += fmt.Sprintf("%s%s=%s", sep, c.Name, item) + sep = "," + } + } + + adjustString(&c.InitialClusterState, defaultInitialClusterState) + + adjustInt64(&c.LeaderLease, defaultLeaderLease) + + adjustDuration(&c.TsoSaveInterval, time.Duration(defaultLeaderLease)*time.Second) + + if c.nextRetryDelay == 0 { + c.nextRetryDelay = defaultNextRetryDelay + } + + adjustString(&c.AutoCompactionMode, defaultCompactionMode) + adjustString(&c.AutoCompactionRetention, defaultAutoCompactionRetention) + adjustDuration(&c.TickInterval, defaultTickInterval) + adjustDuration(&c.ElectionInterval, defaultElectionInterval) + + if err := c.Schedule.adjust(configMetaData.Child("schedule")); err != nil { + return err + } + if err := c.Replication.adjust(); err != nil { + return err + } + + if err := c.PDServerCfg.adjust(configMetaData.Child("pd-server")); err != nil { + return err + } + + adjustDuration(&c.HeartbeatStreamBindInterval, defaultHeartbeatStreamRebindInterval) + + adjustDuration(&c.LeaderPriorityCheckInterval, defaultLeaderPriorityCheckInterval) + + if !configMetaData.IsDefined("enable-grpc-gateway") { + c.EnableGRPCGateway = defaultEnableGRPCGateway + } + return nil +} + +// Clone returns a cloned configuration. +func (c *Config) Clone() *Config { + cfg := &Config{} + *cfg = *c + return cfg +} + +func (c *Config) String() string { + data, err := json.MarshalIndent(c, "", " ") + if err != nil { + return "" + } + return string(data) +} + +// configFromFile loads config from file. +func (c *Config) configFromFile(path string) (*toml.MetaData, error) { + meta, err := toml.DecodeFile(path, c) + return &meta, errors.WithStack(err) +} + +// ScheduleConfig is the schedule configuration. +type ScheduleConfig struct { + // PatrolRegionInterval is the interval for scanning region during patrol. + PatrolRegionInterval typeutil.Duration `toml:"patrol-region-interval,omitempty" json:"patrol-region-interval"` + // MaxStoreDownTime is the max duration after which + // a store will be considered to be down if it hasn't reported heartbeats. 
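+	// It defaults to defaultMaxStoreDownTime (30 minutes); see ScheduleConfig.adjust.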
+ MaxStoreDownTime typeutil.Duration `toml:"max-store-down-time,omitempty" json:"max-store-down-time"` + // LeaderScheduleLimit is the max coexist leader schedules. + LeaderScheduleLimit uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit"` + // RegionScheduleLimit is the max coexist region schedules. + RegionScheduleLimit uint64 `toml:"region-schedule-limit,omitempty" json:"region-schedule-limit"` + // ReplicaScheduleLimit is the max coexist replica schedules. + ReplicaScheduleLimit uint64 `toml:"replica-schedule-limit,omitempty" json:"replica-schedule-limit"` + + // Schedulers support for loading customized schedulers + Schedulers SchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2"` // json v2 is for the sake of compatible upgrade + + // Only used to display + SchedulersPayload map[string]string `json:"schedulers,omitempty"` +} + +// Clone returns a cloned scheduling configuration. +func (c *ScheduleConfig) Clone() *ScheduleConfig { + schedulers := make(SchedulerConfigs, len(c.Schedulers)) + copy(schedulers, c.Schedulers) + return &ScheduleConfig{ + PatrolRegionInterval: c.PatrolRegionInterval, + MaxStoreDownTime: c.MaxStoreDownTime, + LeaderScheduleLimit: c.LeaderScheduleLimit, + RegionScheduleLimit: c.RegionScheduleLimit, + ReplicaScheduleLimit: c.ReplicaScheduleLimit, + Schedulers: schedulers, + } +} + +const ( + defaultMaxReplicas = 3 + defaultPatrolRegionInterval = 100 * time.Millisecond + defaultMaxStoreDownTime = 30 * time.Minute + defaultLeaderScheduleLimit = 4 + defaultRegionScheduleLimit = 2048 + defaultReplicaScheduleLimit = 64 +) + +func (c *ScheduleConfig) adjust(meta *configMetaData) error { + adjustDuration(&c.PatrolRegionInterval, defaultPatrolRegionInterval) + adjustDuration(&c.MaxStoreDownTime, defaultMaxStoreDownTime) + if !meta.IsDefined("leader-schedule-limit") { + adjustUint64(&c.LeaderScheduleLimit, defaultLeaderScheduleLimit) + } + if !meta.IsDefined("region-schedule-limit") { + adjustUint64(&c.RegionScheduleLimit, defaultRegionScheduleLimit) + } + if !meta.IsDefined("replica-schedule-limit") { + adjustUint64(&c.ReplicaScheduleLimit, defaultReplicaScheduleLimit) + } + adjustSchedulers(&c.Schedulers, defaultSchedulers) + + return c.Validate() +} + +// Validate is used to validate if some scheduling configurations are right. +func (c *ScheduleConfig) Validate() error { + for _, scheduleConfig := range c.Schedulers { + if !schedule.IsSchedulerRegistered(scheduleConfig.Type) { + return errors.Errorf("create func of %v is not registered, maybe misspelled", scheduleConfig.Type) + } + } + return nil +} + +// SchedulerConfigs is a slice of customized scheduler configuration. +type SchedulerConfigs []SchedulerConfig + +// SchedulerConfig is customized scheduler configuration +type SchedulerConfig struct { + Type string `toml:"type" json:"type"` + Args []string `toml:"args,omitempty" json:"args"` + Disable bool `toml:"disable" json:"disable"` + ArgsPayload string `toml:"args-payload,omitempty" json:"args-payload"` +} + +var defaultSchedulers = SchedulerConfigs{ + {Type: "balance-region"}, + {Type: "balance-leader"}, +} + +// IsDefaultScheduler checks whether the scheduler is enable by default. +func IsDefaultScheduler(typ string) bool { + for _, c := range defaultSchedulers { + if typ == c.Type { + return true + } + } + return false +} + +// ReplicationConfig is the replication configuration. +type ReplicationConfig struct { + // MaxReplicas is the number of replicas for each region. 
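+	// It defaults to defaultMaxReplicas (3); see ReplicationConfig.adjust.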
+ MaxReplicas uint64 `toml:"max-replicas,omitempty" json:"max-replicas"` +} + +func (c *ReplicationConfig) clone() *ReplicationConfig { + return &ReplicationConfig{ + MaxReplicas: c.MaxReplicas, + } +} + +func (c *ReplicationConfig) adjust() error { + adjustUint64(&c.MaxReplicas, defaultMaxReplicas) + + return nil +} + +// SecurityConfig is the configuration for supporting tls. +type SecurityConfig struct { + // CAPath is the path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty + CAPath string `toml:"cacert-path" json:"cacert-path"` + // CertPath is the path of file that contains X509 certificate in PEM format. + CertPath string `toml:"cert-path" json:"cert-path"` + // KeyPath is the path of file that contains X509 key in PEM format. + KeyPath string `toml:"key-path" json:"key-path"` +} + +// ToTLSConfig generatres tls config. +func (s SecurityConfig) ToTLSConfig() (*tls.Config, error) { + if len(s.CertPath) == 0 && len(s.KeyPath) == 0 { + return nil, nil + } + tlsInfo := transport.TLSInfo{ + CertFile: s.CertPath, + KeyFile: s.KeyPath, + TrustedCAFile: s.CAPath, + } + tlsConfig, err := tlsInfo.ClientConfig() + if err != nil { + return nil, errors.WithStack(err) + } + return tlsConfig, nil +} + +// PDServerConfig is the configuration for pd server. +type PDServerConfig struct { + // MaxResetTSGap is the max gap to reset the tso. + MaxResetTSGap time.Duration `toml:"max-reset-ts-gap" json:"max-reset-ts-gap"` +} + +func (c *PDServerConfig) adjust(meta *configMetaData) error { + if !meta.IsDefined("max-reset-ts-gap") { + c.MaxResetTSGap = defaultMaxResetTsGap + } + return nil +} + +// ParseUrls parse a string into multiple urls. +// Export for api. +func ParseUrls(s string) ([]url.URL, error) { + items := strings.Split(s, ",") + urls := make([]url.URL, 0, len(items)) + for _, item := range items { + u, err := url.Parse(item) + if err != nil { + return nil, errors.WithStack(err) + } + + urls = append(urls, *u) + } + + return urls, nil +} + +// SetupLogger setup the logger. +func (c *Config) SetupLogger() error { + lg, p, err := log.InitLogger(&c.Log, zap.AddStacktrace(zapcore.FatalLevel)) + if err != nil { + return err + } + c.logger = lg + c.logProps = p + return nil +} + +// GetZapLogger gets the created zap logger. +func (c *Config) GetZapLogger() *zap.Logger { + return c.logger +} + +// GetZapLogProperties gets properties of the zap logger. +func (c *Config) GetZapLogProperties() *log.ZapProperties { + return c.logProps +} + +// GenEmbedEtcdConfig generates a configuration for embedded etcd. 
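+// It copies the name, data dir, initial-cluster settings, tick/election
+// intervals, auto-compaction options, backend quota and TLS paths onto the
+// corresponding embed.Config fields, then parses the peer/client URL lists
+// with ParseUrls.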
+func (c *Config) GenEmbedEtcdConfig() (*embed.Config, error) { + cfg := embed.NewConfig() + cfg.Name = c.Name + cfg.Dir = c.DataDir + cfg.WalDir = "" + cfg.InitialCluster = c.InitialCluster + cfg.ClusterState = c.InitialClusterState + cfg.EnablePprof = true + cfg.StrictReconfigCheck = !c.DisableStrictReconfigCheck + cfg.TickMs = uint(c.TickInterval.Duration / time.Millisecond) + cfg.ElectionMs = uint(c.ElectionInterval.Duration / time.Millisecond) + cfg.AutoCompactionMode = c.AutoCompactionMode + cfg.AutoCompactionRetention = c.AutoCompactionRetention + cfg.QuotaBackendBytes = int64(c.QuotaBackendBytes) + + cfg.ClientTLSInfo.ClientCertAuth = len(c.Security.CAPath) != 0 + cfg.ClientTLSInfo.TrustedCAFile = c.Security.CAPath + cfg.ClientTLSInfo.CertFile = c.Security.CertPath + cfg.ClientTLSInfo.KeyFile = c.Security.KeyPath + cfg.PeerTLSInfo.TrustedCAFile = c.Security.CAPath + cfg.PeerTLSInfo.CertFile = c.Security.CertPath + cfg.PeerTLSInfo.KeyFile = c.Security.KeyPath + cfg.ForceNewCluster = c.ForceNewCluster + cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(c.logger, c.logger.Core(), c.logProps.Syncer) + cfg.EnableGRPCGateway = c.EnableGRPCGateway + cfg.Logger = "zap" + var err error + + cfg.LPUrls, err = ParseUrls(c.PeerUrls) + if err != nil { + return nil, err + } + + cfg.APUrls, err = ParseUrls(c.AdvertisePeerUrls) + if err != nil { + return nil, err + } + + cfg.LCUrls, err = ParseUrls(c.ClientUrls) + if err != nil { + return nil, err + } + + cfg.ACUrls, err = ParseUrls(c.AdvertiseClientUrls) + if err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/scheduler/server/config/option.go b/scheduler/server/config/option.go new file mode 100644 index 00000000..2ea74bcf --- /dev/null +++ b/scheduler/server/config/option.go @@ -0,0 +1,183 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "context" + "reflect" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" +) + +// ScheduleOption is a wrapper to access the configuration safely. +type ScheduleOption struct { + schedule atomic.Value + replication *Replication + pdServerConfig atomic.Value +} + +// NewScheduleOption creates a new ScheduleOption. +func NewScheduleOption(cfg *Config) *ScheduleOption { + o := &ScheduleOption{} + o.Store(&cfg.Schedule) + o.replication = newReplication(&cfg.Replication) + o.pdServerConfig.Store(&cfg.PDServerCfg) + return o +} + +// Load returns scheduling configurations. +func (o *ScheduleOption) Load() *ScheduleConfig { + return o.schedule.Load().(*ScheduleConfig) +} + +// Store sets scheduling configurations. +func (o *ScheduleOption) Store(cfg *ScheduleConfig) { + o.schedule.Store(cfg) +} + +// GetReplication returns replication configurations. +func (o *ScheduleOption) GetReplication() *Replication { + return o.replication +} + +// GetMaxReplicas returns the number of replicas for each region. 
+func (o *ScheduleOption) GetMaxReplicas() int { + return o.replication.GetMaxReplicas() +} + +// SetMaxReplicas sets the number of replicas for each region. +func (o *ScheduleOption) SetMaxReplicas(replicas int) { + o.replication.SetMaxReplicas(replicas) +} + +// GetPatrolRegionInterval returns the interval of patroling region. +func (o *ScheduleOption) GetPatrolRegionInterval() time.Duration { + return o.Load().PatrolRegionInterval.Duration +} + +// GetMaxStoreDownTime returns the max down time of a store. +func (o *ScheduleOption) GetMaxStoreDownTime() time.Duration { + return o.Load().MaxStoreDownTime.Duration +} + +// GetLeaderScheduleLimit returns the limit for leader schedule. +func (o *ScheduleOption) GetLeaderScheduleLimit() uint64 { + return o.Load().LeaderScheduleLimit +} + +// GetRegionScheduleLimit returns the limit for region schedule. +func (o *ScheduleOption) GetRegionScheduleLimit() uint64 { + return o.Load().RegionScheduleLimit +} + +// GetReplicaScheduleLimit returns the limit for replica schedule. +func (o *ScheduleOption) GetReplicaScheduleLimit() uint64 { + return o.Load().ReplicaScheduleLimit +} + +// GetSchedulers gets the scheduler configurations. +func (o *ScheduleOption) GetSchedulers() SchedulerConfigs { + return o.Load().Schedulers +} + +// AddSchedulerCfg adds the scheduler configurations. +func (o *ScheduleOption) AddSchedulerCfg(tp string, args []string) { + c := o.Load() + v := c.Clone() + for i, schedulerCfg := range v.Schedulers { + // comparing args is to cover the case that there are schedulers in same type but not with same name + // such as two schedulers of type "evict-leader", + // one name is "evict-leader-scheduler-1" and the other is "evict-leader-scheduler-2" + if reflect.DeepEqual(schedulerCfg, SchedulerConfig{Type: tp, Args: args, Disable: false}) { + return + } + + if reflect.DeepEqual(schedulerCfg, SchedulerConfig{Type: tp, Args: args, Disable: true}) { + schedulerCfg.Disable = false + v.Schedulers[i] = schedulerCfg + o.Store(v) + return + } + } + v.Schedulers = append(v.Schedulers, SchedulerConfig{Type: tp, Args: args, Disable: false}) + o.Store(v) +} + +// RemoveSchedulerCfg removes the scheduler configurations. +func (o *ScheduleOption) RemoveSchedulerCfg(ctx context.Context, name string) error { + c := o.Load() + v := c.Clone() + for i, schedulerCfg := range v.Schedulers { + // To create a temporary scheduler is just used to get scheduler's name + decoder := schedule.ConfigSliceDecoder(schedulerCfg.Type, schedulerCfg.Args) + tmp, err := schedule.CreateScheduler(schedulerCfg.Type, schedule.NewOperatorController(ctx, nil, nil), core.NewStorage(kv.NewMemoryKV()), decoder) + if err != nil { + return err + } + if tmp.GetName() == name { + if IsDefaultScheduler(tmp.GetType()) { + schedulerCfg.Disable = true + v.Schedulers[i] = schedulerCfg + } else { + v.Schedulers = append(v.Schedulers[:i], v.Schedulers[i+1:]...) + } + o.Store(v) + return nil + } + } + return nil +} + +// LoadPDServerConfig returns PD server configurations. +func (o *ScheduleOption) LoadPDServerConfig() *PDServerConfig { + return o.pdServerConfig.Load().(*PDServerConfig) +} + +// Replication provides some help to do replication. +type Replication struct { + replicateCfg atomic.Value +} + +func newReplication(cfg *ReplicationConfig) *Replication { + r := &Replication{} + r.Store(cfg) + return r +} + +// Load returns replication configurations. 
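+// Replication follows the same copy-on-write pattern as ScheduleOption:
+// readers Load() a snapshot from an atomic.Value, writers clone it, mutate the
+// copy and Store() it back, so concurrent readers never need a mutex.
+// SetMaxReplicas below is the canonical example (sketch, names as in this file):
+//
+//	c := r.Load()          // read the current snapshot
+//	v := c.clone()         // copy it
+//	v.MaxReplicas = ...    // mutate only the copy
+//	r.Store(v)             // publish it atomically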
+func (r *Replication) Load() *ReplicationConfig { + return r.replicateCfg.Load().(*ReplicationConfig) +} + +// Store sets replication configurations. +func (r *Replication) Store(cfg *ReplicationConfig) { + r.replicateCfg.Store(cfg) +} + +// GetMaxReplicas returns the number of replicas for each region. +func (r *Replication) GetMaxReplicas() int { + return int(r.Load().MaxReplicas) +} + +// SetMaxReplicas set the replicas for each region. +func (r *Replication) SetMaxReplicas(replicas int) { + c := r.Load() + v := c.clone() + v.MaxReplicas = uint64(replicas) + r.Store(v) +} diff --git a/scheduler/server/coordinator.go b/scheduler/server/coordinator.go new file mode 100644 index 00000000..2aea3694 --- /dev/null +++ b/scheduler/server/coordinator.go @@ -0,0 +1,367 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +const ( + runSchedulerCheckInterval = 3 * time.Second + collectFactor = 0.8 + collectTimeout = 5 * time.Minute + maxScheduleRetries = 10 + maxLoadConfigRetries = 10 + + regionheartbeatSendChanCap = 1024 + + patrolScanRegionLimit = 128 // It takes about 14 minutes to iterate 1 million regions. +) + +var ( + errSchedulerExisted = errors.New("scheduler existed") + errSchedulerNotFound = errors.New("scheduler not found") +) + +// coordinator is used to manage all schedulers and checkers to decide if the region needs to be scheduled. +type coordinator struct { + sync.RWMutex + + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + cluster *RaftCluster + checkers *schedule.CheckerController + schedulers map[string]*scheduleController + opController *schedule.OperatorController + hbStreams *heartbeatStreams +} + +// newCoordinator creates a new coordinator. +func newCoordinator(ctx context.Context, cluster *RaftCluster, hbStreams *heartbeatStreams) *coordinator { + ctx, cancel := context.WithCancel(ctx) + opController := schedule.NewOperatorController(ctx, cluster, hbStreams) + return &coordinator{ + ctx: ctx, + cancel: cancel, + cluster: cluster, + checkers: schedule.NewCheckerController(ctx, cluster, opController), + schedulers: make(map[string]*scheduleController), + opController: opController, + hbStreams: hbStreams, + } +} + +// patrolRegions is used to scan regions. +// The checkers will check these regions to decide if they need to do some operations. 
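+// Each pass scans at most patrolScanRegionLimit regions, resuming from the end
+// key of the last region it checked; when a scan returns nothing the cursor
+// resets to nil and the patrol starts over from the beginning of the key space.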
+func (c *coordinator) patrolRegions() { + defer logutil.LogPanic() + + defer c.wg.Done() + timer := time.NewTimer(c.cluster.GetPatrolRegionInterval()) + defer timer.Stop() + + log.Info("coordinator starts patrol regions") + var key []byte + for { + select { + case <-timer.C: + timer.Reset(c.cluster.GetPatrolRegionInterval()) + case <-c.ctx.Done(): + log.Info("patrol regions has been stopped") + return + } + + regions := c.cluster.ScanRegions(key, nil, patrolScanRegionLimit) + if len(regions) == 0 { + // Resets the scan key. + key = nil + continue + } + + for _, region := range regions { + // Skips the region if there is already a pending operator. + if c.opController.GetOperator(region.GetID()) != nil { + continue + } + + checkerIsBusy, ops := c.checkers.CheckRegion(region) + if checkerIsBusy { + break + } + + key = region.GetEndKey() + if ops != nil { + c.opController.AddOperator(ops...) + } + } + } +} + +func (c *coordinator) run() { + ticker := time.NewTicker(runSchedulerCheckInterval) + defer ticker.Stop() + log.Info("coordinator starts to collect cluster information") + for { + if c.shouldRun() { + log.Info("coordinator has finished cluster information preparation") + break + } + select { + case <-ticker.C: + case <-c.ctx.Done(): + log.Info("coordinator stops running") + return + } + } + log.Info("coordinator starts to run schedulers") + var ( + scheduleNames []string + configs []string + err error + ) + for i := 0; i < maxLoadConfigRetries; i++ { + scheduleNames, configs, err = c.cluster.storage.LoadAllScheduleConfig() + if err == nil { + break + } + log.Error("cannot load schedulers' config", zap.Int("retry-times", i), zap.Error(err)) + } + if err != nil { + log.Fatal("cannot load schedulers' config", zap.Error(err)) + } + + scheduleCfg := c.cluster.opt.Load().Clone() + // The new way to create scheduler with the independent configuration. + for i, name := range scheduleNames { + data := configs[i] + typ := schedule.FindSchedulerTypeByName(name) + var cfg config.SchedulerConfig + for _, c := range scheduleCfg.Schedulers { + if c.Type == typ { + cfg = c + break + } + } + if len(cfg.Type) == 0 { + log.Error("the scheduler type not found", zap.String("scheduler-name", name)) + continue + } + if cfg.Disable { + log.Info("skip create scheduler with independent configuration", zap.String("scheduler-name", name), zap.String("scheduler-type", cfg.Type)) + continue + } + s, err := schedule.CreateScheduler(cfg.Type, c.opController, c.cluster.storage, schedule.ConfigJSONDecoder([]byte(data))) + if err != nil { + log.Error("can not create scheduler with independent configuration", zap.String("scheduler-name", name), zap.Error(err)) + continue + } + log.Info("create scheduler with independent configuration", zap.String("scheduler-name", s.GetName())) + if err = c.addScheduler(s); err != nil { + log.Error("can not add scheduler with independent configuration", zap.String("scheduler-name", s.GetName()), zap.Error(err)) + } + } + + // The old way to create the scheduler. 
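+	// Disabled entries are kept untouched; entries whose scheduler cannot be
+	// created, or cannot be added for any reason other than errSchedulerExisted,
+	// are dropped. The compacted slice is stored back below so that only valid
+	// scheduler configs are persisted.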
+ k := 0 + for _, schedulerCfg := range scheduleCfg.Schedulers { + if schedulerCfg.Disable { + scheduleCfg.Schedulers[k] = schedulerCfg + k++ + log.Info("skip create scheduler", zap.String("scheduler-type", schedulerCfg.Type)) + continue + } + + s, err := schedule.CreateScheduler(schedulerCfg.Type, c.opController, c.cluster.storage, schedule.ConfigSliceDecoder(schedulerCfg.Type, schedulerCfg.Args)) + if err != nil { + log.Error("can not create scheduler", zap.String("scheduler-type", schedulerCfg.Type), zap.Error(err)) + continue + } + + log.Info("create scheduler", zap.String("scheduler-name", s.GetName())) + if err = c.addScheduler(s, schedulerCfg.Args...); err != nil && err != errSchedulerExisted { + log.Error("can not add scheduler", zap.String("scheduler-name", s.GetName()), zap.Error(err)) + } else { + // Only records the valid scheduler config. + scheduleCfg.Schedulers[k] = schedulerCfg + k++ + } + } + + // Removes the invalid scheduler config and persist. + scheduleCfg.Schedulers = scheduleCfg.Schedulers[:k] + c.cluster.opt.Store(scheduleCfg) + + c.wg.Add(1) + // Starts to patrol regions. + go c.patrolRegions() +} + +func (c *coordinator) stop() { + c.cancel() +} + +func (c *coordinator) getSchedulers() []string { + c.RLock() + defer c.RUnlock() + + names := make([]string, 0, len(c.schedulers)) + for name := range c.schedulers { + names = append(names, name) + } + return names +} + +func (c *coordinator) shouldRun() bool { + return c.cluster.isPrepared() +} + +func (c *coordinator) addScheduler(scheduler schedule.Scheduler, args ...string) error { + c.Lock() + defer c.Unlock() + + if _, ok := c.schedulers[scheduler.GetName()]; ok { + return errSchedulerExisted + } + + s := newScheduleController(c, scheduler) + if err := s.Prepare(c.cluster); err != nil { + return err + } + + c.wg.Add(1) + go c.runScheduler(s) + c.schedulers[s.GetName()] = s + c.cluster.opt.AddSchedulerCfg(s.GetType(), args) + + return nil +} + +func (c *coordinator) removeScheduler(name string) error { + c.Lock() + defer c.Unlock() + if c.cluster == nil { + return ErrNotBootstrapped + } + s, ok := c.schedulers[name] + if !ok { + return errSchedulerNotFound + } + + s.Stop() + delete(c.schedulers, name) + + var err error + opt := c.cluster.opt + if err = opt.RemoveSchedulerCfg(s.Ctx(), name); err != nil { + log.Error("can not remove scheduler", zap.String("scheduler-name", name), zap.Error(err)) + } else { + err = c.cluster.storage.RemoveScheduleConfig(name) + if err != nil { + log.Error("can not remove the scheduler config", zap.Error(err)) + } + } + return err +} + +func (c *coordinator) runScheduler(s *scheduleController) { + defer logutil.LogPanic() + defer c.wg.Done() + defer s.Cleanup(c.cluster) + + timer := time.NewTimer(s.GetInterval()) + defer timer.Stop() + + for { + select { + case <-timer.C: + timer.Reset(s.GetInterval()) + if !s.AllowSchedule() { + continue + } + if op := s.Schedule(); op != nil { + c.opController.AddOperator(op) + } + + case <-s.Ctx().Done(): + log.Info("scheduler has been stopped", + zap.String("scheduler-name", s.GetName()), + zap.Error(s.Ctx().Err())) + return + } + } +} + +// scheduleController is used to manage a scheduler to schedule. +type scheduleController struct { + schedule.Scheduler + cluster *RaftCluster + opController *schedule.OperatorController + nextInterval time.Duration + ctx context.Context + cancel context.CancelFunc +} + +// newScheduleController creates a new scheduleController. 
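+// The controller starts at the scheduler's minimal interval; Schedule resets
+// the interval to that minimum whenever it produces an operator and otherwise
+// backs off via GetNextInterval.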
+func newScheduleController(c *coordinator, s schedule.Scheduler) *scheduleController { + ctx, cancel := context.WithCancel(c.ctx) + return &scheduleController{ + Scheduler: s, + cluster: c.cluster, + opController: c.opController, + nextInterval: s.GetMinInterval(), + ctx: ctx, + cancel: cancel, + } +} + +func (s *scheduleController) Ctx() context.Context { + return s.ctx +} + +func (s *scheduleController) Stop() { + s.cancel() +} + +func (s *scheduleController) Schedule() *operator.Operator { + for i := 0; i < maxScheduleRetries; i++ { + // If we have schedule, reset interval to the minimal interval. + if op := s.Scheduler.Schedule(s.cluster); op != nil { + s.nextInterval = s.Scheduler.GetMinInterval() + return op + } + } + s.nextInterval = s.Scheduler.GetNextInterval(s.nextInterval) + return nil +} + +// GetInterval returns the interval of scheduling for a scheduler. +func (s *scheduleController) GetInterval() time.Duration { + return s.nextInterval +} + +// AllowSchedule returns if a scheduler is allowed to schedule. +func (s *scheduleController) AllowSchedule() bool { + return s.Scheduler.IsScheduleAllowed(s.cluster) +} diff --git a/scheduler/server/coordinator_test.go b/scheduler/server/coordinator_test.go new file mode 100644 index 00000000..f5bb1b58 --- /dev/null +++ b/scheduler/server/coordinator_test.go @@ -0,0 +1,803 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockhbstream" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/id" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedulers" + . "github.com/pingcap/check" +) + +func newTestScheduleConfig() (*config.ScheduleConfig, *config.ScheduleOption, error) { + cfg := config.NewConfig() + if err := cfg.Adjust(nil); err != nil { + return nil, nil, err + } + opt := config.NewScheduleOption(cfg) + return &cfg.Schedule, opt, nil +} + +func newTestOperator(regionID uint64, regionEpoch *metapb.RegionEpoch, kind operator.OpKind, steps ...operator.OpStep) *operator.Operator { + return operator.NewOperator("test", "test", regionID, regionEpoch, kind, steps...) 
+} + +type testCluster struct { + *RaftCluster +} + +func newTestCluster(opt *config.ScheduleOption) *testCluster { + cluster := createTestRaftCluster(mockid.NewIDAllocator(), opt, core.NewStorage(kv.NewMemoryKV())) + return &testCluster{RaftCluster: cluster} +} + +func newTestRegionMeta(regionID uint64) *metapb.Region { + return &metapb.Region{ + Id: regionID, + StartKey: []byte(fmt.Sprintf("%20d", regionID)), + EndKey: []byte(fmt.Sprintf("%20d", regionID+1)), + RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}, + } +} + +func (c *testCluster) addRegionStore(storeID uint64, regionCount int, regionSizes ...uint64) error { + var regionSize uint64 + if len(regionSizes) == 0 { + regionSize = uint64(regionCount) * 10 + } else { + regionSize = regionSizes[0] + } + + stats := &schedulerpb.StoreStats{} + stats.Capacity = 1000 * (1 << 20) + stats.Available = stats.Capacity - regionSize + newStore := core.NewStoreInfo(&metapb.Store{Id: storeID}, + core.SetStoreStats(stats), + core.SetRegionCount(regionCount), + core.SetRegionSize(int64(regionSize)), + core.SetLastHeartbeatTS(time.Now()), + ) + c.Lock() + defer c.Unlock() + return c.putStoreLocked(newStore) +} + +func (c *testCluster) addLeaderRegion(regionID uint64, leaderStoreID uint64, followerStoreIDs ...uint64) error { + region := newTestRegionMeta(regionID) + leader, _ := c.AllocPeer(leaderStoreID) + region.Peers = []*metapb.Peer{leader} + for _, followerStoreID := range followerStoreIDs { + peer, _ := c.AllocPeer(followerStoreID) + region.Peers = append(region.Peers, peer) + } + regionInfo := core.NewRegionInfo(region, leader, core.SetApproximateSize(10)) + return c.putRegion(regionInfo) +} + +func (c *testCluster) updateLeaderCount(storeID uint64, leaderCount int) error { + store := c.GetStore(storeID) + newStore := store.Clone( + core.SetLeaderCount(leaderCount), + core.SetLeaderSize(int64(leaderCount)*10), + ) + c.Lock() + defer c.Unlock() + return c.putStoreLocked(newStore) +} + +func (c *testCluster) addLeaderStore(storeID uint64, leaderCount int) error { + stats := &schedulerpb.StoreStats{} + newStore := core.NewStoreInfo(&metapb.Store{Id: storeID}, + core.SetStoreStats(stats), + core.SetLeaderCount(leaderCount), + core.SetLeaderSize(int64(leaderCount)*10), + core.SetLastHeartbeatTS(time.Now()), + ) + c.Lock() + defer c.Unlock() + return c.putStoreLocked(newStore) +} + +func (c *testCluster) setStoreDown(storeID uint64) error { + store := c.GetStore(storeID) + newStore := store.Clone( + core.SetStoreState(metapb.StoreState_Up), + core.SetLastHeartbeatTS(time.Time{}), + ) + c.Lock() + defer c.Unlock() + return c.putStoreLocked(newStore) +} + +func (c *testCluster) setStoreOffline(storeID uint64) error { + store := c.GetStore(storeID) + newStore := store.Clone(core.SetStoreState(metapb.StoreState_Offline)) + c.Lock() + defer c.Unlock() + return c.putStoreLocked(newStore) +} + +func (c *testCluster) LoadRegion(regionID uint64, followerStoreIDs ...uint64) error { + // regions load from etcd will have no leader + region := newTestRegionMeta(regionID) + region.Peers = []*metapb.Peer{} + for _, id := range followerStoreIDs { + peer, _ := c.AllocPeer(id) + region.Peers = append(region.Peers, peer) + } + return c.putRegion(core.NewRegionInfo(region, nil)) +} + +func waitOperator(c *C, co *coordinator, regionID uint64) { + testutil.WaitUntil(c, func(c *C) bool { + return co.opController.GetOperator(regionID) != nil + }) +} + +var _ = Suite(&testCoordinatorSuite{}) + +type testCoordinatorSuite struct { + ctx context.Context + cancel 
context.CancelFunc +} + +func (s *testCoordinatorSuite) SetUpSuite(c *C) { + s.ctx, s.cancel = context.WithCancel(context.Background()) +} + +func (s *testCoordinatorSuite) TearDownSuite(c *C) { + s.cancel() +} + +func (s *testCoordinatorSuite) TestBasic(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + oc := co.opController + + c.Assert(tc.addLeaderRegion(1, 1), IsNil) + + op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) + oc.AddOperator(op1) + c.Assert(oc.OperatorCount(op1.Kind()), Equals, uint64(1)) + c.Assert(oc.GetOperator(1).RegionID(), Equals, op1.RegionID()) + + // Region 1 already has an operator, cannot add another one. + op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion) + oc.AddOperator(op2) + c.Assert(oc.OperatorCount(op2.Kind()), Equals, uint64(0)) + + // Remove the operator manually, then we can add a new operator. + c.Assert(oc.RemoveOperator(op1), IsTrue) + oc.AddOperator(op2) + c.Assert(oc.OperatorCount(op2.Kind()), Equals, uint64(1)) + c.Assert(oc.GetOperator(1).RegionID(), Equals, op2.RegionID()) +} + +func (s *testCoordinatorSuite) TestDispatch(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + defer co.wg.Wait() + defer co.stop() + + // Transfer peer from store 4 to store 1. + c.Assert(tc.addRegionStore(4, 40), IsNil) + c.Assert(tc.addRegionStore(3, 30), IsNil) + c.Assert(tc.addRegionStore(2, 20), IsNil) + c.Assert(tc.addRegionStore(1, 10), IsNil) + c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil) + + // Transfer leader from store 4 to store 2. + c.Assert(tc.updateLeaderCount(4, 50), IsNil) + c.Assert(tc.updateLeaderCount(3, 30), IsNil) + c.Assert(tc.updateLeaderCount(2, 20), IsNil) + c.Assert(tc.updateLeaderCount(1, 10), IsNil) + c.Assert(tc.addLeaderRegion(2, 4, 3, 2), IsNil) + + // Wait for schedule and turn off balance. + waitOperator(c, co, 1) + testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpBalance, 4, 1) + c.Assert(co.removeScheduler("balance-region-scheduler"), IsNil) + waitOperator(c, co, 2) + testutil.CheckTransferLeader(c, co.opController.GetOperator(2), operator.OpBalance, 4, 2) + c.Assert(co.removeScheduler("balance-leader-scheduler"), IsNil) + + stream := mockhbstream.NewHeartbeatStream() + + // Transfer peer. + region := tc.GetRegion(1).Clone() + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitAddPeer(c, stream, region, 1) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitRemovePeer(c, stream, region, 4) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) + + // Transfer leader. 
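+ // Region 2 still has the transfer-leader operator created before
+ // balance-leader-scheduler was removed; dispatching heartbeats drives it until
+ // the leader reaches store 2 and no further response is produced.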
+ region = tc.GetRegion(2).Clone() + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitTransferLeader(c, stream, region, 2) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) +} + +func dispatchHeartbeat(c *C, co *coordinator, region *core.RegionInfo, stream mockhbstream.HeartbeatStream) error { + co.hbStreams.bindStream(region.GetLeader().GetStoreId(), stream) + if err := co.cluster.putRegion(region.Clone()); err != nil { + return err + } + co.opController.Dispatch(region, schedule.DispatchFromHeartBeat) + return nil +} + +func (s *testCoordinatorSuite) TestReplica(c *C) { + // Turn off balance. + cfg, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cfg.LeaderScheduleLimit = 0 + cfg.RegionScheduleLimit = 0 + + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + defer co.wg.Wait() + defer co.stop() + + c.Assert(tc.addRegionStore(1, 1), IsNil) + c.Assert(tc.addRegionStore(2, 2), IsNil) + c.Assert(tc.addRegionStore(3, 3), IsNil) + c.Assert(tc.addRegionStore(4, 4), IsNil) + + stream := mockhbstream.NewHeartbeatStream() + + // Add peer to store 1. + c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil) + region := tc.GetRegion(1) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitAddPeer(c, stream, region, 1) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) + + // Remove peer from store 4. + c.Assert(tc.addLeaderRegion(2, 1, 2, 3, 4), IsNil) + region = tc.GetRegion(2) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitRemovePeer(c, stream, region, 4) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) + + // Remove offline peer directly when it's pending. + c.Assert(tc.addLeaderRegion(3, 1, 2, 3), IsNil) + c.Assert(tc.setStoreOffline(3), IsNil) + region = tc.GetRegion(3) + region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetStorePeer(3)})) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) +} + +func (s *testCoordinatorSuite) TestPeerState(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + defer co.wg.Wait() + defer co.stop() + + // Transfer peer from store 4 to store 1. + c.Assert(tc.addRegionStore(1, 10), IsNil) + c.Assert(tc.addRegionStore(2, 20), IsNil) + c.Assert(tc.addRegionStore(3, 30), IsNil) + c.Assert(tc.addRegionStore(4, 40), IsNil) + c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil) + + stream := mockhbstream.NewHeartbeatStream() + + // Wait for schedule. + waitOperator(c, co, 1) + testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpBalance, 4, 1) + + region := tc.GetRegion(1).Clone() + + // Add new peer. + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitAddPeer(c, stream, region, 1) + + // If the new peer is pending, the operator will not finish. 
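+ // A pending peer has not yet caught up with the leader's log, so the add-peer
+ // step is not considered finished and the operator stays in place.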
+ region = region.Clone(core.WithPendingPeers(append(region.GetPendingPeers(), region.GetStorePeer(1)))) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) + c.Assert(co.opController.GetOperator(region.GetID()), NotNil) + + // The new peer is not pending now, the operator will finish. + // And we will proceed to remove peer in store 4. + region = region.Clone(core.WithPendingPeers(nil)) + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitRemovePeer(c, stream, region, 4) + c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil) + region = tc.GetRegion(1).Clone() + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + waitNoResponse(c, stream) +} + +func (s *testCoordinatorSuite) TestShouldRun(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + + c.Assert(tc.addLeaderStore(1, 5), IsNil) + c.Assert(tc.addLeaderStore(2, 2), IsNil) + c.Assert(tc.addLeaderStore(3, 0), IsNil) + c.Assert(tc.addLeaderStore(4, 0), IsNil) + c.Assert(tc.LoadRegion(1, 1, 2, 3), IsNil) + c.Assert(tc.LoadRegion(2, 1, 2, 3), IsNil) + c.Assert(tc.LoadRegion(3, 1, 2, 3), IsNil) + c.Assert(tc.LoadRegion(4, 1, 2, 3), IsNil) + c.Assert(tc.LoadRegion(5, 1, 2, 3), IsNil) + c.Assert(tc.LoadRegion(6, 2, 1, 4), IsNil) + c.Assert(tc.LoadRegion(7, 2, 1, 4), IsNil) + c.Assert(co.shouldRun(), IsFalse) + c.Assert(tc.core.Regions.GetStoreRegionCount(4), Equals, 2) + + tbl := []struct { + regionID uint64 + shouldRun bool + }{ + {1, false}, + {2, false}, + {3, false}, + {4, false}, + {5, false}, + // store4 needs collect two region + {6, false}, + {7, true}, + } + + for _, t := range tbl { + r := tc.GetRegion(t.regionID) + nr := r.Clone(core.WithLeader(r.GetPeers()[0])) + c.Assert(tc.processRegionHeartbeat(nr), IsNil) + c.Assert(co.shouldRun(), Equals, t.shouldRun) + } + nr := &metapb.Region{Id: 6, Peers: []*metapb.Peer{}} + newRegion := core.NewRegionInfo(nr, nil) + c.Assert(tc.processRegionHeartbeat(newRegion), NotNil) + c.Assert(co.cluster.prepareChecker.sum, Equals, 7) + +} +func (s *testCoordinatorSuite) TestShouldRunWithNonLeaderRegions(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + + c.Assert(tc.addLeaderStore(1, 10), IsNil) + c.Assert(tc.addLeaderStore(2, 0), IsNil) + c.Assert(tc.addLeaderStore(3, 0), IsNil) + for i := 0; i < 10; i++ { + c.Assert(tc.LoadRegion(uint64(i+1), 1, 2, 3), IsNil) + } + c.Assert(co.shouldRun(), IsFalse) + c.Assert(tc.core.Regions.GetStoreRegionCount(1), Equals, 10) + + tbl := []struct { + regionID uint64 + shouldRun bool + }{ + {1, false}, + {2, false}, + {3, false}, + {4, false}, + {5, false}, + {6, false}, + {7, false}, + {8, true}, + } + + for _, t := range tbl { + r := tc.GetRegion(t.regionID) + nr := r.Clone(core.WithLeader(r.GetPeers()[0])) + c.Assert(tc.processRegionHeartbeat(nr), IsNil) + c.Assert(co.shouldRun(), Equals, t.shouldRun) + } + nr := &metapb.Region{Id: 8, Peers: []*metapb.Peer{}} + newRegion := core.NewRegionInfo(nr, nil) + c.Assert(tc.processRegionHeartbeat(newRegion), NotNil) + c.Assert(co.cluster.prepareChecker.sum, Equals, 8) + + // Now, after server is prepared, there exist some regions with no leader. 
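+ // Regions 9 and 10 were loaded without a leader and never received a
+ // heartbeat, so their leader store id is still zero.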
+ c.Assert(tc.GetRegion(9).GetLeader().GetStoreId(), Equals, uint64(0)) + c.Assert(tc.GetRegion(10).GetLeader().GetStoreId(), Equals, uint64(0)) +} + +func (s *testCoordinatorSuite) TestRemoveScheduler(c *C) { + cfg, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cfg.ReplicaScheduleLimit = 0 + + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + + // Add stores 1,2 + c.Assert(tc.addLeaderStore(1, 1), IsNil) + c.Assert(tc.addLeaderStore(2, 1), IsNil) + + c.Assert(co.schedulers, HasLen, 2) + storage := tc.RaftCluster.storage + + sches, _, err := storage.LoadAllScheduleConfig() + c.Assert(err, IsNil) + c.Assert(sches, HasLen, 2) + + // remove all schedulers + c.Assert(co.removeScheduler("balance-leader-scheduler"), IsNil) + c.Assert(co.removeScheduler("balance-region-scheduler"), IsNil) + // all removed + sches, _, err = storage.LoadAllScheduleConfig() + c.Assert(err, IsNil) + c.Assert(sches, HasLen, 0) + c.Assert(co.schedulers, HasLen, 0) + newOpt := co.cluster.opt + co.stop() + co.wg.Wait() + + // suppose restart PD again + c.Assert(err, IsNil) + tc.RaftCluster.opt = newOpt + co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + c.Assert(co.schedulers, HasLen, 0) + // the option remains default scheduler + c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 2) + co.stop() + co.wg.Wait() +} + +func (s *testCoordinatorSuite) TestRestart(c *C) { + // Turn off balance, we test add replica only. + cfg, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + cfg.LeaderScheduleLimit = 0 + cfg.RegionScheduleLimit = 0 + + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + // Add 3 stores (1, 2, 3) and a region with 1 replica on store 1. + c.Assert(tc.addRegionStore(1, 1), IsNil) + c.Assert(tc.addRegionStore(2, 2), IsNil) + c.Assert(tc.addRegionStore(3, 3), IsNil) + c.Assert(tc.addLeaderRegion(1, 1), IsNil) + region := tc.GetRegion(1) + tc.prepareChecker.collect(region) + + // Add 1 replica on store 2. + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + stream := mockhbstream.NewHeartbeatStream() + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitAddPeer(c, stream, region, 2) + co.stop() + co.wg.Wait() + + // Recreate coodinator then add another replica on store 3. 
+ co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co.run() + c.Assert(dispatchHeartbeat(c, co, region, stream), IsNil) + region = waitAddPeer(c, stream, region, 3) + co.stop() + co.wg.Wait() +} + +var _ = Suite(&testOperatorControllerSuite{}) + +type testOperatorControllerSuite struct { + ctx context.Context + cancel context.CancelFunc +} + +func (s *testOperatorControllerSuite) SetUpSuite(c *C) { + s.ctx, s.cancel = context.WithCancel(context.Background()) +} + +func (s *testOperatorControllerSuite) TearDownSuite(c *C) { + s.cancel() +} + +func (s *testOperatorControllerSuite) TestOperatorCount(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams := mockhbstream.NewHeartbeatStreams(tc.RaftCluster.getClusterID()) + oc := schedule.NewOperatorController(s.ctx, tc.RaftCluster, hbStreams) + c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0)) + c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(0)) + + c.Assert(tc.addLeaderRegion(1, 1), IsNil) + c.Assert(tc.addLeaderRegion(2, 2), IsNil) + op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) + oc.AddOperator(op1) + c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 1:leader + op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader) + oc.AddOperator(op2) + c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(2)) // 1:leader, 2:leader + c.Assert(oc.RemoveOperator(op1), IsTrue) + c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 2:leader + + op1 = newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion) + oc.AddOperator(op1) + c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(1)) // 1:region 2:leader + c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) + op2 = newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion) + op2.SetPriorityLevel(core.HighPriority) + oc.AddOperator(op2) + c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(2)) // 1:region 2:region + c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0)) +} + +var _ = Suite(&testScheduleControllerSuite{}) + +type testScheduleControllerSuite struct { + ctx context.Context + cancel context.CancelFunc +} + +func (s *testScheduleControllerSuite) SetUpSuite(c *C) { + s.ctx, s.cancel = context.WithCancel(context.Background()) +} + +func (s *testScheduleControllerSuite) TearDownSuite(c *C) { + s.cancel() +} + +// FIXME: remove after move into schedulers package +type mockLimitScheduler struct { + schedule.Scheduler + limit uint64 + counter *schedule.OperatorController + kind operator.OpKind +} + +func (s *mockLimitScheduler) IsScheduleAllowed(cluster opt.Cluster) bool { + return s.counter.OperatorCount(s.kind) < s.limit +} + +func (s *testScheduleControllerSuite) TestController(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + c.Assert(tc.addLeaderRegion(1, 1), IsNil) + c.Assert(tc.addLeaderRegion(2, 2), IsNil) + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + oc := co.opController + scheduler, err := schedule.CreateScheduler("balance-leader", oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + lb := &mockLimitScheduler{ + Scheduler: scheduler, + counter: oc, + kind: operator.OpLeader, + } + + sc := newScheduleController(co, lb) + + for i := 
schedulers.MinScheduleInterval; sc.GetInterval() != schedulers.MaxScheduleInterval; i = sc.GetNextInterval(i) { + c.Assert(sc.GetInterval(), Equals, i) + c.Assert(sc.Schedule(), IsNil) + } + // limit = 2 + lb.limit = 2 + // count = 0 + c.Assert(sc.AllowSchedule(), IsTrue) + op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) + c.Assert(oc.AddOperator(op1), IsTrue) + // count = 1 + c.Assert(sc.AllowSchedule(), IsTrue) + op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader) + c.Assert(oc.AddOperator(op2), IsTrue) + // count = 2 + c.Assert(sc.AllowSchedule(), IsFalse) + c.Assert(oc.RemoveOperator(op1), IsTrue) + // count = 1 + c.Assert(sc.AllowSchedule(), IsTrue) + + // add a PriorityKind operator will remove old operator + op3 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpBalance) + op3.SetPriorityLevel(core.HighPriority) + c.Assert(oc.AddOperator(op1), IsTrue) + c.Assert(sc.AllowSchedule(), IsFalse) + c.Assert(oc.AddOperator(op3), IsTrue) + c.Assert(sc.AllowSchedule(), IsTrue) + c.Assert(oc.RemoveOperator(op3), IsTrue) + + // add a admin operator will remove old operator + c.Assert(oc.AddOperator(op2), IsTrue) + c.Assert(sc.AllowSchedule(), IsFalse) + op4 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpAdmin) + op4.SetPriorityLevel(core.HighPriority) + c.Assert(oc.AddOperator(op4), IsTrue) + c.Assert(sc.AllowSchedule(), IsTrue) + c.Assert(oc.RemoveOperator(op4), IsTrue) + + // test wrong region id. + op5 := newTestOperator(3, &metapb.RegionEpoch{}, operator.OpBalance) + c.Assert(oc.AddOperator(op5), IsFalse) + + // test wrong region epoch. + c.Assert(oc.RemoveOperator(op1), IsTrue) + epoch := &metapb.RegionEpoch{ + Version: tc.GetRegion(1).GetRegionEpoch().GetVersion() + 1, + ConfVer: tc.GetRegion(1).GetRegionEpoch().GetConfVer(), + } + op6 := newTestOperator(1, epoch, operator.OpLeader) + c.Assert(oc.AddOperator(op6), IsFalse) + epoch.Version-- + op6 = newTestOperator(1, epoch, operator.OpLeader) + c.Assert(oc.AddOperator(op6), IsTrue) + c.Assert(oc.RemoveOperator(op6), IsTrue) +} + +func (s *testScheduleControllerSuite) TestInterval(c *C) { + _, opt, err := newTestScheduleConfig() + c.Assert(err, IsNil) + tc := newTestCluster(opt) + hbStreams, cleanup := getHeartBeatStreams(s.ctx, c, tc) + defer cleanup() + defer hbStreams.Close() + + co := newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + lb, err := schedule.CreateScheduler("balance-leader", co.opController, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + sc := newScheduleController(co, lb) + + // If no operator for x seconds, the next check should be in x/2 seconds. 
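+ // Each idle Schedule() call grows the interval via GetNextInterval (capped at
+ // MaxScheduleInterval), so after simulating roughly n seconds of idle
+ // scheduling the current interval is expected to stay below n/2 seconds.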
+ idleSeconds := []int{5, 10, 20, 30, 60} + for _, n := range idleSeconds { + sc.nextInterval = schedulers.MinScheduleInterval + for totalSleep := time.Duration(0); totalSleep <= time.Second*time.Duration(n); totalSleep += sc.GetInterval() { + c.Assert(sc.Schedule(), IsNil) + } + c.Assert(sc.GetInterval(), Less, time.Second*time.Duration(n/2)) + } +} + +func waitAddPeer(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { + var res *schedulerpb.RegionHeartbeatResponse + testutil.WaitUntil(c, func(c *C) bool { + if res = stream.Recv(); res != nil { + return res.GetRegionId() == region.GetID() && + res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_AddNode && + res.GetChangePeer().GetPeer().GetStoreId() == storeID + } + return false + }) + return region.Clone( + core.WithAddPeer(res.GetChangePeer().GetPeer()), + core.WithIncConfVer(), + ) +} + +func waitRemovePeer(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { + var res *schedulerpb.RegionHeartbeatResponse + testutil.WaitUntil(c, func(c *C) bool { + if res = stream.Recv(); res != nil { + return res.GetRegionId() == region.GetID() && + res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_RemoveNode && + res.GetChangePeer().GetPeer().GetStoreId() == storeID + } + return false + }) + return region.Clone( + core.WithRemoveStorePeer(storeID), + core.WithIncConfVer(), + ) +} + +func waitTransferLeader(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { + var res *schedulerpb.RegionHeartbeatResponse + testutil.WaitUntil(c, func(c *C) bool { + if res = stream.Recv(); res != nil { + return res.GetRegionId() == region.GetID() && res.GetTransferLeader().GetPeer().GetStoreId() == storeID + } + return false + }) + return region.Clone( + core.WithLeader(res.GetTransferLeader().GetPeer()), + ) +} + +func waitNoResponse(c *C, stream mockhbstream.HeartbeatStream) { + testutil.WaitUntil(c, func(c *C) bool { + res := stream.Recv() + return res == nil + }) +} + +func getHeartBeatStreams(ctx context.Context, c *C, tc *testCluster) (*heartbeatStreams, func()) { + config := NewTestSingleConfig(c) + svr, err := CreateServer(config) + c.Assert(err, IsNil) + kvBase := kv.NewEtcdKVBase(svr.client, svr.rootPath) + c.Assert(err, IsNil) + svr.storage = core.NewStorage(kvBase) + cluster := tc.RaftCluster + cluster.s = svr + cluster.running = false + cluster.clusterID = tc.getClusterID() + cluster.clusterRoot = svr.getClusterRootPath() + hbStreams := newHeartbeatStreams(ctx, tc.getClusterID(), cluster) + return hbStreams, func() { testutil.CleanServer(config) } +} + +func createTestRaftCluster(id id.Allocator, opt *config.ScheduleOption, storage *core.Storage) *RaftCluster { + cluster := &RaftCluster{} + cluster.initCluster(id, opt, storage) + return cluster +} diff --git a/scheduler/server/core/basic_cluster.go b/scheduler/server/core/basic_cluster.go new file mode 100644 index 00000000..61b1741f --- /dev/null +++ b/scheduler/server/core/basic_cluster.go @@ -0,0 +1,353 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "sync" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// BasicCluster provides basic data member and interface for a tikv cluster. +type BasicCluster struct { + sync.RWMutex + Stores *StoresInfo + Regions *RegionsInfo +} + +// NewBasicCluster creates a BasicCluster. +func NewBasicCluster() *BasicCluster { + return &BasicCluster{ + Stores: NewStoresInfo(), + Regions: NewRegionsInfo(), + } +} + +// GetStores returns all Stores in the cluster. +func (bc *BasicCluster) GetStores() []*StoreInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Stores.GetStores() +} + +// GetMetaStores gets a complete set of metapb.Store. +func (bc *BasicCluster) GetMetaStores() []*metapb.Store { + bc.RLock() + defer bc.RUnlock() + return bc.Stores.GetMetaStores() +} + +// GetStore searches for a store by ID. +func (bc *BasicCluster) GetStore(storeID uint64) *StoreInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Stores.GetStore(storeID) +} + +// GetRegion searches for a region by ID. +func (bc *BasicCluster) GetRegion(regionID uint64) *RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetRegion(regionID) +} + +// GetRegions gets all RegionInfo from regionMap. +func (bc *BasicCluster) GetRegions() []*RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetRegions() +} + +// GetMetaRegions gets a set of metapb.Region from regionMap. +func (bc *BasicCluster) GetMetaRegions() []*metapb.Region { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetMetaRegions() +} + +// GetStoreRegions gets all RegionInfo with a given storeID. +func (bc *BasicCluster) GetStoreRegions(storeID uint64) []*RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStoreRegions(storeID) +} + +// GetRegionStores returns all Stores that contains the region's peer. +func (bc *BasicCluster) GetRegionStores(region *RegionInfo) []*StoreInfo { + bc.RLock() + defer bc.RUnlock() + var Stores []*StoreInfo + for id := range region.GetStoreIds() { + if store := bc.Stores.GetStore(id); store != nil { + Stores = append(Stores, store) + } + } + return Stores +} + +// GetFollowerStores returns all Stores that contains the region's follower peer. +func (bc *BasicCluster) GetFollowerStores(region *RegionInfo) []*StoreInfo { + bc.RLock() + defer bc.RUnlock() + var Stores []*StoreInfo + for id := range region.GetFollowers() { + if store := bc.Stores.GetStore(id); store != nil { + Stores = append(Stores, store) + } + } + return Stores +} + +// GetLeaderStore returns all Stores that contains the region's leader peer. +func (bc *BasicCluster) GetLeaderStore(region *RegionInfo) *StoreInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Stores.GetStore(region.GetLeader().GetStoreId()) +} + +// BlockStore stops balancer from selecting the store. +func (bc *BasicCluster) BlockStore(storeID uint64) error { + bc.Lock() + defer bc.Unlock() + return bc.Stores.BlockStore(storeID) +} + +// UnblockStore allows balancer to select the store. 
+func (bc *BasicCluster) UnblockStore(storeID uint64) { + bc.Lock() + defer bc.Unlock() + bc.Stores.UnblockStore(storeID) +} + +// AttachAvailableFunc attaches an available function to a specific store. +func (bc *BasicCluster) AttachAvailableFunc(storeID uint64, f func() bool) { + bc.Lock() + defer bc.Unlock() + bc.Stores.AttachAvailableFunc(storeID, f) +} + +// UpdateStoreStatus updates the information of the store. +func (bc *BasicCluster) UpdateStoreStatus(storeID uint64, leaderCount int, regionCount int, pendingPeerCount int, leaderSize int64, regionSize int64) { + bc.Lock() + defer bc.Unlock() + bc.Stores.UpdateStoreStatus(storeID, leaderCount, regionCount, pendingPeerCount, leaderSize, regionSize) +} + +// RandFollowerRegion returns a random region that has a follower on the store. +func (bc *BasicCluster) RandFollowerRegion(storeID uint64, opts ...RegionOption) *RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.RandFollowerRegion(storeID, opts...) +} + +// RandLeaderRegion returns a random region that has leader on the store. +func (bc *BasicCluster) RandLeaderRegion(storeID uint64, opts ...RegionOption) *RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.RandLeaderRegion(storeID, opts...) +} + +// RandPendingRegion returns a random region that has a pending peer on the store. +func (bc *BasicCluster) RandPendingRegion(storeID uint64, opts ...RegionOption) *RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.RandPendingRegion(storeID, opts...) +} + +// GetPendingRegionsWithLock return pending regions subtree by storeID +func (bc *BasicCluster) GetPendingRegionsWithLock(storeID uint64, callback func(RegionsContainer)) { + bc.RLock() + defer bc.RUnlock() + callback(bc.Regions.pendingPeers[storeID]) +} + +// GetLeadersWithLock return leaders subtree by storeID +func (bc *BasicCluster) GetLeadersWithLock(storeID uint64, callback func(RegionsContainer)) { + bc.RLock() + defer bc.RUnlock() + callback(bc.Regions.leaders[storeID]) +} + +// GetFollowersWithLock return leaders subtree by storeID +func (bc *BasicCluster) GetFollowersWithLock(storeID uint64, callback func(RegionsContainer)) { + bc.RLock() + defer bc.RUnlock() + callback(bc.Regions.followers[storeID]) +} + +// GetRegionCount gets the total count of RegionInfo of regionMap. +func (bc *BasicCluster) GetRegionCount() int { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetRegionCount() +} + +// GetStoreCount returns the total count of storeInfo. +func (bc *BasicCluster) GetStoreCount() int { + bc.RLock() + defer bc.RUnlock() + return bc.Stores.GetStoreCount() +} + +// GetStoreRegionCount gets the total count of a store's leader and follower RegionInfo by storeID. +func (bc *BasicCluster) GetStoreRegionCount(storeID uint64) int { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStoreLeaderCount(storeID) + bc.Regions.GetStoreFollowerCount(storeID) + bc.Regions.GetStoreLearnerCount(storeID) +} + +// GetStoreLeaderCount get the total count of a store's leader RegionInfo. +func (bc *BasicCluster) GetStoreLeaderCount(storeID uint64) int { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStoreLeaderCount(storeID) +} + +// GetStoreFollowerCount get the total count of a store's follower RegionInfo. +func (bc *BasicCluster) GetStoreFollowerCount(storeID uint64) int { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStoreFollowerCount(storeID) +} + +// GetStorePendingPeerCount gets the total count of a store's region that includes pending peer. 
+func (bc *BasicCluster) GetStorePendingPeerCount(storeID uint64) int { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStorePendingPeerCount(storeID) +} + +// GetStoreLeaderRegionSize get total size of store's leader regions. +func (bc *BasicCluster) GetStoreLeaderRegionSize(storeID uint64) int64 { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStoreLeaderRegionSize(storeID) +} + +// GetStoreRegionSize get total size of store's regions. +func (bc *BasicCluster) GetStoreRegionSize(storeID uint64) int64 { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetStoreLeaderRegionSize(storeID) + bc.Regions.GetStoreFollowerRegionSize(storeID) + bc.Regions.GetStoreLearnerRegionSize(storeID) +} + +// GetAverageRegionSize returns the average region approximate size. +func (bc *BasicCluster) GetAverageRegionSize() int64 { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetAverageRegionSize() +} + +// PutStore put a store. +func (bc *BasicCluster) PutStore(store *StoreInfo) { + bc.Lock() + defer bc.Unlock() + bc.Stores.SetStore(store) +} + +// DeleteStore deletes a store. +func (bc *BasicCluster) DeleteStore(store *StoreInfo) { + bc.Lock() + defer bc.Unlock() + bc.Stores.DeleteStore(store) +} + +// TakeStore returns the point of the origin StoreInfo with the specified storeID. +func (bc *BasicCluster) TakeStore(storeID uint64) *StoreInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Stores.TakeStore(storeID) +} + +// PutRegion put a region. +func (bc *BasicCluster) PutRegion(region *RegionInfo) []*RegionInfo { + bc.Lock() + defer bc.Unlock() + return bc.Regions.SetRegion(region) +} + +// RemoveRegion removes RegionInfo from regionTree and regionMap. +func (bc *BasicCluster) RemoveRegion(region *RegionInfo) { + bc.Lock() + defer bc.Unlock() + bc.Regions.RemoveRegion(region) +} + +// SearchRegion searches RegionInfo from regionTree. +func (bc *BasicCluster) SearchRegion(regionKey []byte) *RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.SearchRegion(regionKey) +} + +// SearchPrevRegion searches previous RegionInfo from regionTree. +func (bc *BasicCluster) SearchPrevRegion(regionKey []byte) *RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.SearchPrevRegion(regionKey) +} + +// ScanRange scans regions intersecting [start key, end key), returns at most +// `limit` regions. limit <= 0 means no limit. +func (bc *BasicCluster) ScanRange(startKey, endKey []byte, limit int) []*RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.ScanRange(startKey, endKey, limit) +} + +// GetOverlaps returns the regions which are overlapped with the specified region range. +func (bc *BasicCluster) GetOverlaps(region *RegionInfo) []*RegionInfo { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.GetOverlaps(region) +} + +// Length returns the RegionsInfo length. +func (bc *BasicCluster) Length() int { + bc.RLock() + defer bc.RUnlock() + return bc.Regions.Length() +} + +// RegionSetInformer provides access to a shared informer of regions. 
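+// Schedulers and checkers are expected to observe the cluster through informer
+// interfaces like this one rather than through RaftCluster directly.
+// Illustrative sketch (hypothetical caller; cluster and storeID are assumed to
+// be in scope):
+//
+//	if region := cluster.RandFollowerRegion(storeID); region != nil {
+//		// try to build an operator that moves this follower to a better store
+//	}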
+type RegionSetInformer interface {
+ RandFollowerRegion(storeID uint64, opts ...RegionOption) *RegionInfo
+ RandLeaderRegion(storeID uint64, opts ...RegionOption) *RegionInfo
+ RandPendingRegion(storeID uint64, opts ...RegionOption) *RegionInfo
+ GetPendingRegionsWithLock(storeID uint64, callback func(RegionsContainer))
+ GetLeadersWithLock(storeID uint64, callback func(RegionsContainer))
+ GetFollowersWithLock(storeID uint64, callback func(RegionsContainer))
+ GetAverageRegionSize() int64
+ GetStoreRegionCount(storeID uint64) int
+ GetRegion(id uint64) *RegionInfo
+ ScanRegions(startKey, endKey []byte, limit int) []*RegionInfo
+}
+
+// StoreSetInformer provides access to a shared informer of stores.
+type StoreSetInformer interface {
+ GetStores() []*StoreInfo
+ GetStore(id uint64) *StoreInfo
+
+ GetRegionStores(region *RegionInfo) []*StoreInfo
+ GetFollowerStores(region *RegionInfo) []*StoreInfo
+ GetLeaderStore(region *RegionInfo) *StoreInfo
+}
+
+// StoreSetController is used to control stores' status.
+type StoreSetController interface {
+ BlockStore(id uint64) error
+ UnblockStore(id uint64)
+
+ AttachAvailableFunc(id uint64, f func() bool)
+}
diff --git a/scheduler/server/core/errors.go b/scheduler/server/core/errors.go
new file mode 100644
index 00000000..a58b9adc
--- /dev/null
+++ b/scheduler/server/core/errors.go
@@ -0,0 +1,63 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package core defines core characteristics of the server.
+// This file uses the errcode package to define PD specific error codes.
+// Probably this should be a different package.
+package core
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/pingcap/errcode"
+)
+
+var (
+ // Parent for other errors
+ storeStateCode = errcode.StateCode.Child("state.store")
+
+ // StoreBlockedCode is returned when an operation is requested on a store that is in a blocked state
+ StoreBlockedCode = storeStateCode.Child("state.store.blocked")
+
+ // StoreTombstonedCode is returned when an invalid operation is attempted on a store which is in a removed state.
+ StoreTombstonedCode = storeStateCode.Child("state.store.tombstoned").SetHTTP(http.StatusGone)
+)
+
+var _ errcode.ErrorCode = (*StoreTombstonedErr)(nil) // assert implements interface
+var _ errcode.ErrorCode = (*StoreBlockedErr)(nil) // assert implements interface
+
+// StoreErr can be newtyped or embedded in your own error
+type StoreErr struct {
+ StoreID uint64 `json:"storeId"`
+}
+
+// StoreTombstonedErr indicates that an invalid operation was attempted on a store which is in a removed state.
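+// It carries StoreTombstonedCode and therefore renders as HTTP 410 Gone.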
+type StoreTombstonedErr StoreErr + +func (e StoreTombstonedErr) Error() string { + return fmt.Sprintf("The store %020d has been removed", e.StoreID) +} + +// Code returns StoreTombstonedCode +func (e StoreTombstonedErr) Code() errcode.Code { return StoreTombstonedCode } + +// StoreBlockedErr has a Code() of StoreBlockedCode +type StoreBlockedErr StoreErr + +func (e StoreBlockedErr) Error() string { + return fmt.Sprintf("store %v is blocked", e.StoreID) +} + +// Code returns StoreBlockedCode +func (e StoreBlockedErr) Code() errcode.Code { return StoreBlockedCode } diff --git a/scheduler/server/core/kind.go b/scheduler/server/core/kind.go new file mode 100644 index 00000000..d3686e03 --- /dev/null +++ b/scheduler/server/core/kind.go @@ -0,0 +1,57 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +// PriorityLevel lower level means higher priority +type PriorityLevel int + +// Built-in priority level +const ( + LowPriority PriorityLevel = iota + NormalPriority + HighPriority +) + +// ScheduleKind distinguishes resources and schedule strategy. +type ScheduleKind struct { + Resource ResourceKind +} + +// NewScheduleKind creates a schedule kind with resource kind and schedule strategy. +func NewScheduleKind(Resource ResourceKind) ScheduleKind { + return ScheduleKind{ + Resource: Resource, + } +} + +// ResourceKind distinguishes different kinds of resources. +type ResourceKind int + +const ( + // LeaderKind indicates the leader kind resource + LeaderKind ResourceKind = iota + // RegionKind indicates the region kind resource + RegionKind +) + +func (k ResourceKind) String() string { + switch k { + case LeaderKind: + return "leader" + case RegionKind: + return "region" + default: + return "unknown" + } +} diff --git a/scheduler/server/core/region.go b/scheduler/server/core/region.go new file mode 100644 index 00000000..e8b5fbee --- /dev/null +++ b/scheduler/server/core/region.go @@ -0,0 +1,823 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "bytes" + "encoding/hex" + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" +) + +// RegionInfo records detail region info. +// Read-Only once created. +type RegionInfo struct { + meta *metapb.Region + learners []*metapb.Peer + voters []*metapb.Peer + leader *metapb.Peer + pendingPeers []*metapb.Peer + approximateSize int64 +} + +// NewRegionInfo creates RegionInfo with region's meta and leader peer. 
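+// Optional fields are filled in through RegionCreateOption functions. A
+// minimal sketch, assuming meta (*metapb.Region) and leader (*metapb.Peer) are
+// already built:
+//
+//	region := NewRegionInfo(meta, leader, SetApproximateSize(10))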
+func NewRegionInfo(region *metapb.Region, leader *metapb.Peer, opts ...RegionCreateOption) *RegionInfo { + regionInfo := &RegionInfo{ + meta: region, + leader: leader, + } + + for _, opt := range opts { + opt(regionInfo) + } + classifyVoterAndLearner(regionInfo) + return regionInfo +} + +// classifyVoterAndLearner sorts out voter and learner from peers into different slice. +func classifyVoterAndLearner(region *RegionInfo) { + voters := make([]*metapb.Peer, 0, len(region.meta.Peers)) + for _, p := range region.meta.Peers { + voters = append(voters, p) + } + region.voters = voters +} + +// EmptyRegionApproximateSize is the region approximate size of an empty region +// (heartbeat size <= 1MB). +const EmptyRegionApproximateSize = 1 + +// RegionFromHeartbeat constructs a Region from region heartbeat. +func RegionFromHeartbeat(heartbeat *schedulerpb.RegionHeartbeatRequest) *RegionInfo { + // Convert unit to MB. + // If region is empty or less than 1MB, use 1MB instead. + regionSize := heartbeat.GetApproximateSize() / (1 << 20) + if regionSize < EmptyRegionApproximateSize { + regionSize = EmptyRegionApproximateSize + } + + region := &RegionInfo{ + meta: heartbeat.GetRegion(), + leader: heartbeat.GetLeader(), + pendingPeers: heartbeat.GetPendingPeers(), + approximateSize: int64(regionSize), + } + + classifyVoterAndLearner(region) + return region +} + +// Clone returns a copy of current regionInfo. +func (r *RegionInfo) Clone(opts ...RegionCreateOption) *RegionInfo { + pendingPeers := make([]*metapb.Peer, 0, len(r.pendingPeers)) + for _, peer := range r.pendingPeers { + pendingPeers = append(pendingPeers, proto.Clone(peer).(*metapb.Peer)) + } + + region := &RegionInfo{ + meta: proto.Clone(r.meta).(*metapb.Region), + leader: proto.Clone(r.leader).(*metapb.Peer), + pendingPeers: pendingPeers, + approximateSize: r.approximateSize, + } + + for _, opt := range opts { + opt(region) + } + classifyVoterAndLearner(region) + return region +} + +// GetLearners returns the learners. +func (r *RegionInfo) GetLearners() []*metapb.Peer { + return r.learners +} + +// GetVoters returns the voters. +func (r *RegionInfo) GetVoters() []*metapb.Peer { + return r.voters +} + +// GetPeer returns the peer with specified peer id. +func (r *RegionInfo) GetPeer(peerID uint64) *metapb.Peer { + for _, peer := range r.meta.GetPeers() { + if peer.GetId() == peerID { + return peer + } + } + return nil +} + +// GetDownLearner returns the down learner with soecified peer id. +func (r *RegionInfo) GetDownLearner(peerID uint64) *metapb.Peer { + return nil +} + +// GetPendingPeer returns the pending peer with specified peer id. +func (r *RegionInfo) GetPendingPeer(peerID uint64) *metapb.Peer { + for _, peer := range r.pendingPeers { + if peer.GetId() == peerID { + return peer + } + } + return nil +} + +// GetPendingVoter returns the pending voter with specified peer id. +func (r *RegionInfo) GetPendingVoter(peerID uint64) *metapb.Peer { + for _, peer := range r.pendingPeers { + if peer.GetId() == peerID { + return peer + } + } + return nil +} + +// GetPendingLearner returns the pending learner peer with specified peer id. +func (r *RegionInfo) GetPendingLearner(peerID uint64) *metapb.Peer { + return nil +} + +// GetStorePeer returns the peer in specified store. +func (r *RegionInfo) GetStorePeer(storeID uint64) *metapb.Peer { + for _, peer := range r.meta.GetPeers() { + if peer.GetStoreId() == storeID { + return peer + } + } + return nil +} + +// GetStoreVoter returns the voter in specified store. 
+func (r *RegionInfo) GetStoreVoter(storeID uint64) *metapb.Peer { + for _, peer := range r.voters { + if peer.GetStoreId() == storeID { + return peer + } + } + return nil +} + +// GetStoreLearner returns the learner peer in specified store. +func (r *RegionInfo) GetStoreLearner(storeID uint64) *metapb.Peer { + for _, peer := range r.learners { + if peer.GetStoreId() == storeID { + return peer + } + } + return nil +} + +// GetStoreIds returns a map indicate the region distributed. +func (r *RegionInfo) GetStoreIds() map[uint64]struct{} { + peers := r.meta.GetPeers() + stores := make(map[uint64]struct{}, len(peers)) + for _, peer := range peers { + stores[peer.GetStoreId()] = struct{}{} + } + return stores +} + +// GetFollowers returns a map indicate the follow peers distributed. +func (r *RegionInfo) GetFollowers() map[uint64]*metapb.Peer { + peers := r.GetVoters() + followers := make(map[uint64]*metapb.Peer, len(peers)) + for _, peer := range peers { + if r.leader == nil || r.leader.GetId() != peer.GetId() { + followers[peer.GetStoreId()] = peer + } + } + return followers +} + +// GetFollower randomly returns a follow peer. +func (r *RegionInfo) GetFollower() *metapb.Peer { + for _, peer := range r.GetVoters() { + if r.leader == nil || r.leader.GetId() != peer.GetId() { + return peer + } + } + return nil +} + +// GetDiffFollowers returns the followers which is not located in the same +// store as any other followers of the another specified region. +func (r *RegionInfo) GetDiffFollowers(other *RegionInfo) []*metapb.Peer { + res := make([]*metapb.Peer, 0, len(r.meta.Peers)) + for _, p := range r.GetFollowers() { + diff := true + for _, o := range other.GetFollowers() { + if p.GetStoreId() == o.GetStoreId() { + diff = false + break + } + } + if diff { + res = append(res, p) + } + } + return res +} + +// GetID returns the ID of the region. +func (r *RegionInfo) GetID() uint64 { + return r.meta.GetId() +} + +// GetMeta returns the meta information of the region. +func (r *RegionInfo) GetMeta() *metapb.Region { + return r.meta +} + +// GetApproximateSize returns the approximate size of the region. +func (r *RegionInfo) GetApproximateSize() int64 { + return r.approximateSize +} + +// GetPendingPeers returns the pending peers of the region. +func (r *RegionInfo) GetPendingPeers() []*metapb.Peer { + return r.pendingPeers +} + +// GetLeader returns the leader of the region. +func (r *RegionInfo) GetLeader() *metapb.Peer { + return r.leader +} + +// GetStartKey returns the start key of the region. +func (r *RegionInfo) GetStartKey() []byte { + return r.meta.StartKey +} + +// GetEndKey returns the end key of the region. +func (r *RegionInfo) GetEndKey() []byte { + return r.meta.EndKey +} + +// GetPeers returns the peers of the region. +func (r *RegionInfo) GetPeers() []*metapb.Peer { + return r.meta.GetPeers() +} + +// GetRegionEpoch returns the region epoch of the region. +func (r *RegionInfo) GetRegionEpoch() *metapb.RegionEpoch { + return r.meta.RegionEpoch +} + +// regionMap wraps a map[uint64]*core.RegionInfo and supports randomly pick a region. 
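+// Besides the id -> RegionInfo mapping it tracks the total approximate size of
+// the regions it holds, which Put and Delete keep up to date.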
+type regionMap struct { + m map[uint64]*RegionInfo + totalSize int64 + totalKeys int64 +} + +func newRegionMap() *regionMap { + return ®ionMap{ + m: make(map[uint64]*RegionInfo), + } +} + +func (rm *regionMap) Len() int { + if rm == nil { + return 0 + } + return len(rm.m) +} + +func (rm *regionMap) Get(id uint64) *RegionInfo { + if rm == nil { + return nil + } + if r, ok := rm.m[id]; ok { + return r + } + return nil +} + +func (rm *regionMap) Put(region *RegionInfo) { + if old, ok := rm.m[region.GetID()]; ok { + rm.totalSize -= old.approximateSize + } + rm.m[region.GetID()] = region + rm.totalSize += region.approximateSize +} + +func (rm *regionMap) Delete(id uint64) { + if rm == nil { + return + } + if old, ok := rm.m[id]; ok { + delete(rm.m, id) + rm.totalSize -= old.approximateSize + } +} + +func (rm *regionMap) TotalSize() int64 { + if rm.Len() == 0 { + return 0 + } + return rm.totalSize +} + +// regionSubTree is used to manager different types of regions. +type regionSubTree struct { + *regionTree + totalSize int64 +} + +func newRegionSubTree() *regionSubTree { + return ®ionSubTree{ + regionTree: newRegionTree(), + totalSize: 0, + } +} + +func (rst *regionSubTree) TotalSize() int64 { + if rst.length() == 0 { + return 0 + } + return rst.totalSize +} + +func (rst *regionSubTree) scanRanges() []*RegionInfo { + if rst.length() == 0 { + return nil + } + var res []*RegionInfo + rst.scanRange([]byte(""), func(region *RegionInfo) bool { + res = append(res, region) + return true + }) + return res +} + +func (rst *regionSubTree) update(region *RegionInfo) { + if r := rst.find(region); r != nil { + rst.totalSize += region.approximateSize - r.region.approximateSize + r.region = region + return + } + rst.totalSize += region.approximateSize + rst.regionTree.update(region) +} + +func (rst *regionSubTree) remove(region *RegionInfo) { + if rst.length() == 0 { + return + } + rst.regionTree.remove(region) +} + +func (rst *regionSubTree) length() int { + if rst == nil { + return 0 + } + return rst.regionTree.length() +} + +func (rst *regionSubTree) RandomRegion(startKey, endKey []byte) *RegionInfo { + if rst.length() == 0 { + return nil + } + return rst.regionTree.RandomRegion(startKey, endKey) +} + +// RegionsInfo for export +type RegionsInfo struct { + tree *regionTree + regions *regionMap // regionID -> regionInfo + leaders map[uint64]*regionSubTree // storeID -> regionSubTree + followers map[uint64]*regionSubTree // storeID -> regionSubTree + learners map[uint64]*regionSubTree // storeID -> regionSubTree + pendingPeers map[uint64]*regionSubTree // storeID -> regionSubTree +} + +// NewRegionsInfo creates RegionsInfo with tree, regions, leaders and followers +func NewRegionsInfo() *RegionsInfo { + return &RegionsInfo{ + tree: newRegionTree(), + regions: newRegionMap(), + leaders: make(map[uint64]*regionSubTree), + followers: make(map[uint64]*regionSubTree), + learners: make(map[uint64]*regionSubTree), + pendingPeers: make(map[uint64]*regionSubTree), + } +} + +// GetRegion returns the RegionInfo with regionID +func (r *RegionsInfo) GetRegion(regionID uint64) *RegionInfo { + region := r.regions.Get(regionID) + if region == nil { + return nil + } + return region +} + +// SetRegion sets the RegionInfo with regionID +func (r *RegionsInfo) SetRegion(region *RegionInfo) []*RegionInfo { + if origin := r.regions.Get(region.GetID()); origin != nil { + r.RemoveRegion(origin) + } + return r.AddRegion(region) +} + +// Length returns the RegionsInfo length +func (r *RegionsInfo) Length() int { + return 
r.regions.Len() +} + +// TreeLength returns the RegionsInfo tree length(now only used in test) +func (r *RegionsInfo) TreeLength() int { + return r.tree.length() +} + +// GetOverlaps returns the regions which are overlapped with the specified region range. +func (r *RegionsInfo) GetOverlaps(region *RegionInfo) []*RegionInfo { + return r.tree.getOverlaps(region) +} + +// AddRegion adds RegionInfo to regionTree and regionMap, also update leaders and followers by region peers +func (r *RegionsInfo) AddRegion(region *RegionInfo) []*RegionInfo { + // Add to tree and regions. + overlaps := r.tree.update(region) + for _, item := range overlaps { + r.RemoveRegion(r.GetRegion(item.GetID())) + } + + r.regions.Put(region) + + // Add to leaders and followers. + for _, peer := range region.GetVoters() { + storeID := peer.GetStoreId() + if peer.GetId() == region.leader.GetId() { + // Add leader peer to leaders. + store, ok := r.leaders[storeID] + if !ok { + store = newRegionSubTree() + r.leaders[storeID] = store + } + store.update(region) + } else { + // Add follower peer to followers. + store, ok := r.followers[storeID] + if !ok { + store = newRegionSubTree() + r.followers[storeID] = store + } + store.update(region) + } + } + + // Add to learners. + for _, peer := range region.GetLearners() { + storeID := peer.GetStoreId() + store, ok := r.learners[storeID] + if !ok { + store = newRegionSubTree() + r.learners[storeID] = store + } + store.update(region) + } + + for _, peer := range region.pendingPeers { + storeID := peer.GetStoreId() + store, ok := r.pendingPeers[storeID] + if !ok { + store = newRegionSubTree() + r.pendingPeers[storeID] = store + } + store.update(region) + } + + return overlaps +} + +// RemoveRegion removes RegionInfo from regionTree and regionMap +func (r *RegionsInfo) RemoveRegion(region *RegionInfo) { + // Remove from tree and regions. + r.tree.remove(region) + r.regions.Delete(region.GetID()) + // Remove from leaders and followers. 
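+ // The region is removed from every per-store subtree; for stores that do not
+ // hold the region the map lookup yields a nil subtree, and remove() is a
+ // no-op there because length() handles the nil receiver.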
+ for _, peer := range region.meta.GetPeers() { + storeID := peer.GetStoreId() + r.leaders[storeID].remove(region) + r.followers[storeID].remove(region) + r.learners[storeID].remove(region) + r.pendingPeers[storeID].remove(region) + } +} + +// SearchRegion searches RegionInfo from regionTree +func (r *RegionsInfo) SearchRegion(regionKey []byte) *RegionInfo { + region := r.tree.search(regionKey) + if region == nil { + return nil + } + return r.GetRegion(region.GetID()) +} + +// SearchPrevRegion searches previous RegionInfo from regionTree +func (r *RegionsInfo) SearchPrevRegion(regionKey []byte) *RegionInfo { + region := r.tree.searchPrev(regionKey) + if region == nil { + return nil + } + return r.GetRegion(region.GetID()) +} + +// GetRegions gets all RegionInfo from regionMap +func (r *RegionsInfo) GetRegions() []*RegionInfo { + regions := make([]*RegionInfo, 0, r.regions.Len()) + for _, region := range r.regions.m { + regions = append(regions, region) + } + return regions +} + +// GetStoreRegions gets all RegionInfo with a given storeID +func (r *RegionsInfo) GetStoreRegions(storeID uint64) []*RegionInfo { + regions := make([]*RegionInfo, 0, r.GetStoreLeaderCount(storeID)+r.GetStoreFollowerCount(storeID)) + if leaders, ok := r.leaders[storeID]; ok { + for _, region := range leaders.scanRanges() { + regions = append(regions, region) + } + } + if followers, ok := r.followers[storeID]; ok { + for _, region := range followers.scanRanges() { + regions = append(regions, region) + } + } + return regions +} + +// GetStoreLeaderRegionSize get total size of store's leader regions +func (r *RegionsInfo) GetStoreLeaderRegionSize(storeID uint64) int64 { + return r.leaders[storeID].TotalSize() +} + +// GetStoreFollowerRegionSize get total size of store's follower regions +func (r *RegionsInfo) GetStoreFollowerRegionSize(storeID uint64) int64 { + return r.followers[storeID].TotalSize() +} + +// GetStoreLearnerRegionSize get total size of store's learner regions +func (r *RegionsInfo) GetStoreLearnerRegionSize(storeID uint64) int64 { + return r.learners[storeID].TotalSize() +} + +// GetStoreRegionSize get total size of store's regions +func (r *RegionsInfo) GetStoreRegionSize(storeID uint64) int64 { + return r.GetStoreLeaderRegionSize(storeID) + r.GetStoreFollowerRegionSize(storeID) + r.GetStoreLearnerRegionSize(storeID) +} + +// GetMetaRegions gets a set of metapb.Region from regionMap +func (r *RegionsInfo) GetMetaRegions() []*metapb.Region { + regions := make([]*metapb.Region, 0, r.regions.Len()) + for _, region := range r.regions.m { + regions = append(regions, proto.Clone(region.meta).(*metapb.Region)) + } + return regions +} + +// GetRegionCount gets the total count of RegionInfo of regionMap +func (r *RegionsInfo) GetRegionCount() int { + return r.regions.Len() +} + +// GetStoreRegionCount gets the total count of a store's leader and follower RegionInfo by storeID +func (r *RegionsInfo) GetStoreRegionCount(storeID uint64) int { + return r.GetStoreLeaderCount(storeID) + r.GetStoreFollowerCount(storeID) + r.GetStoreLearnerCount(storeID) +} + +// GetStorePendingPeerCount gets the total count of a store's region that includes pending peer +func (r *RegionsInfo) GetStorePendingPeerCount(storeID uint64) int { + return r.pendingPeers[storeID].length() +} + +// GetStoreLeaderCount get the total count of a store's leader RegionInfo +func (r *RegionsInfo) GetStoreLeaderCount(storeID uint64) int { + return r.leaders[storeID].length() +} + +// GetStoreFollowerCount get the total count of a store's follower 
RegionInfo +func (r *RegionsInfo) GetStoreFollowerCount(storeID uint64) int { + return r.followers[storeID].length() +} + +// GetStoreLearnerCount get the total count of a store's learner RegionInfo +func (r *RegionsInfo) GetStoreLearnerCount(storeID uint64) int { + return r.learners[storeID].length() +} + +// RandRegion get a region by random +func (r *RegionsInfo) RandRegion(opts ...RegionOption) *RegionInfo { + return randRegion(r.tree, opts...) +} + +// RandPendingRegion randomly gets a store's region with a pending peer. +func (r *RegionsInfo) RandPendingRegion(storeID uint64, opts ...RegionOption) *RegionInfo { + return randRegion(r.pendingPeers[storeID], opts...) +} + +// RandLeaderRegion randomly gets a store's leader region. +func (r *RegionsInfo) RandLeaderRegion(storeID uint64, opts ...RegionOption) *RegionInfo { + return randRegion(r.leaders[storeID], opts...) +} + +// RandFollowerRegion randomly gets a store's follower region. +func (r *RegionsInfo) RandFollowerRegion(storeID uint64, opts ...RegionOption) *RegionInfo { + return randRegion(r.followers[storeID], opts...) +} + +// GetPendingRegionsWithLock return pending regions subtree by storeID +func (r *RegionsInfo) GetPendingRegionsWithLock(storeID uint64, callback func(RegionsContainer)) { + callback(r.pendingPeers[storeID]) +} + +// GetLeadersWithLock return leaders subtree by storeID +func (r *RegionsInfo) GetLeadersWithLock(storeID uint64, callback func(RegionsContainer)) { + callback(r.leaders[storeID]) +} + +// GetFollowersWithLock return leaders subtree by storeID +func (r *RegionsInfo) GetFollowersWithLock(storeID uint64, callback func(RegionsContainer)) { + callback(r.followers[storeID]) +} + +// GetLeader return leader RegionInfo by storeID and regionID(now only used in test) +func (r *RegionsInfo) GetLeader(storeID uint64, region *RegionInfo) *RegionInfo { + return r.leaders[storeID].find(region).region +} + +// GetFollower return follower RegionInfo by storeID and regionID(now only used in test) +func (r *RegionsInfo) GetFollower(storeID uint64, region *RegionInfo) *RegionInfo { + return r.followers[storeID].find(region).region +} + +// ScanRange scans regions intersecting [start key, end key), returns at most +// `limit` regions. limit <= 0 means no limit. +func (r *RegionsInfo) ScanRange(startKey, endKey []byte, limit int) []*RegionInfo { + var res []*RegionInfo + r.tree.scanRange(startKey, func(region *RegionInfo) bool { + if len(endKey) > 0 && bytes.Compare(region.GetStartKey(), endKey) >= 0 { + return false + } + if limit > 0 && len(res) >= limit { + return false + } + res = append(res, r.GetRegion(region.GetID())) + return true + }) + return res +} + +// GetAverageRegionSize returns the average region approximate size. +func (r *RegionsInfo) GetAverageRegionSize() int64 { + if r.regions.Len() == 0 { + return 0 + } + return r.regions.TotalSize() / int64(r.regions.Len()) +} + +const randomRegionMaxRetry = 10 + +// RegionsContainer is a container to store regions. 
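Editor's note: the `RegionsInfo` methods above keep a single region reachable through several indexes at once — the key-ordered tree, the id map, and one per-store sub-tree for each peer role. The following is a minimal sketch (not part of the commit; the function name is invented) of how one `AddRegion` call files a region into those role indexes, using `NewRegionInfo` and `WithPendingPeers` from this package.

```go
package core

import "github.com/pingcap-incubator/tinykv/proto/pkg/metapb"

// exampleIndexRegion (hypothetical) shows which per-store indexes a region
// lands in after AddRegion.
func exampleIndexRegion() *RegionsInfo {
	peers := []*metapb.Peer{
		{Id: 1, StoreId: 1},
		{Id: 2, StoreId: 2},
		{Id: 3, StoreId: 3},
	}
	region := NewRegionInfo(
		&metapb.Region{Id: 100, StartKey: []byte("a"), EndKey: []byte("b"), Peers: peers},
		peers[0], // leader peer lives on store 1
		WithPendingPeers([]*metapb.Peer{peers[2]}),
	)

	ri := NewRegionsInfo()
	ri.AddRegion(region)
	// Region 100 is now reachable via:
	//   ri.leaders[1]                     (leader index)
	//   ri.followers[2], ri.followers[3]  (voter, non-leader indexes)
	//   ri.pendingPeers[3]                (pending-peer index)
	return ri
}
```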
+type RegionsContainer interface { + RandomRegion(startKey, endKey []byte) *RegionInfo +} + +func randRegion(regions RegionsContainer, opts ...RegionOption) *RegionInfo { + for i := 0; i < randomRegionMaxRetry; i++ { + region := regions.RandomRegion(nil, nil) + if region == nil { + return nil + } + isSelect := true + for _, opt := range opts { + if !opt(region) { + isSelect = false + break + } + } + if isSelect { + return region + } + } + return nil +} + +// DiffRegionPeersInfo return the difference of peers info between two RegionInfo +func DiffRegionPeersInfo(origin *RegionInfo, other *RegionInfo) string { + var ret []string + for _, a := range origin.meta.Peers { + both := false + for _, b := range other.meta.Peers { + if reflect.DeepEqual(a, b) { + both = true + break + } + } + if !both { + ret = append(ret, fmt.Sprintf("Remove peer:{%v}", a)) + } + } + for _, b := range other.meta.Peers { + both := false + for _, a := range origin.meta.Peers { + if reflect.DeepEqual(a, b) { + both = true + break + } + } + if !both { + ret = append(ret, fmt.Sprintf("Add peer:{%v}", b)) + } + } + return strings.Join(ret, ",") +} + +// DiffRegionKeyInfo return the difference of key info between two RegionInfo +func DiffRegionKeyInfo(origin *RegionInfo, other *RegionInfo) string { + var ret []string + if !bytes.Equal(origin.meta.StartKey, other.meta.StartKey) { + ret = append(ret, fmt.Sprintf("StartKey Changed:{%s} -> {%s}", HexRegionKey(origin.meta.StartKey), HexRegionKey(other.meta.StartKey))) + } else { + ret = append(ret, fmt.Sprintf("StartKey:{%s}", HexRegionKey(origin.meta.StartKey))) + } + if !bytes.Equal(origin.meta.EndKey, other.meta.EndKey) { + ret = append(ret, fmt.Sprintf("EndKey Changed:{%s} -> {%s}", HexRegionKey(origin.meta.EndKey), HexRegionKey(other.meta.EndKey))) + } else { + ret = append(ret, fmt.Sprintf("EndKey:{%s}", HexRegionKey(origin.meta.EndKey))) + } + + return strings.Join(ret, ", ") +} + +// HexRegionKey converts region key to hex format. Used for formating region in +// logs. +func HexRegionKey(key []byte) []byte { + return []byte(strings.ToUpper(hex.EncodeToString(key))) +} + +// RegionToHexMeta converts a region meta's keys to hex format. Used for formating +// region in logs. +func RegionToHexMeta(meta *metapb.Region) HexRegionMeta { + if meta == nil { + return HexRegionMeta{} + } + meta = proto.Clone(meta).(*metapb.Region) + meta.StartKey = HexRegionKey(meta.StartKey) + meta.EndKey = HexRegionKey(meta.EndKey) + return HexRegionMeta{meta} +} + +// HexRegionMeta is a region meta in the hex format. Used for formating region in logs. +type HexRegionMeta struct { + *metapb.Region +} + +func (h HexRegionMeta) String() string { + return strings.TrimSpace(proto.CompactTextString(h.Region)) +} + +// RegionsToHexMeta converts regions' meta keys to hex format. Used for formating +// region in logs. +func RegionsToHexMeta(regions []*metapb.Region) HexRegionsMeta { + hexRegionMetas := make([]*metapb.Region, len(regions)) + for i, region := range regions { + meta := proto.Clone(region).(*metapb.Region) + meta.StartKey = HexRegionKey(meta.StartKey) + meta.EndKey = HexRegionKey(meta.EndKey) + + hexRegionMetas[i] = meta + } + return HexRegionsMeta(hexRegionMetas) +} + +// HexRegionsMeta is a slice of regions' meta in the hex format. Used for formating +// region in logs. 
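Editor's note: `DiffRegionKeyInfo`, `DiffRegionPeersInfo` and `HexRegionKey` above exist purely for logging. A small sketch (not part of the commit; the function name is invented) of what they print:

```go
package core

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
)

// exampleKeyFormatting (hypothetical) exercises the log-oriented helpers.
func exampleKeyFormatting() {
	// Region keys are raw bytes, so logs render them as upper-case hex.
	fmt.Printf("%s\n", HexRegionKey([]byte("a\x00b"))) // 610062

	// DiffRegionKeyInfo reports which end of the key range changed.
	oldRegion := NewRegionInfo(&metapb.Region{StartKey: []byte("a"), EndKey: []byte("c")}, nil)
	newRegion := NewRegionInfo(&metapb.Region{StartKey: []byte("a"), EndKey: []byte("d")}, nil)
	fmt.Println(DiffRegionKeyInfo(oldRegion, newRegion))
	// StartKey:{61}, EndKey Changed:{63} -> {64}
}
```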
+type HexRegionsMeta []*metapb.Region + +func (h HexRegionsMeta) String() string { + var b strings.Builder + for _, r := range h { + b.WriteString(proto.CompactTextString(r)) + } + return strings.TrimSpace(b.String()) +} diff --git a/scheduler/server/core/region_option.go b/scheduler/server/core/region_option.go new file mode 100644 index 00000000..285bab0a --- /dev/null +++ b/scheduler/server/core/region_option.go @@ -0,0 +1,128 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" +) + +// RegionOption is used to select region. +type RegionOption func(region *RegionInfo) bool + +// HealthRegion checks if the region is healthy. +func HealthRegion() RegionOption { + return func(region *RegionInfo) bool { + return len(region.pendingPeers) == 0 && len(region.learners) == 0 + } +} + +// HealthRegionAllowPending checks if the region is healthy with allowing the pending peer. +func HealthRegionAllowPending() RegionOption { + return func(region *RegionInfo) bool { + return len(region.learners) == 0 + } +} + +// RegionCreateOption used to create region. +type RegionCreateOption func(region *RegionInfo) + +// WithPendingPeers sets the pending peers for the region. +func WithPendingPeers(pengdingPeers []*metapb.Peer) RegionCreateOption { + return func(region *RegionInfo) { + region.pendingPeers = pengdingPeers + } +} + +// WithLeader sets the leader for the region. +func WithLeader(leader *metapb.Peer) RegionCreateOption { + return func(region *RegionInfo) { + region.leader = leader + } +} + +// WithLearners adds learner to the region +func WithLearners(learner []*metapb.Peer) RegionCreateOption { + return func(region *RegionInfo) { + region.learners = append(region.learners, learner...) + } +} + +// WithStartKey sets the start key for the region. +func WithStartKey(key []byte) RegionCreateOption { + return func(region *RegionInfo) { + region.meta.StartKey = key + } +} + +// WithEndKey sets the end key for the region. +func WithEndKey(key []byte) RegionCreateOption { + return func(region *RegionInfo) { + region.meta.EndKey = key + } +} + +// WithIncVersion increases the version of the region. +func WithIncVersion() RegionCreateOption { + return func(region *RegionInfo) { + e := region.meta.GetRegionEpoch() + if e != nil { + e.Version++ + } + } +} + +// WithIncConfVer increases the config version of the region. +func WithIncConfVer() RegionCreateOption { + return func(region *RegionInfo) { + e := region.meta.GetRegionEpoch() + if e != nil { + e.ConfVer++ + } + } +} + +// WithRemoveStorePeer removes the specified peer for the region. +func WithRemoveStorePeer(storeID uint64) RegionCreateOption { + return func(region *RegionInfo) { + var peers []*metapb.Peer + for _, peer := range region.meta.GetPeers() { + if peer.GetStoreId() != storeID { + peers = append(peers, peer) + } + } + region.meta.Peers = peers + } +} + +// SetApproximateSize sets the approximate size for the region. 
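Editor's note: `region_option.go` is the functional-option layer of this package — `RegionOption` values filter candidates (for example in `randRegion`), while `RegionCreateOption` values describe edits applied through `NewRegionInfo` or `Clone`. A hedged sketch (not part of the commit; the function name is invented) of composing a few of the options defined in this file:

```go
package core

// exampleRegionOptions (hypothetical) derives an updated copy of a region the
// way callers of Clone typically do: bump the range version, shrink the key
// range, and drop the peer on store 5.
func exampleRegionOptions(region *RegionInfo) *RegionInfo {
	return region.Clone(
		WithIncVersion(),
		WithEndKey([]byte("m")),
		WithRemoveStorePeer(5),
	)
}
```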
+func SetApproximateSize(v int64) RegionCreateOption { + return func(region *RegionInfo) { + region.approximateSize = v + } +} + +// SetPeers sets the peers for the region. +func SetPeers(peers []*metapb.Peer) RegionCreateOption { + return func(region *RegionInfo) { + region.meta.Peers = peers + } +} + +// WithAddPeer adds a peer for the region. +func WithAddPeer(peer *metapb.Peer) RegionCreateOption { + return func(region *RegionInfo) { + region.meta.Peers = append(region.meta.Peers, peer) + region.voters = append(region.voters, peer) + } +} diff --git a/scheduler/server/core/region_test.go b/scheduler/server/core/region_test.go new file mode 100644 index 00000000..84f142ca --- /dev/null +++ b/scheduler/server/core/region_test.go @@ -0,0 +1,192 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "testing" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockid" + "github.com/pingcap-incubator/tinykv/scheduler/server/id" + . "github.com/pingcap/check" +) + +func TestCore(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testRegionMapSuite{}) + +type testRegionMapSuite struct{} + +func (s *testRegionMapSuite) TestRegionMap(c *C) { + var empty *regionMap + c.Assert(empty.Len(), Equals, 0) + c.Assert(empty.Get(1), IsNil) + + rm := newRegionMap() + s.check(c, rm) + rm.Put(s.regionInfo(1)) + s.check(c, rm, 1) + + rm.Put(s.regionInfo(2)) + rm.Put(s.regionInfo(3)) + s.check(c, rm, 1, 2, 3) + + rm.Put(s.regionInfo(3)) + rm.Delete(4) + s.check(c, rm, 1, 2, 3) + + rm.Delete(3) + rm.Delete(1) + s.check(c, rm, 2) + + rm.Put(s.regionInfo(3)) + s.check(c, rm, 2, 3) +} + +func (s *testRegionMapSuite) regionInfo(id uint64) *RegionInfo { + return &RegionInfo{ + meta: &metapb.Region{ + Id: id, + }, + approximateSize: int64(id), + } +} + +func (s *testRegionMapSuite) check(c *C, rm *regionMap, ids ...uint64) { + // Check Get. + for _, id := range ids { + c.Assert(rm.Get(id).GetID(), Equals, id) + } + // Check Len. + c.Assert(rm.Len(), Equals, len(ids)) + // Check id set. + expect := make(map[uint64]struct{}) + for _, id := range ids { + expect[id] = struct{}{} + } + set1 := make(map[uint64]struct{}) + for _, r := range rm.m { + set1[r.GetID()] = struct{}{} + } + c.Assert(set1, DeepEquals, expect) + // Check region size. 
+ var total int64 + for _, id := range ids { + total += int64(id) + } + c.Assert(rm.TotalSize(), Equals, total) +} + +var _ = Suite(&testRegionKey{}) + +type testRegionKey struct{} + +func (*testRegionKey) TestRegionKey(c *C) { + testCase := []struct { + key string + expect string + }{ + {`"t\x80\x00\x00\x00\x00\x00\x00\xff!_r\x80\x00\x00\x00\x00\xff\x02\u007fY\x00\x00\x00\x00\x00\xfa"`, + `7480000000000000FF215F728000000000FF027F590000000000FA`}, + {"\"\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf8\"", + `80000000000000FF0500000000000000F8`}, + } + for _, t := range testCase { + got, err := strconv.Unquote(t.key) + c.Assert(err, IsNil) + s := fmt.Sprintln(RegionToHexMeta(&metapb.Region{StartKey: []byte(got)})) + c.Assert(strings.Contains(s, t.expect), IsTrue) + + // start key changed + orgion := NewRegionInfo(&metapb.Region{EndKey: []byte(got)}, nil) + region := NewRegionInfo(&metapb.Region{StartKey: []byte(got), EndKey: []byte(got)}, nil) + s = DiffRegionKeyInfo(orgion, region) + c.Assert(s, Matches, ".*StartKey Changed.*") + c.Assert(strings.Contains(s, t.expect), IsTrue) + + // end key changed + orgion = NewRegionInfo(&metapb.Region{StartKey: []byte(got)}, nil) + region = NewRegionInfo(&metapb.Region{StartKey: []byte(got), EndKey: []byte(got)}, nil) + s = DiffRegionKeyInfo(orgion, region) + c.Assert(s, Matches, ".*EndKey Changed.*") + c.Assert(strings.Contains(s, t.expect), IsTrue) + } +} + +func BenchmarkRandomRegion(b *testing.B) { + regions := NewRegionsInfo() + for i := 0; i < 5000000; i++ { + item := &RegionInfo{meta: &metapb.Region{StartKey: []byte(fmt.Sprintf("%20d", i)), EndKey: []byte(fmt.Sprintf("%20d", i+1))}} + regions.AddRegion(item) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + regions.RandRegion() + } +} + +const keyLength = 100 + +func randomBytes(n int) []byte { + bytes := make([]byte, n) + _, err := rand.Read(bytes) + if err != nil { + panic(err) + } + return bytes +} + +func newRegionInfoID(idAllocator id.Allocator) *RegionInfo { + var ( + peers []*metapb.Peer + leader *metapb.Peer + ) + for i := 0; i < 3; i++ { + id, _ := idAllocator.Alloc() + p := &metapb.Peer{Id: id, StoreId: id} + if i == 0 { + leader = p + } + peers = append(peers, p) + } + regionID, _ := idAllocator.Alloc() + return NewRegionInfo( + &metapb.Region{ + Id: regionID, + StartKey: randomBytes(keyLength), + EndKey: randomBytes(keyLength), + Peers: peers, + }, + leader, + ) +} + +func BenchmarkAddRegion(b *testing.B) { + regions := NewRegionsInfo() + idAllocator := mockid.NewIDAllocator() + var items []*RegionInfo + for i := 0; i < 10000000; i++ { + items = append(items, newRegionInfoID(idAllocator)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + regions.AddRegion(items[i]) + } +} diff --git a/scheduler/server/core/region_tree.go b/scheduler/server/core/region_tree.go new file mode 100644 index 00000000..2f38050f --- /dev/null +++ b/scheduler/server/core/region_tree.go @@ -0,0 +1,231 @@ +// Copyright 2016 PingCAP, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
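Editor's note: `TestRegionMap` above pins down the size bookkeeping in `regionMap`: a `Put` on an existing id replaces the entry and adjusts `totalSize` by the delta, and `Delete` subtracts it. A compact sketch (not part of the commit; the function name is invented) of that invariant:

```go
package core

import "github.com/pingcap-incubator/tinykv/proto/pkg/metapb"

// exampleRegionMapSize (hypothetical) walks through the totalSize accounting
// checked by TestRegionMap.
func exampleRegionMapSize() int64 {
	rm := newRegionMap()
	rm.Put(&RegionInfo{meta: &metapb.Region{Id: 1}, approximateSize: 10}) // total = 10
	rm.Put(&RegionInfo{meta: &metapb.Region{Id: 1}, approximateSize: 30}) // re-Put: 10 - 10 + 30 = 30
	rm.Put(&RegionInfo{meta: &metapb.Region{Id: 2}, approximateSize: 5})  // total = 35
	rm.Delete(2)                                                          // total = 30
	return rm.TotalSize()
}
```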
+ +package core + +import ( + "bytes" + "fmt" + "math/rand" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/btree" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +var _ btree.Item = ®ionItem{} + +type regionItem struct { + region *RegionInfo +} + +// Less returns true if the region start key is less than the other. +func (r *regionItem) Less(other btree.Item) bool { + left := r.region.GetStartKey() + right := other.(*regionItem).region.GetStartKey() + return bytes.Compare(left, right) < 0 +} + +func (r *regionItem) Contains(key []byte) bool { + start, end := r.region.GetStartKey(), r.region.GetEndKey() + return bytes.Compare(key, start) >= 0 && (len(end) == 0 || bytes.Compare(key, end) < 0) +} + +const ( + defaultBTreeDegree = 64 +) + +type regionTree struct { + tree *btree.BTree +} + +func newRegionTree() *regionTree { + return ®ionTree{ + tree: btree.New(defaultBTreeDegree), + } +} + +func (t *regionTree) length() int { + return t.tree.Len() +} + +// getOverlaps gets the regions which are overlapped with the specified region range. +func (t *regionTree) getOverlaps(region *RegionInfo) []*RegionInfo { + item := ®ionItem{region: region} + + // note that find() gets the last item that is less or equal than the region. + // in the case: |_______a_______|_____b_____|___c___| + // new region is |______d______| + // find() will return regionItem of region_a + // and both startKey of region_a and region_b are less than endKey of region_d, + // thus they are regarded as overlapped regions. + result := t.find(region) + if result == nil { + result = item + } + + var overlaps []*RegionInfo + t.tree.AscendGreaterOrEqual(result, func(i btree.Item) bool { + over := i.(*regionItem) + if len(region.GetEndKey()) > 0 && bytes.Compare(region.GetEndKey(), over.region.GetStartKey()) <= 0 { + return false + } + overlaps = append(overlaps, over.region) + return true + }) + return overlaps +} + +// update updates the tree with the region. +// It finds and deletes all the overlapped regions first, and then +// insert the region. +func (t *regionTree) update(region *RegionInfo) []*RegionInfo { + overlaps := t.getOverlaps(region) + for _, item := range overlaps { + log.Debug("overlapping region", + zap.Uint64("region-id", item.GetID()), + zap.Stringer("delete-region", RegionToHexMeta(item.GetMeta())), + zap.Stringer("update-region", RegionToHexMeta(region.GetMeta()))) + t.tree.Delete(®ionItem{item}) + } + + t.tree.ReplaceOrInsert(®ionItem{region: region}) + + return overlaps +} + +// remove removes a region if the region is in the tree. +// It will do nothing if it cannot find the region or the found region +// is not the same with the region. +func (t *regionTree) remove(region *RegionInfo) { + if t.length() == 0 { + return + } + result := t.find(region) + if result == nil || result.region.GetID() != region.GetID() { + return + } + + t.tree.Delete(result) +} + +// search returns a region that contains the key. +func (t *regionTree) search(regionKey []byte) *RegionInfo { + region := &RegionInfo{meta: &metapb.Region{StartKey: regionKey}} + result := t.find(region) + if result == nil { + return nil + } + return result.region +} + +// searchPrev returns the previous region of the region where the regionKey is located. 
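Editor's note: `regionTree.update` above resolves conflicts by deleting every region whose range intersects the incoming one before inserting it, and returns the evicted regions. A small sketch (not part of the commit; it assumes the `NewTestRegionInfo` helper that the tests below rely on):

```go
package core

// exampleTreeOverlap (hypothetical) shows update returning the regions it
// evicted because they overlapped the new key range.
func exampleTreeOverlap() []*RegionInfo {
	tree := newRegionTree()
	tree.update(NewTestRegionInfo([]byte("a"), []byte("c"))) // [a, c)
	tree.update(NewTestRegionInfo([]byte("c"), []byte("e"))) // [c, e)

	// [b, d) intersects both existing items, so both are removed from the
	// tree and returned as overlaps; only [b, d) remains afterwards.
	return tree.update(NewTestRegionInfo([]byte("b"), []byte("d")))
}
```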
+func (t *regionTree) searchPrev(regionKey []byte) *RegionInfo { + curRegion := &RegionInfo{meta: &metapb.Region{StartKey: regionKey}} + curRegionItem := t.find(curRegion) + if curRegionItem == nil { + return nil + } + prevRegionItem, _ := t.getAdjacentRegions(curRegionItem.region) + if prevRegionItem == nil { + return nil + } + if !bytes.Equal(prevRegionItem.region.GetEndKey(), curRegionItem.region.GetStartKey()) { + return nil + } + return prevRegionItem.region +} + +// find is a helper function to find an item that contains the regions start +// key. +func (t *regionTree) find(region *RegionInfo) *regionItem { + item := ®ionItem{region: region} + + var result *regionItem + t.tree.DescendLessOrEqual(item, func(i btree.Item) bool { + result = i.(*regionItem) + return false + }) + + if result == nil || !result.Contains(region.GetStartKey()) { + return nil + } + + return result +} + +// scanRage scans from the first region containing or behind the start key +// until f return false +func (t *regionTree) scanRange(startKey []byte, f func(*RegionInfo) bool) { + region := &RegionInfo{meta: &metapb.Region{StartKey: startKey}} + // find if there is a region with key range [s, d), s < startKey < d + startItem := t.find(region) + if startItem == nil { + startItem = ®ionItem{region: &RegionInfo{meta: &metapb.Region{StartKey: startKey}}} + } + t.tree.AscendGreaterOrEqual(startItem, func(item btree.Item) bool { + return f(item.(*regionItem).region) + }) +} + +func (t *regionTree) getAdjacentRegions(region *RegionInfo) (*regionItem, *regionItem) { + item := ®ionItem{region: &RegionInfo{meta: &metapb.Region{StartKey: region.GetStartKey()}}} + var prev, next *regionItem + t.tree.AscendGreaterOrEqual(item, func(i btree.Item) bool { + if bytes.Equal(item.region.GetStartKey(), i.(*regionItem).region.GetStartKey()) { + return true + } + next = i.(*regionItem) + return false + }) + t.tree.DescendLessOrEqual(item, func(i btree.Item) bool { + if bytes.Equal(item.region.GetStartKey(), i.(*regionItem).region.GetStartKey()) { + return true + } + prev = i.(*regionItem) + return false + }) + return prev, next +} + +// RandomRegion is used to get a random region intersecting with the range [startKey, endKey). +func (t *regionTree) RandomRegion(startKey, endKey []byte) *RegionInfo { + if t.length() == 0 { + return nil + } + + var endIndex int + + startRegion, startIndex := t.tree.GetWithIndex(®ionItem{region: &RegionInfo{meta: &metapb.Region{StartKey: startKey}}}) + + if len(endKey) != 0 { + _, endIndex = t.tree.GetWithIndex(®ionItem{region: &RegionInfo{meta: &metapb.Region{StartKey: endKey}}}) + } else { + endIndex = t.tree.Len() + } + + // Consider that the item in the tree may not be continuous, + // we need to check if the previous item contains the key. + if startIndex != 0 && startRegion == nil && t.tree.GetAt(startIndex-1).(*regionItem).Contains(startKey) { + startIndex-- + } + + if endIndex <= startIndex { + log.Error("wrong keys", + zap.String("start-key", fmt.Sprintf("%s", HexRegionKey(startKey))), + zap.String("end-key", fmt.Sprintf("%s", HexRegionKey(startKey)))) + return nil + } + index := rand.Intn(endIndex-startIndex) + startIndex + return t.tree.GetAt(index).(*regionItem).region +} diff --git a/scheduler/server/core/region_tree_test.go b/scheduler/server/core/region_tree_test.go new file mode 100644 index 00000000..c7099966 --- /dev/null +++ b/scheduler/server/core/region_tree_test.go @@ -0,0 +1,370 @@ +// Copyright 2016 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + . "github.com/pingcap/check" +) + +var _ = Suite(&testRegionSuite{}) + +type testRegionSuite struct{} + +func (s *testRegionSuite) TestRegionInfo(c *C) { + n := uint64(3) + + peers := make([]*metapb.Peer, 0, n) + for i := uint64(0); i < n; i++ { + p := &metapb.Peer{ + Id: i, + StoreId: i, + } + peers = append(peers, p) + } + region := &metapb.Region{ + Peers: peers, + } + pendingPeer := peers[1] + + info := NewRegionInfo( + region, + peers[0], + WithPendingPeers([]*metapb.Peer{pendingPeer})) + + r := info.Clone() + c.Assert(r, DeepEquals, info) + + for i := uint64(0); i < n; i++ { + c.Assert(r.GetPeer(i), Equals, r.meta.Peers[i]) + } + c.Assert(r.GetPeer(n), IsNil) + c.Assert(r.GetPendingPeer(n), IsNil) + c.Assert(r.GetPendingPeer(pendingPeer.GetId()), DeepEquals, pendingPeer) + + for i := uint64(0); i < n; i++ { + c.Assert(r.GetStorePeer(i).GetStoreId(), Equals, i) + } + c.Assert(r.GetStorePeer(n), IsNil) + + removePeer := &metapb.Peer{ + Id: n, + StoreId: n, + } + r = r.Clone(SetPeers(append(r.meta.Peers, removePeer))) + c.Assert(DiffRegionPeersInfo(info, r), Matches, "Add peer.*") + c.Assert(DiffRegionPeersInfo(r, info), Matches, "Remove peer.*") + c.Assert(r.GetStorePeer(n), DeepEquals, removePeer) + r = r.Clone(WithRemoveStorePeer(n)) + c.Assert(DiffRegionPeersInfo(r, info), Equals, "") + c.Assert(r.GetStorePeer(n), IsNil) + r = r.Clone(WithStartKey([]byte{0})) + c.Assert(DiffRegionKeyInfo(r, info), Matches, "StartKey Changed.*") + r = r.Clone(WithEndKey([]byte{1})) + c.Assert(DiffRegionKeyInfo(r, info), Matches, ".*EndKey Changed.*") + + stores := r.GetStoreIds() + c.Assert(stores, HasLen, int(n)) + for i := uint64(0); i < n; i++ { + _, ok := stores[i] + c.Assert(ok, IsTrue) + } + + followers := r.GetFollowers() + c.Assert(followers, HasLen, int(n-1)) + for i := uint64(1); i < n; i++ { + c.Assert(followers[peers[i].GetStoreId()], DeepEquals, peers[i]) + } +} + +func (s *testRegionSuite) TestRegionItem(c *C) { + item := newRegionItem([]byte("b"), []byte{}) + + c.Assert(item.Less(newRegionItem([]byte("a"), []byte{})), IsFalse) + c.Assert(item.Less(newRegionItem([]byte("b"), []byte{})), IsFalse) + c.Assert(item.Less(newRegionItem([]byte("c"), []byte{})), IsTrue) + + c.Assert(item.Contains([]byte("a")), IsFalse) + c.Assert(item.Contains([]byte("b")), IsTrue) + c.Assert(item.Contains([]byte("c")), IsTrue) + + item = newRegionItem([]byte("b"), []byte("d")) + c.Assert(item.Contains([]byte("a")), IsFalse) + c.Assert(item.Contains([]byte("b")), IsTrue) + c.Assert(item.Contains([]byte("c")), IsTrue) + c.Assert(item.Contains([]byte("d")), IsFalse) +} + +func (s *testRegionSuite) TestRegionTree(c *C) { + tree := newRegionTree() + + c.Assert(tree.search([]byte("a")), IsNil) + + regionA := NewTestRegionInfo([]byte("a"), []byte("b")) + regionB := NewTestRegionInfo([]byte("b"), []byte("c")) + regionC := NewTestRegionInfo([]byte("c"), []byte("d")) + regionD := NewTestRegionInfo([]byte("d"), []byte{}) 
+ + tree.update(regionA) + tree.update(regionC) + c.Assert(tree.search([]byte{}), IsNil) + c.Assert(tree.search([]byte("a")), Equals, regionA) + c.Assert(tree.search([]byte("b")), IsNil) + c.Assert(tree.search([]byte("c")), Equals, regionC) + c.Assert(tree.search([]byte("d")), IsNil) + + // search previous region + c.Assert(tree.searchPrev([]byte("a")), IsNil) + c.Assert(tree.searchPrev([]byte("b")), IsNil) + c.Assert(tree.searchPrev([]byte("c")), IsNil) + + tree.update(regionB) + // search previous region + c.Assert(tree.searchPrev([]byte("c")), Equals, regionB) + c.Assert(tree.searchPrev([]byte("b")), Equals, regionA) + + tree.remove(regionC) + tree.update(regionD) + c.Assert(tree.search([]byte{}), IsNil) + c.Assert(tree.search([]byte("a")), Equals, regionA) + c.Assert(tree.search([]byte("b")), Equals, regionB) + c.Assert(tree.search([]byte("c")), IsNil) + c.Assert(tree.search([]byte("d")), Equals, regionD) + + // check get adjacent regions + prev, next := tree.getAdjacentRegions(regionA) + c.Assert(prev, IsNil) + c.Assert(next.region, Equals, regionB) + prev, next = tree.getAdjacentRegions(regionB) + c.Assert(prev.region, Equals, regionA) + c.Assert(next.region, Equals, regionD) + prev, next = tree.getAdjacentRegions(regionC) + c.Assert(prev.region, Equals, regionB) + c.Assert(next.region, Equals, regionD) + prev, next = tree.getAdjacentRegions(regionD) + c.Assert(prev.region, Equals, regionB) + c.Assert(next, IsNil) + + // region with the same range and different region id will not be delete. + region0 := newRegionItem([]byte{}, []byte("a")).region + tree.update(region0) + c.Assert(tree.search([]byte{}), Equals, region0) + anotherRegion0 := newRegionItem([]byte{}, []byte("a")).region + anotherRegion0.meta.Id = 123 + tree.remove(anotherRegion0) + c.Assert(tree.search([]byte{}), Equals, region0) + + // overlaps with 0, A, B, C. + region0D := newRegionItem([]byte(""), []byte("d")).region + tree.update(region0D) + c.Assert(tree.search([]byte{}), Equals, region0D) + c.Assert(tree.search([]byte("a")), Equals, region0D) + c.Assert(tree.search([]byte("b")), Equals, region0D) + c.Assert(tree.search([]byte("c")), Equals, region0D) + c.Assert(tree.search([]byte("d")), Equals, regionD) + + // overlaps with D. + regionE := newRegionItem([]byte("e"), []byte{}).region + tree.update(regionE) + c.Assert(tree.search([]byte{}), Equals, region0D) + c.Assert(tree.search([]byte("a")), Equals, region0D) + c.Assert(tree.search([]byte("b")), Equals, region0D) + c.Assert(tree.search([]byte("c")), Equals, region0D) + c.Assert(tree.search([]byte("d")), IsNil) + c.Assert(tree.search([]byte("e")), Equals, regionE) +} + +func updateRegions(c *C, tree *regionTree, regions []*RegionInfo) { + for _, region := range regions { + tree.update(region) + c.Assert(tree.search(region.GetStartKey()), Equals, region) + if len(region.GetEndKey()) > 0 { + end := region.GetEndKey()[0] + c.Assert(tree.search([]byte{end - 1}), Equals, region) + c.Assert(tree.search([]byte{end + 1}), Not(Equals), region) + } + } +} + +func (s *testRegionSuite) TestRegionTreeSplitAndMerge(c *C) { + tree := newRegionTree() + regions := []*RegionInfo{newRegionItem([]byte{}, []byte{}).region} + + // Byte will underflow/overflow if n > 7. + n := 7 + + // Split. + for i := 0; i < n; i++ { + regions = SplitRegions(regions) + updateRegions(c, tree, regions) + } + + // Merge. + for i := 0; i < n; i++ { + regions = MergeRegions(regions) + updateRegions(c, tree, regions) + } + + // Split twice and merge once. 
+ for i := 0; i < n*2; i++ { + if (i+1)%3 == 0 { + regions = MergeRegions(regions) + } else { + regions = SplitRegions(regions) + } + updateRegions(c, tree, regions) + } +} + +func (s *testRegionSuite) TestRandomRegion(c *C) { + tree := newRegionTree() + r := tree.RandomRegion([]byte(""), []byte("")) + c.Assert(r, IsNil) + + regionA := NewTestRegionInfo([]byte(""), []byte("g")) + tree.update(regionA) + ra := tree.RandomRegion([]byte(""), []byte("")) + c.Assert(ra, DeepEquals, regionA) + + regionB := NewTestRegionInfo([]byte("g"), []byte("n")) + regionC := NewTestRegionInfo([]byte("n"), []byte("t")) + regionD := NewTestRegionInfo([]byte("t"), []byte("")) + tree.update(regionB) + tree.update(regionC) + tree.update(regionD) + + rb := tree.RandomRegion([]byte("g"), []byte("n")) + c.Assert(rb, DeepEquals, regionB) + rc := tree.RandomRegion([]byte("n"), []byte("t")) + c.Assert(rc, DeepEquals, regionC) + rd := tree.RandomRegion([]byte("t"), []byte("")) + c.Assert(rd, DeepEquals, regionD) + + re := tree.RandomRegion([]byte("a"), []byte("a")) + c.Assert(re, DeepEquals, regionA) + re = tree.RandomRegion([]byte("o"), []byte("s")) + c.Assert(re, DeepEquals, regionC) + re = tree.RandomRegion([]byte(""), []byte("a")) + c.Assert(re, DeepEquals, regionA) + re = tree.RandomRegion([]byte("z"), []byte("")) + c.Assert(re, DeepEquals, regionD) + + checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []byte(""), []byte("")) + checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB}, []byte(""), []byte("n")) + checkRandomRegion(c, tree, []*RegionInfo{regionC, regionD}, []byte("n"), []byte("")) + checkRandomRegion(c, tree, []*RegionInfo{regionB, regionC}, []byte("h"), []byte("s")) + checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []byte("a"), []byte("z")) +} + +func (s *testRegionSuite) TestRandomRegionDiscontinuous(c *C) { + tree := newRegionTree() + r := tree.RandomRegion([]byte("c"), []byte("f")) + c.Assert(r, IsNil) + + // test for single region + regionA := NewTestRegionInfo([]byte("c"), []byte("f")) + tree.update(regionA) + ra := tree.RandomRegion([]byte("c"), []byte("e")) + c.Assert(ra, DeepEquals, regionA) + ra = tree.RandomRegion([]byte("c"), []byte("f")) + c.Assert(ra, DeepEquals, regionA) + ra = tree.RandomRegion([]byte("c"), []byte("g")) + c.Assert(ra, DeepEquals, regionA) + ra = tree.RandomRegion([]byte("a"), []byte("e")) + c.Assert(ra, DeepEquals, regionA) + ra = tree.RandomRegion([]byte("a"), []byte("f")) + c.Assert(ra, DeepEquals, regionA) + ra = tree.RandomRegion([]byte("a"), []byte("g")) + c.Assert(ra, DeepEquals, regionA) + + regionB := NewTestRegionInfo([]byte("n"), []byte("x")) + tree.update(regionB) + rb := tree.RandomRegion([]byte("g"), []byte("x")) + c.Assert(rb, DeepEquals, regionB) + rb = tree.RandomRegion([]byte("g"), []byte("y")) + c.Assert(rb, DeepEquals, regionB) + rb = tree.RandomRegion([]byte("n"), []byte("y")) + c.Assert(rb, DeepEquals, regionB) + rb = tree.RandomRegion([]byte("o"), []byte("y")) + c.Assert(rb, DeepEquals, regionB) + + regionC := NewTestRegionInfo([]byte("z"), []byte("")) + tree.update(regionC) + rc := tree.RandomRegion([]byte("y"), []byte("")) + c.Assert(rc, DeepEquals, regionC) + regionD := NewTestRegionInfo([]byte(""), []byte("a")) + tree.update(regionD) + rd := tree.RandomRegion([]byte(""), []byte("b")) + c.Assert(rd, DeepEquals, regionD) + + checkRandomRegion(c, tree, []*RegionInfo{regionA, regionB, regionC, regionD}, []byte(""), []byte("")) +} + +func checkRandomRegion(c *C, tree *regionTree, regions 
[]*RegionInfo, startKey, endKey []byte) { + keys := make(map[string]struct{}) + for i := 0; i < 10000 && len(keys) < len(regions); i++ { + re := tree.RandomRegion(startKey, endKey) + c.Assert(re, NotNil) + k := string(re.GetStartKey()) + if _, ok := keys[k]; !ok { + keys[k] = struct{}{} + } + } + for _, region := range regions { + _, ok := keys[string(region.GetStartKey())] + c.Assert(ok, IsTrue) + } + c.Assert(keys, HasLen, len(regions)) +} + +func newRegionItem(start, end []byte) *regionItem { + return ®ionItem{region: NewTestRegionInfo(start, end)} +} + +func BenchmarkRegionTreeUpdate(b *testing.B) { + tree := newRegionTree() + for i := 0; i < b.N; i++ { + item := &RegionInfo{meta: &metapb.Region{StartKey: []byte(fmt.Sprintf("%20d", i)), EndKey: []byte(fmt.Sprintf("%20d", i+1))}} + tree.update(item) + } +} + +const MaxKey = 10000000 + +func BenchmarkRegionTreeUpdateUnordered(b *testing.B) { + tree := newRegionTree() + var items []*RegionInfo + for i := 0; i < MaxKey; i++ { + var startKey, endKey int + key1 := rand.Intn(MaxKey) + key2 := rand.Intn(MaxKey) + if key1 < key2 { + startKey = key1 + endKey = key2 + } else { + startKey = key2 + endKey = key1 + } + items = append(items, &RegionInfo{meta: &metapb.Region{StartKey: []byte(fmt.Sprintf("%20d", startKey)), EndKey: []byte(fmt.Sprintf("%20d", endKey))}}) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + tree.update(items[i]) + } +} diff --git a/scheduler/server/core/storage.go b/scheduler/server/core/storage.go new file mode 100644 index 00000000..86a1f8a4 --- /dev/null +++ b/scheduler/server/core/storage.go @@ -0,0 +1,239 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "math" + "path" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" +) + +const ( + clusterPath = "raft" + schedulePath = "schedule" + gcPath = "gc" + + customScheduleConfigPath = "scheduler_config" +) + +const ( + maxKVRangeLimit = 10000 + minKVRangeLimit = 100 +) + +// Storage wraps all kv operations, keep it stateless. +type Storage struct { + kv.Base +} + +// NewStorage creates Storage instance with Base. +func NewStorage(base kv.Base) *Storage { + return &Storage{ + Base: base, + } +} + +func (s *Storage) storePath(storeID uint64) string { + return path.Join(clusterPath, "s", fmt.Sprintf("%020d", storeID)) +} + +func regionPath(regionID uint64) string { + return path.Join(clusterPath, "r", fmt.Sprintf("%020d", regionID)) +} + +// ClusterStatePath returns the path to save an option. 
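Editor's note: store and region metadata are persisted under fixed, zero-padded key paths, so byte-wise order in the underlying kv store matches numeric id order; `LoadStores` below relies on that to page through stores with range scans. A short sketch (not part of the commit; the function name is invented):

```go
package core

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/server/kv"
)

// exampleStorageKeys (hypothetical) prints the key layout used by Storage.
func exampleStorageKeys() {
	s := NewStorage(kv.NewMemoryKV()) // the path helpers never touch kv.Base
	fmt.Println(s.storePath(123)) // raft/s/00000000000000000123
	fmt.Println(regionPath(123))  // raft/r/00000000000000000123
	// Because every id is padded to 20 digits, comparing keys byte-wise is
	// the same as comparing the uint64 ids numerically.
}
```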
+func (s *Storage) ClusterStatePath(option string) string { + return path.Join(clusterPath, "status", option) +} + +func (s *Storage) storeLeaderWeightPath(storeID uint64) string { + return path.Join(schedulePath, "store_weight", fmt.Sprintf("%020d", storeID), "leader") +} + +func (s *Storage) storeRegionWeightPath(storeID uint64) string { + return path.Join(schedulePath, "store_weight", fmt.Sprintf("%020d", storeID), "region") +} + +// SaveScheduleConfig saves the config of scheduler. +func (s *Storage) SaveScheduleConfig(scheduleName string, data []byte) error { + configPath := path.Join(customScheduleConfigPath, scheduleName) + return s.Save(configPath, string(data)) +} + +// RemoveScheduleConfig remvoes the config of scheduler. +func (s *Storage) RemoveScheduleConfig(scheduleName string) error { + configPath := path.Join(customScheduleConfigPath, scheduleName) + return s.Remove(configPath) +} + +// LoadScheduleConfig loads the config of scheduler. +func (s *Storage) LoadScheduleConfig(scheduleName string) (string, error) { + configPath := path.Join(customScheduleConfigPath, scheduleName) + return s.Load(configPath) +} + +// LoadMeta loads cluster meta from storage. +func (s *Storage) LoadMeta(meta *metapb.Cluster) (bool, error) { + return loadProto(s.Base, clusterPath, meta) +} + +// SaveMeta save cluster meta to storage. +func (s *Storage) SaveMeta(meta *metapb.Cluster) error { + return saveProto(s.Base, clusterPath, meta) +} + +// LoadStore loads one store from storage. +func (s *Storage) LoadStore(storeID uint64, store *metapb.Store) (bool, error) { + return loadProto(s.Base, s.storePath(storeID), store) +} + +// SaveStore saves one store to storage. +func (s *Storage) SaveStore(store *metapb.Store) error { + return saveProto(s.Base, s.storePath(store.GetId()), store) +} + +// DeleteStore deletes one store from storage. +func (s *Storage) DeleteStore(store *metapb.Store) error { + return s.Remove(s.storePath(store.GetId())) +} + +// LoadStores loads all stores from storage to StoresInfo. +func (s *Storage) LoadStores(f func(store *StoreInfo)) error { + nextID := uint64(0) + endKey := s.storePath(math.MaxUint64) + for { + key := s.storePath(nextID) + _, res, err := s.LoadRange(key, endKey, minKVRangeLimit) + if err != nil { + return err + } + for _, str := range res { + store := &metapb.Store{} + if err := store.Unmarshal([]byte(str)); err != nil { + return errors.WithStack(err) + } + leaderWeight, err := s.loadFloatWithDefaultValue(s.storeLeaderWeightPath(store.GetId()), 1.0) + if err != nil { + return err + } + regionWeight, err := s.loadFloatWithDefaultValue(s.storeRegionWeightPath(store.GetId()), 1.0) + if err != nil { + return err + } + newStoreInfo := NewStoreInfo(store, SetLeaderWeight(leaderWeight), SetRegionWeight(regionWeight)) + + nextID = store.GetId() + 1 + f(newStoreInfo) + } + if len(res) < minKVRangeLimit { + return nil + } + } +} + +// SaveStoreWeight saves a store's leader and region weight to storage. 
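Editor's note: `LoadStores` pages through the store key range in chunks of `minKVRangeLimit` and attaches the persisted leader/region weights to each `StoreInfo` it hands to the callback. A hedged usage sketch (not part of the commit; the function name is invented), mirroring what the storage tests below do:

```go
package core

import (
	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
	"github.com/pingcap-incubator/tinykv/scheduler/server/kv"
)

// exampleLoadStores (hypothetical) round-trips a store through Storage.
func exampleLoadStores() error {
	storage := NewStorage(kv.NewMemoryKV())
	if err := storage.SaveStore(&metapb.Store{Id: 1, Address: "127.0.0.1:20160"}); err != nil {
		return err
	}
	// Leader weight 2.0 and region weight 1.0 go under schedule/store_weight/... .
	if err := storage.SaveStoreWeight(1, 2.0, 1.0); err != nil {
		return err
	}
	cache := NewStoresInfo()
	// Each StoreInfo delivered to the callback already carries those weights.
	return storage.LoadStores(cache.SetStore)
}
```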
+func (s *Storage) SaveStoreWeight(storeID uint64, leader, region float64) error { + leaderValue := strconv.FormatFloat(leader, 'f', -1, 64) + if err := s.Save(s.storeLeaderWeightPath(storeID), leaderValue); err != nil { + return err + } + regionValue := strconv.FormatFloat(region, 'f', -1, 64) + return s.Save(s.storeRegionWeightPath(storeID), regionValue) +} + +func (s *Storage) loadFloatWithDefaultValue(path string, def float64) (float64, error) { + res, err := s.Load(path) + if err != nil { + return 0, err + } + if res == "" { + return def, nil + } + val, err := strconv.ParseFloat(res, 64) + if err != nil { + return 0, errors.WithStack(err) + } + return val, nil +} + +// Flush flushes the dirty region to storage. +func (s *Storage) Flush() error { + return nil +} + +// Close closes the s. +func (s *Storage) Close() error { + return nil +} + +// SaveGCSafePoint saves new GC safe point to storage. +func (s *Storage) SaveGCSafePoint(safePoint uint64) error { + key := path.Join(gcPath, "safe_point") + value := strconv.FormatUint(safePoint, 16) + return s.Save(key, value) +} + +// LoadGCSafePoint loads current GC safe point from storage. +func (s *Storage) LoadGCSafePoint() (uint64, error) { + key := path.Join(gcPath, "safe_point") + value, err := s.Load(key) + if err != nil { + return 0, err + } + if value == "" { + return 0, nil + } + safePoint, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return 0, err + } + return safePoint, nil +} + +// LoadAllScheduleConfig loads all schedulers' config. +func (s *Storage) LoadAllScheduleConfig() ([]string, []string, error) { + keys, values, err := s.LoadRange(customScheduleConfigPath, clientv3.GetPrefixRangeEnd(customScheduleConfigPath), 1000) + for i, key := range keys { + keys[i] = strings.TrimPrefix(key, customScheduleConfigPath+"/") + } + return keys, values, err +} + +func loadProto(s kv.Base, key string, msg proto.Message) (bool, error) { + value, err := s.Load(key) + if err != nil { + return false, err + } + if value == "" { + return false, nil + } + err = proto.Unmarshal([]byte(value), msg) + return true, errors.WithStack(err) +} + +func saveProto(s kv.Base, key string, msg proto.Message) error { + value, err := proto.Marshal(msg) + if err != nil { + return errors.WithStack(err) + } + return s.Save(key, string(value)) +} diff --git a/scheduler/server/core/storage_test.go b/scheduler/server/core/storage_test.go new file mode 100644 index 00000000..034ef478 --- /dev/null +++ b/scheduler/server/core/storage_test.go @@ -0,0 +1,117 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "math" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + . 
"github.com/pingcap/check" +) + +var _ = Suite(&testKVSuite{}) + +type testKVSuite struct { +} + +func (s *testKVSuite) TestBasic(c *C) { + storage := NewStorage(kv.NewMemoryKV()) + + c.Assert(storage.storePath(123), Equals, "raft/s/00000000000000000123") + c.Assert(regionPath(123), Equals, "raft/r/00000000000000000123") + + meta := &metapb.Cluster{Id: 123} + ok, err := storage.LoadMeta(meta) + c.Assert(ok, IsFalse) + c.Assert(err, IsNil) + c.Assert(storage.SaveMeta(meta), IsNil) + newMeta := &metapb.Cluster{} + ok, err = storage.LoadMeta(newMeta) + c.Assert(ok, IsTrue) + c.Assert(err, IsNil) + c.Assert(newMeta, DeepEquals, meta) + + store := &metapb.Store{Id: 123} + ok, err = storage.LoadStore(123, store) + c.Assert(ok, IsFalse) + c.Assert(err, IsNil) + c.Assert(storage.SaveStore(store), IsNil) + newStore := &metapb.Store{} + ok, err = storage.LoadStore(123, newStore) + c.Assert(ok, IsTrue) + c.Assert(err, IsNil) + c.Assert(newStore, DeepEquals, store) +} + +func mustSaveStores(c *C, s *Storage, n int) []*metapb.Store { + stores := make([]*metapb.Store, 0, n) + for i := 0; i < n; i++ { + store := &metapb.Store{Id: uint64(i)} + stores = append(stores, store) + } + + for _, store := range stores { + c.Assert(s.SaveStore(store), IsNil) + } + + return stores +} + +func (s *testKVSuite) TestLoadStores(c *C) { + storage := NewStorage(kv.NewMemoryKV()) + cache := NewStoresInfo() + + n := 10 + stores := mustSaveStores(c, storage, n) + c.Assert(storage.LoadStores(cache.SetStore), IsNil) + + c.Assert(cache.GetStoreCount(), Equals, n) + for _, store := range cache.GetMetaStores() { + c.Assert(store, DeepEquals, stores[store.GetId()]) + } +} + +func (s *testKVSuite) TestStoreWeight(c *C) { + storage := NewStorage(kv.NewMemoryKV()) + cache := NewStoresInfo() + const n = 3 + + mustSaveStores(c, storage, n) + c.Assert(storage.SaveStoreWeight(1, 2.0, 3.0), IsNil) + c.Assert(storage.SaveStoreWeight(2, 0.2, 0.3), IsNil) + c.Assert(storage.LoadStores(cache.SetStore), IsNil) + leaderWeights := []float64{1.0, 2.0, 0.2} + regionWeights := []float64{1.0, 3.0, 0.3} + for i := 0; i < n; i++ { + c.Assert(cache.GetStore(uint64(i)).GetLeaderWeight(), Equals, leaderWeights[i]) + c.Assert(cache.GetStore(uint64(i)).GetRegionWeight(), Equals, regionWeights[i]) + } +} + +func (s *testKVSuite) TestLoadGCSafePoint(c *C) { + storage := NewStorage(kv.NewMemoryKV()) + testData := []uint64{0, 1, 2, 233, 2333, 23333333333, math.MaxUint64} + + r, e := storage.LoadGCSafePoint() + c.Assert(r, Equals, uint64(0)) + c.Assert(e, IsNil) + for _, safePoint := range testData { + err := storage.SaveGCSafePoint(safePoint) + c.Assert(err, IsNil) + safePoint1, err := storage.LoadGCSafePoint() + c.Assert(err, IsNil) + c.Assert(safePoint, Equals, safePoint1) + } +} diff --git a/scheduler/server/core/store.go b/scheduler/server/core/store.go new file mode 100644 index 00000000..0d0d4e7e --- /dev/null +++ b/scheduler/server/core/store.go @@ -0,0 +1,494 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "fmt" + "math" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap/errcode" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +// StoreInfo contains information about a store. +type StoreInfo struct { + meta *metapb.Store + stats *schedulerpb.StoreStats + // Blocked means that the store is blocked from balance. + blocked bool + leaderCount int + regionCount int + leaderSize int64 + regionSize int64 + pendingPeerCount int + lastHeartbeatTS time.Time + leaderWeight float64 + regionWeight float64 + available func() bool +} + +// NewStoreInfo creates StoreInfo with meta data. +func NewStoreInfo(store *metapb.Store, opts ...StoreCreateOption) *StoreInfo { + storeInfo := &StoreInfo{ + meta: store, + stats: &schedulerpb.StoreStats{}, + leaderWeight: 1.0, + regionWeight: 1.0, + } + for _, opt := range opts { + opt(storeInfo) + } + return storeInfo +} + +// Clone creates a copy of current StoreInfo. +func (s *StoreInfo) Clone(opts ...StoreCreateOption) *StoreInfo { + meta := proto.Clone(s.meta).(*metapb.Store) + store := &StoreInfo{ + meta: meta, + stats: s.stats, + blocked: s.blocked, + leaderCount: s.leaderCount, + regionCount: s.regionCount, + leaderSize: s.leaderSize, + regionSize: s.regionSize, + pendingPeerCount: s.pendingPeerCount, + lastHeartbeatTS: s.lastHeartbeatTS, + leaderWeight: s.leaderWeight, + regionWeight: s.regionWeight, + available: s.available, + } + + for _, opt := range opts { + opt(store) + } + return store +} + +// IsBlocked returns if the store is blocked. +func (s *StoreInfo) IsBlocked() bool { + return s.blocked +} + +// IsAvailable returns if the store bucket of limitation is available +func (s *StoreInfo) IsAvailable() bool { + if s.available == nil { + return true + } + return s.available() +} + +// IsUp checks if the store's state is Up. +func (s *StoreInfo) IsUp() bool { + return s.GetState() == metapb.StoreState_Up +} + +// IsOffline checks if the store's state is Offline. +func (s *StoreInfo) IsOffline() bool { + return s.GetState() == metapb.StoreState_Offline +} + +// IsTombstone checks if the store's state is Tombstone. +func (s *StoreInfo) IsTombstone() bool { + return s.GetState() == metapb.StoreState_Tombstone +} + +// DownTime returns the time elapsed since last heartbeat. +func (s *StoreInfo) DownTime() time.Duration { + return time.Since(s.GetLastHeartbeatTS()) +} + +// GetMeta returns the meta information of the store. +func (s *StoreInfo) GetMeta() *metapb.Store { + return s.meta +} + +// GetState returns the state of the store. +func (s *StoreInfo) GetState() metapb.StoreState { + return s.meta.GetState() +} + +// GetAddress returns the address of the store. +func (s *StoreInfo) GetAddress() string { + return s.meta.GetAddress() +} + +// GetID returns the ID of the store. +func (s *StoreInfo) GetID() uint64 { + return s.meta.GetId() +} + +// GetStoreStats returns the statistics information of the store. +func (s *StoreInfo) GetStoreStats() *schedulerpb.StoreStats { + return s.stats +} + +// GetCapacity returns the capacity size of the store. +func (s *StoreInfo) GetCapacity() uint64 { + return s.stats.GetCapacity() +} + +// GetAvailable returns the available size of the store. +func (s *StoreInfo) GetAvailable() uint64 { + return s.stats.GetAvailable() +} + +// GetUsedSize returns the used size of the store. 
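Editor's note: `StoreInfo` above is treated as immutable — `NewStoreInfo` and `Clone` both accept `StoreCreateOption` values, and status changes elsewhere in this package swap in a fresh clone rather than mutating in place. A sketch (not part of the commit; the function name is invented), assuming the `SetLeaderWeight` / `SetRegionWeight` options from `store_option.go`:

```go
package core

import "github.com/pingcap-incubator/tinykv/proto/pkg/metapb"

// exampleStoreClone (hypothetical) shows the clone-with-options style.
func exampleStoreClone() {
	store := NewStoreInfo(&metapb.Store{Id: 7, Address: "127.0.0.1:20160"},
		SetLeaderWeight(2.0))

	// Updates never mutate `store`; they produce an adjusted copy.
	updated := store.Clone(SetRegionWeight(0.5))
	_ = updated.GetRegionWeight() // 0.5
	_ = store.GetRegionWeight()   // still the default 1.0
}
```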
+func (s *StoreInfo) GetUsedSize() uint64 { + return s.stats.GetUsedSize() +} + +// IsBusy returns if the store is busy. +func (s *StoreInfo) IsBusy() bool { + return s.stats.GetIsBusy() +} + +// GetSendingSnapCount returns the current sending snapshot count of the store. +func (s *StoreInfo) GetSendingSnapCount() uint32 { + return s.stats.GetSendingSnapCount() +} + +// GetReceivingSnapCount returns the current receiving snapshot count of the store. +func (s *StoreInfo) GetReceivingSnapCount() uint32 { + return s.stats.GetReceivingSnapCount() +} + +// GetApplyingSnapCount returns the current applying snapshot count of the store. +func (s *StoreInfo) GetApplyingSnapCount() uint32 { + return s.stats.GetApplyingSnapCount() +} + +// GetStartTime returns the start time of the store. +func (s *StoreInfo) GetStartTime() uint32 { + return s.stats.GetStartTime() +} + +// GetLeaderCount returns the leader count of the store. +func (s *StoreInfo) GetLeaderCount() int { + return s.leaderCount +} + +// GetRegionCount returns the Region count of the store. +func (s *StoreInfo) GetRegionCount() int { + return s.regionCount +} + +// GetLeaderSize returns the leader size of the store. +func (s *StoreInfo) GetLeaderSize() int64 { + return s.leaderSize +} + +// GetRegionSize returns the Region size of the store. +func (s *StoreInfo) GetRegionSize() int64 { + return s.regionSize +} + +// GetPendingPeerCount returns the pending peer count of the store. +func (s *StoreInfo) GetPendingPeerCount() int { + return s.pendingPeerCount +} + +// GetLeaderWeight returns the leader weight of the store. +func (s *StoreInfo) GetLeaderWeight() float64 { + return s.leaderWeight +} + +// GetRegionWeight returns the Region weight of the store. +func (s *StoreInfo) GetRegionWeight() float64 { + return s.regionWeight +} + +// GetLastHeartbeatTS returns the last heartbeat timestamp of the store. +func (s *StoreInfo) GetLastHeartbeatTS() time.Time { + return s.lastHeartbeatTS +} + +const minWeight = 1e-6 + +// LeaderScore returns the store's leader score. +func (s *StoreInfo) LeaderScore(delta int64) float64 { + return float64(int64(s.GetLeaderCount())+delta) / math.Max(s.GetLeaderWeight(), minWeight) +} + +// RegionScore returns the store's region score. +func (s *StoreInfo) RegionScore() float64 { + return float64(s.GetRegionSize()) / math.Max(s.GetRegionWeight(), minWeight) +} + +// StorageSize returns store's used storage size reported from tikv. +func (s *StoreInfo) StorageSize() uint64 { + return s.GetUsedSize() +} + +// AvailableRatio is store's freeSpace/capacity. +func (s *StoreInfo) AvailableRatio() float64 { + if s.GetCapacity() == 0 { + return 0 + } + return float64(s.GetAvailable()) / float64(s.GetCapacity()) +} + +// IsLowSpace checks if the store is lack of space. +func (s *StoreInfo) IsLowSpace(lowSpaceRatio float64) bool { + return s.GetStoreStats() != nil && s.AvailableRatio() < 1-lowSpaceRatio +} + +// ResourceCount returns count of leader/region in the store. +func (s *StoreInfo) ResourceCount(kind ResourceKind) uint64 { + switch kind { + case LeaderKind: + return uint64(s.GetLeaderCount()) + case RegionKind: + return uint64(s.GetRegionCount()) + default: + return 0 + } +} + +// ResourceSize returns size of leader/region in the store +func (s *StoreInfo) ResourceSize(kind ResourceKind) int64 { + switch kind { + case LeaderKind: + return s.GetLeaderSize() + case RegionKind: + return s.GetRegionSize() + default: + return 0 + } +} + +// ResourceScore returns score of leader/region in the store. 
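Editor's note: the scoring helpers above are simple ratios — `LeaderScore` divides the (delta-adjusted) leader count by the leader weight, `RegionScore` divides the approximate region size by the region weight, and `IsLowSpace` flags a store once `AvailableRatio` drops below `1-lowSpaceRatio`. A worked sketch (not part of the commit; the function name is invented), assuming the `SetLeaderCount` / `SetRegionSize` options from `store_option.go`:

```go
package core

import "github.com/pingcap-incubator/tinykv/proto/pkg/metapb"

// exampleStoreScores (hypothetical) spells out the score arithmetic.
func exampleStoreScores() {
	store := NewStoreInfo(&metapb.Store{Id: 1},
		SetLeaderWeight(2.0), SetRegionWeight(0.5),
		SetLeaderCount(10), SetRegionSize(100),
	)
	_ = store.LeaderScore(0) // (10 + 0) / 2.0 == 5
	_ = store.RegionScore()  // 100 / 0.5      == 200
	// A pending leader transfer can be previewed with a non-zero delta:
	_ = store.LeaderScore(1) // (10 + 1) / 2.0 == 5.5
}
```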
+func (s *StoreInfo) ResourceScore(scheduleKind ScheduleKind, delta int64) float64 { + switch scheduleKind.Resource { + case LeaderKind: + return s.LeaderScore(delta) + case RegionKind: + return s.RegionScore() + default: + return 0 + } +} + +// ResourceWeight returns weight of leader/region in the score +func (s *StoreInfo) ResourceWeight(kind ResourceKind) float64 { + switch kind { + case LeaderKind: + leaderWeight := s.GetLeaderWeight() + if leaderWeight <= 0 { + return minWeight + } + return leaderWeight + case RegionKind: + regionWeight := s.GetRegionWeight() + if regionWeight <= 0 { + return minWeight + } + return regionWeight + default: + return 0 + } +} + +// GetStartTS returns the start timestamp. +func (s *StoreInfo) GetStartTS() time.Time { + return time.Unix(int64(s.GetStartTime()), 0) +} + +// GetUptime returns the uptime. +func (s *StoreInfo) GetUptime() time.Duration { + uptime := s.GetLastHeartbeatTS().Sub(s.GetStartTS()) + if uptime > 0 { + return uptime + } + return 0 +} + +var ( + // If a store's last heartbeat is storeDisconnectDuration ago, the store will + // be marked as disconnected state. The value should be greater than tikv's + // store heartbeat interval (default 10s). + storeDisconnectDuration = 20 * time.Second + storeUnhealthDuration = 10 * time.Minute +) + +// IsDisconnected checks if a store is disconnected, which means PD misses +// tikv's store heartbeat for a short time, maybe caused by process restart or +// temporary network failure. +func (s *StoreInfo) IsDisconnected() bool { + return s.DownTime() > storeDisconnectDuration +} + +// IsUnhealth checks if a store is unhealth. +func (s *StoreInfo) IsUnhealth() bool { + return s.DownTime() > storeUnhealthDuration +} + +type storeNotFoundErr struct { + storeID uint64 +} + +func (e storeNotFoundErr) Error() string { + return fmt.Sprintf("store %v not found", e.storeID) +} + +// NewStoreNotFoundErr is for log of store not found +func NewStoreNotFoundErr(storeID uint64) errcode.ErrorCode { + return errcode.NewNotFoundErr(storeNotFoundErr{storeID}) +} + +// StoresInfo contains information about all stores. +type StoresInfo struct { + stores map[uint64]*StoreInfo +} + +// NewStoresInfo create a StoresInfo with map of storeID to StoreInfo +func NewStoresInfo() *StoresInfo { + return &StoresInfo{ + stores: make(map[uint64]*StoreInfo), + } +} + +// GetStore returns a copy of the StoreInfo with the specified storeID. +func (s *StoresInfo) GetStore(storeID uint64) *StoreInfo { + store, ok := s.stores[storeID] + if !ok { + return nil + } + return store +} + +// TakeStore returns the point of the origin StoreInfo with the specified storeID. +func (s *StoresInfo) TakeStore(storeID uint64) *StoreInfo { + store, ok := s.stores[storeID] + if !ok { + return nil + } + return store +} + +// SetStore sets a StoreInfo with storeID. +func (s *StoresInfo) SetStore(store *StoreInfo) { + s.stores[store.GetID()] = store +} + +// BlockStore blocks a StoreInfo with storeID. +func (s *StoresInfo) BlockStore(storeID uint64) errcode.ErrorCode { + op := errcode.Op("store.block") + store, ok := s.stores[storeID] + if !ok { + return op.AddTo(NewStoreNotFoundErr(storeID)) + } + if store.IsBlocked() { + return op.AddTo(StoreBlockedErr{StoreID: storeID}) + } + s.stores[storeID] = store.Clone(SetStoreBlock()) + return nil +} + +// UnblockStore unblocks a StoreInfo with storeID. 
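// Illustrative sketch (not part of this commit): typical use of StoresInfo as
// the in-memory store registry. GetStore on an unknown ID returns nil rather
// than an error, while BlockStore reports the not-found / already-blocked
// error codes built above, so callers check accordingly. Updates go through
// Clone-based setters, so previously fetched StoreInfo copies are unaffected.
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
	"github.com/pingcap-incubator/tinykv/scheduler/server/core"
)

func main() {
	stores := core.NewStoresInfo()
	stores.SetStore(core.NewStoreInfo(&metapb.Store{Id: 1, Address: "mock://tikv-1"}))

	if s := stores.GetStore(1); s != nil {
		fmt.Println("found store", s.GetID(), "blocked:", s.IsBlocked())
	}
	if stores.GetStore(42) == nil {
		fmt.Println("store 42 is unknown")
	}

	// Blocking twice surfaces StoreBlockedErr the second time; blocking an
	// unknown ID surfaces the store-not-found error code.
	if err := stores.BlockStore(1); err != nil {
		fmt.Println("block failed:", err)
	}
	fmt.Println("blocked now:", stores.GetStore(1).IsBlocked()) // true
}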
+func (s *StoresInfo) UnblockStore(storeID uint64) { + store, ok := s.stores[storeID] + if !ok { + log.Fatal("store is unblocked, but it is not found", + zap.Uint64("store-id", storeID)) + } + s.stores[storeID] = store.Clone(SetStoreUnBlock()) +} + +// AttachAvailableFunc attaches f to a specific store. +func (s *StoresInfo) AttachAvailableFunc(storeID uint64, f func() bool) { + if store, ok := s.stores[storeID]; ok { + s.stores[storeID] = store.Clone(SetAvailableFunc(f)) + } +} + +// GetStores gets a complete set of StoreInfo. +func (s *StoresInfo) GetStores() []*StoreInfo { + stores := make([]*StoreInfo, 0, len(s.stores)) + for _, store := range s.stores { + stores = append(stores, store) + } + return stores +} + +// GetMetaStores gets a complete set of metapb.Store. +func (s *StoresInfo) GetMetaStores() []*metapb.Store { + stores := make([]*metapb.Store, 0, len(s.stores)) + for _, store := range s.stores { + stores = append(stores, store.GetMeta()) + } + return stores +} + +// DeleteStore deletes tombstone record form store +func (s *StoresInfo) DeleteStore(store *StoreInfo) { + delete(s.stores, store.GetID()) +} + +// GetStoreCount returns the total count of storeInfo. +func (s *StoresInfo) GetStoreCount() int { + return len(s.stores) +} + +// SetLeaderCount sets the leader count to a storeInfo. +func (s *StoresInfo) SetLeaderCount(storeID uint64, leaderCount int) { + if store, ok := s.stores[storeID]; ok { + s.stores[storeID] = store.Clone(SetLeaderCount(leaderCount)) + } +} + +// SetRegionCount sets the region count to a storeInfo. +func (s *StoresInfo) SetRegionCount(storeID uint64, regionCount int) { + if store, ok := s.stores[storeID]; ok { + s.stores[storeID] = store.Clone(SetRegionCount(regionCount)) + } +} + +// SetPendingPeerCount sets the pending count to a storeInfo. +func (s *StoresInfo) SetPendingPeerCount(storeID uint64, pendingPeerCount int) { + if store, ok := s.stores[storeID]; ok { + s.stores[storeID] = store.Clone(SetPendingPeerCount(pendingPeerCount)) + } +} + +// SetLeaderSize sets the leader size to a storeInfo. +func (s *StoresInfo) SetLeaderSize(storeID uint64, leaderSize int64) { + if store, ok := s.stores[storeID]; ok { + s.stores[storeID] = store.Clone(SetLeaderSize(leaderSize)) + } +} + +// SetRegionSize sets the region size to a storeInfo. +func (s *StoresInfo) SetRegionSize(storeID uint64, regionSize int64) { + if store, ok := s.stores[storeID]; ok { + s.stores[storeID] = store.Clone(SetRegionSize(regionSize)) + } +} + +// UpdateStoreStatus updates the information of the store. +func (s *StoresInfo) UpdateStoreStatus(storeID uint64, leaderCount int, regionCount int, pendingPeerCount int, leaderSize int64, regionSize int64) { + if store, ok := s.stores[storeID]; ok { + newStore := store.Clone(SetLeaderCount(leaderCount), + SetRegionCount(regionCount), + SetPendingPeerCount(pendingPeerCount), + SetLeaderSize(leaderSize), + SetRegionSize(regionSize)) + s.SetStore(newStore) + } +} diff --git a/scheduler/server/core/store_option.go b/scheduler/server/core/store_option.go new file mode 100644 index 00000000..90c49056 --- /dev/null +++ b/scheduler/server/core/store_option.go @@ -0,0 +1,127 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" +) + +// StoreCreateOption is used to create store. +type StoreCreateOption func(region *StoreInfo) + +// SetStoreAddress sets the address for the store. +func SetStoreAddress(address string) StoreCreateOption { + return func(store *StoreInfo) { + meta := proto.Clone(store.meta).(*metapb.Store) + meta.Address = address + store.meta = meta + } +} + +// SetStoreState sets the state for the store. +func SetStoreState(state metapb.StoreState) StoreCreateOption { + return func(store *StoreInfo) { + meta := proto.Clone(store.meta).(*metapb.Store) + meta.State = state + store.meta = meta + } +} + +// SetStoreBlock stops balancer from selecting the store. +func SetStoreBlock() StoreCreateOption { + return func(store *StoreInfo) { + store.blocked = true + } +} + +// SetStoreUnBlock allows balancer to select the store. +func SetStoreUnBlock() StoreCreateOption { + return func(store *StoreInfo) { + store.blocked = false + } +} + +// SetLeaderCount sets the leader count for the store. +func SetLeaderCount(leaderCount int) StoreCreateOption { + return func(store *StoreInfo) { + store.leaderCount = leaderCount + } +} + +// SetRegionCount sets the Region count for the store. +func SetRegionCount(regionCount int) StoreCreateOption { + return func(store *StoreInfo) { + store.regionCount = regionCount + } +} + +// SetPendingPeerCount sets the pending peer count for the store. +func SetPendingPeerCount(pendingPeerCount int) StoreCreateOption { + return func(store *StoreInfo) { + store.pendingPeerCount = pendingPeerCount + } +} + +// SetLeaderSize sets the leader size for the store. +func SetLeaderSize(leaderSize int64) StoreCreateOption { + return func(store *StoreInfo) { + store.leaderSize = leaderSize + } +} + +// SetRegionSize sets the Region size for the store. +func SetRegionSize(regionSize int64) StoreCreateOption { + return func(store *StoreInfo) { + store.regionSize = regionSize + } +} + +// SetLeaderWeight sets the leader weight for the store. +func SetLeaderWeight(leaderWeight float64) StoreCreateOption { + return func(store *StoreInfo) { + store.leaderWeight = leaderWeight + } +} + +// SetRegionWeight sets the Region weight for the store. +func SetRegionWeight(regionWeight float64) StoreCreateOption { + return func(store *StoreInfo) { + store.regionWeight = regionWeight + } +} + +// SetLastHeartbeatTS sets the time of last heartbeat for the store. +func SetLastHeartbeatTS(lastHeartbeatTS time.Time) StoreCreateOption { + return func(store *StoreInfo) { + store.lastHeartbeatTS = lastHeartbeatTS + } +} + +// SetStoreStats sets the statistics information for the store. +func SetStoreStats(stats *schedulerpb.StoreStats) StoreCreateOption { + return func(store *StoreInfo) { + store.stats = stats + } +} + +// SetAvailableFunc sets a customize function for the store. The function f returns true if the store limit is not exceeded. 
+func SetAvailableFunc(f func() bool) StoreCreateOption { + return func(store *StoreInfo) { + store.available = f + } +} diff --git a/scheduler/server/core/store_test.go b/scheduler/server/core/store_test.go new file mode 100644 index 00000000..91cacfed --- /dev/null +++ b/scheduler/server/core/store_test.go @@ -0,0 +1,56 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + . "github.com/pingcap/check" +) + +var _ = Suite(&testConcurrencySuite{}) + +type testConcurrencySuite struct{} + +func (s *testConcurrencySuite) TestCloneStore(c *C) { + meta := &metapb.Store{Id: 1, Address: "mock://tikv-1"} + store := NewStoreInfo(meta) + start := time.Now() + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + for { + if time.Since(start) > time.Second { + break + } + store.GetMeta().GetState() + } + }() + go func() { + defer wg.Done() + for { + if time.Since(start) > time.Second { + break + } + store.Clone( + SetStoreState(metapb.StoreState_Up), + SetLastHeartbeatTS(time.Now()), + ) + } + }() + wg.Wait() +} diff --git a/scheduler/server/core/test_util.go b/scheduler/server/core/test_util.go new file mode 100644 index 00000000..7584e290 --- /dev/null +++ b/scheduler/server/core/test_util.go @@ -0,0 +1,114 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
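// Illustrative sketch (not part of this commit): SetAvailableFunc and
// StoresInfo.AttachAvailableFunc let a caller plug a store-limit check into a
// store without StoreInfo knowing anything about rate limiting; schedulers
// then gate on IsAvailable. The closure below is a toy budget counter standing
// in for whatever limiter the scheduler actually uses.
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/proto/pkg/metapb"
	"github.com/pingcap-incubator/tinykv/scheduler/server/core"
)

func main() {
	stores := core.NewStoresInfo()
	stores.SetStore(core.NewStoreInfo(&metapb.Store{Id: 1}))

	budget := 2
	stores.AttachAvailableFunc(1, func() bool { return budget > 0 })

	for i := 0; i < 3; i++ {
		s := stores.GetStore(1)
		if !s.IsAvailable() {
			fmt.Println("store 1 hit its limit, skip it")
			continue
		}
		budget-- // pretend we scheduled an operator against this store
		fmt.Println("scheduled operator", i, "against store 1")
	}
}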
+ +package core + +import ( + "math" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" +) + +// SplitRegions split a set of RegionInfo by the middle of regionKey +func SplitRegions(regions []*RegionInfo) []*RegionInfo { + results := make([]*RegionInfo, 0, len(regions)*2) + for _, region := range regions { + start, end := byte(0), byte(math.MaxUint8) + if len(region.GetStartKey()) > 0 { + start = region.GetStartKey()[0] + } + if len(region.GetEndKey()) > 0 { + end = region.GetEndKey()[0] + } + middle := []byte{start/2 + end/2} + left := region.Clone() + left.meta.Id = region.GetID() + uint64(len(regions)) + left.meta.EndKey = middle + left.meta.RegionEpoch.Version++ + right := region.Clone() + right.meta.Id = region.GetID() + uint64(len(regions)*2) + right.meta.StartKey = middle + right.meta.RegionEpoch.Version++ + results = append(results, left, right) + } + return results +} + +// MergeRegions merge a set of RegionInfo by regionKey +func MergeRegions(regions []*RegionInfo) []*RegionInfo { + results := make([]*RegionInfo, 0, len(regions)/2) + for i := 0; i < len(regions); i += 2 { + left := regions[i] + right := regions[i] + if i+1 < len(regions) { + right = regions[i+1] + } + region := &RegionInfo{meta: &metapb.Region{ + Id: left.GetID() + uint64(len(regions)), + StartKey: left.GetStartKey(), + EndKey: right.GetEndKey(), + }} + if left.GetRegionEpoch().GetVersion() > right.GetRegionEpoch().GetVersion() { + region.meta.RegionEpoch = left.GetRegionEpoch() + } else { + region.meta.RegionEpoch = right.GetRegionEpoch() + } + region.meta.RegionEpoch.Version++ + results = append(results, region) + } + return results +} + +// NewTestRegionInfo creates a RegionInfo for test. +func NewTestRegionInfo(start, end []byte) *RegionInfo { + return &RegionInfo{meta: &metapb.Region{ + StartKey: start, + EndKey: end, + RegionEpoch: &metapb.RegionEpoch{}, + }} +} + +// NewStoreInfoWithIdAndCount is create a store with specified id and regionCount. +func NewStoreInfoWithIdAndCount(id uint64, regionCount int) *StoreInfo { + stats := &schedulerpb.StoreStats{} + stats.Capacity = uint64(1024) + stats.Available = uint64(1024) + store := NewStoreInfo( + &metapb.Store{ + Id: id, + }, + SetStoreStats(stats), + SetRegionCount(regionCount), + SetRegionSize(int64(regionCount)*10), + ) + return store +} + +// NewStoreInfoWithSizeCount is create a store with size and count. +func NewStoreInfoWithSizeCount(id uint64, regionCount, leaderCount int, regionSize, leaderSize int64) *StoreInfo { + stats := &schedulerpb.StoreStats{} + stats.Capacity = uint64(1024) + stats.Available = uint64(1024) + store := NewStoreInfo( + &metapb.Store{ + Id: id, + }, + SetStoreStats(stats), + SetRegionCount(regionCount), + SetRegionSize(regionSize), + SetLeaderCount(leaderCount), + SetLeaderSize(leaderSize), + ) + return store +} diff --git a/scheduler/server/grpc_service.go b/scheduler/server/grpc_service.go new file mode 100644 index 00000000..41a250da --- /dev/null +++ b/scheduler/server/grpc_service.go @@ -0,0 +1,708 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "io" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const slowThreshold = 5 * time.Millisecond + +// notLeaderError is returned when current server is not the leader and not possible to process request. +// TODO: work as proxy. +var notLeaderError = status.Errorf(codes.Unavailable, "not leader") + +// GetMembers implements gRPC PDServer. +func (s *Server) GetMembers(context.Context, *schedulerpb.GetMembersRequest) (*schedulerpb.GetMembersResponse, error) { + if s.IsClosed() { + return nil, status.Errorf(codes.Unknown, "server not started") + } + members, err := GetMembers(s.GetClient()) + if err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + var etcdLeader *schedulerpb.Member + leadID := s.member.GetEtcdLeader() + for _, m := range members { + if m.MemberId == leadID { + etcdLeader = m + break + } + } + + return &schedulerpb.GetMembersResponse{ + Header: s.header(), + Members: members, + Leader: s.member.GetLeader(), + EtcdLeader: etcdLeader, + }, nil +} + +// Tso implements gRPC PDServer. +func (s *Server) Tso(stream schedulerpb.Scheduler_TsoServer) error { + for { + request, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return errors.WithStack(err) + } + start := time.Now() + // TSO uses leader lease to determine validity. No need to check leader here. + if s.IsClosed() { + return status.Errorf(codes.Unknown, "server not started") + } + if request.GetHeader().GetClusterId() != s.clusterID { + return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, request.GetHeader().GetClusterId()) + } + count := request.GetCount() + ts, err := s.tso.GetRespTS(count) + if err != nil { + return status.Errorf(codes.Unknown, err.Error()) + } + + elapsed := time.Since(start) + if elapsed > slowThreshold { + log.Warn("get timestamp too slow", zap.Duration("cost", elapsed)) + } + response := &schedulerpb.TsoResponse{ + Header: s.header(), + Timestamp: &ts, + Count: count, + } + if err := stream.Send(response); err != nil { + return errors.WithStack(err) + } + } +} + +// Bootstrap implements gRPC PDServer. 
+func (s *Server) Bootstrap(ctx context.Context, request *schedulerpb.BootstrapRequest) (*schedulerpb.BootstrapResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster != nil { + err := &schedulerpb.Error{ + Type: schedulerpb.ErrorType_ALREADY_BOOTSTRAPPED, + Message: "cluster is already bootstrapped", + } + return &schedulerpb.BootstrapResponse{ + Header: s.errorHeader(err), + }, nil + } + if _, err := s.bootstrapCluster(request); err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + return &schedulerpb.BootstrapResponse{ + Header: s.header(), + }, nil +} + +// IsBootstrapped implements gRPC PDServer. +func (s *Server) IsBootstrapped(ctx context.Context, request *schedulerpb.IsBootstrappedRequest) (*schedulerpb.IsBootstrappedResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + return &schedulerpb.IsBootstrappedResponse{ + Header: s.header(), + Bootstrapped: cluster != nil, + }, nil +} + +// AllocID implements gRPC PDServer. +func (s *Server) AllocID(ctx context.Context, request *schedulerpb.AllocIDRequest) (*schedulerpb.AllocIDResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + // We can use an allocator for all types ID allocation. + id, err := s.idAllocator.Alloc() + if err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + return &schedulerpb.AllocIDResponse{ + Header: s.header(), + Id: id, + }, nil +} + +// GetStore implements gRPC PDServer. +func (s *Server) GetStore(ctx context.Context, request *schedulerpb.GetStoreRequest) (*schedulerpb.GetStoreResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetStoreResponse{Header: s.notBootstrappedHeader()}, nil + } + + storeID := request.GetStoreId() + store := cluster.GetStore(storeID) + if store == nil { + return nil, status.Errorf(codes.Unknown, "invalid store ID %d, not found", storeID) + } + return &schedulerpb.GetStoreResponse{ + Header: s.header(), + Store: store.GetMeta(), + Stats: store.GetStoreStats(), + }, nil +} + +// checkStore2 returns an error response if the store exists and is in tombstone state. +// It returns nil if it can't get the store. +// Copied from server/command.go +func checkStore2(cluster *RaftCluster, storeID uint64) *schedulerpb.Error { + store := cluster.GetStore(storeID) + if store != nil { + if store.GetState() == metapb.StoreState_Tombstone { + return &schedulerpb.Error{ + Type: schedulerpb.ErrorType_STORE_TOMBSTONE, + Message: "store is tombstone", + } + } + } + return nil +} + +// PutStore implements gRPC PDServer. 
+func (s *Server) PutStore(ctx context.Context, request *schedulerpb.PutStoreRequest) (*schedulerpb.PutStoreResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.PutStoreResponse{Header: s.notBootstrappedHeader()}, nil + } + + store := request.GetStore() + if pberr := checkStore2(cluster, store.GetId()); pberr != nil { + return &schedulerpb.PutStoreResponse{ + Header: s.errorHeader(pberr), + }, nil + } + + if err := cluster.putStore(store); err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + log.Info("put store ok", zap.Stringer("store", store)) + + return &schedulerpb.PutStoreResponse{ + Header: s.header(), + }, nil +} + +// GetAllStores implements gRPC PDServer. +func (s *Server) GetAllStores(ctx context.Context, request *schedulerpb.GetAllStoresRequest) (*schedulerpb.GetAllStoresResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetAllStoresResponse{Header: s.notBootstrappedHeader()}, nil + } + + // Don't return tombstone stores. + var stores []*metapb.Store + if request.GetExcludeTombstoneStores() { + for _, store := range cluster.GetMetaStores() { + if store.GetState() != metapb.StoreState_Tombstone { + stores = append(stores, store) + } + } + } else { + stores = cluster.GetMetaStores() + } + + return &schedulerpb.GetAllStoresResponse{ + Header: s.header(), + Stores: stores, + }, nil +} + +// StoreHeartbeat implements gRPC PDServer. +func (s *Server) StoreHeartbeat(ctx context.Context, request *schedulerpb.StoreHeartbeatRequest) (*schedulerpb.StoreHeartbeatResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + if request.GetStats() == nil { + return nil, errors.Errorf("invalid store heartbeat command, but %v", request) + } + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.StoreHeartbeatResponse{Header: s.notBootstrappedHeader()}, nil + } + + if pberr := checkStore2(cluster, request.GetStats().GetStoreId()); pberr != nil { + return &schedulerpb.StoreHeartbeatResponse{ + Header: s.errorHeader(pberr), + }, nil + } + + err := cluster.handleStoreHeartbeat(request.Stats) + if err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + return &schedulerpb.StoreHeartbeatResponse{ + Header: s.header(), + }, nil +} + +const regionHeartbeatSendTimeout = 5 * time.Second + +var errSendRegionHeartbeatTimeout = errors.New("send region heartbeat timeout") + +// heartbeatServer wraps Scheduler_RegionHeartbeatServer to ensure when any error +// occurs on Send() or Recv(), both endpoints will be closed. 
+type heartbeatServer struct { + stream schedulerpb.Scheduler_RegionHeartbeatServer + closed int32 +} + +func (s *heartbeatServer) Send(m *schedulerpb.RegionHeartbeatResponse) error { + if atomic.LoadInt32(&s.closed) == 1 { + return io.EOF + } + done := make(chan error, 1) + go func() { done <- s.stream.Send(m) }() + select { + case err := <-done: + if err != nil { + atomic.StoreInt32(&s.closed, 1) + } + return errors.WithStack(err) + case <-time.After(regionHeartbeatSendTimeout): + atomic.StoreInt32(&s.closed, 1) + return errors.WithStack(errSendRegionHeartbeatTimeout) + } +} + +func (s *heartbeatServer) Recv() (*schedulerpb.RegionHeartbeatRequest, error) { + if atomic.LoadInt32(&s.closed) == 1 { + return nil, io.EOF + } + req, err := s.stream.Recv() + if err != nil { + atomic.StoreInt32(&s.closed, 1) + return nil, errors.WithStack(err) + } + return req, nil +} + +// RegionHeartbeat implements gRPC PDServer. +func (s *Server) RegionHeartbeat(stream schedulerpb.Scheduler_RegionHeartbeatServer) error { + server := &heartbeatServer{stream: stream} + cluster := s.GetRaftCluster() + if cluster == nil { + resp := &schedulerpb.RegionHeartbeatResponse{ + Header: s.notBootstrappedHeader(), + } + err := server.Send(resp) + return errors.WithStack(err) + } + + var lastBind time.Time + for { + request, err := server.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return errors.WithStack(err) + } + + if err = s.validateRequest(request.GetHeader()); err != nil { + return err + } + + storeID := request.GetLeader().GetStoreId() + store := cluster.GetStore(storeID) + if store == nil { + return errors.Errorf("invalid store ID %d, not found", storeID) + } + + hbStreams := cluster.GetHeartbeatStreams() + + if time.Since(lastBind) > s.cfg.HeartbeatStreamBindInterval.Duration { + hbStreams.bindStream(storeID, server) + lastBind = time.Now() + } + + region := core.RegionFromHeartbeat(request) + if region.GetLeader() == nil { + log.Error("invalid request, the leader is nil", zap.Reflect("reqeust", request)) + continue + } + if region.GetID() == 0 { + msg := fmt.Sprintf("invalid request region, %v", request) + hbStreams.sendErr(schedulerpb.ErrorType_UNKNOWN, msg, request.GetLeader()) + continue + } + + err = cluster.HandleRegionHeartbeat(region) + if err != nil { + msg := err.Error() + hbStreams.sendErr(schedulerpb.ErrorType_UNKNOWN, msg, request.GetLeader()) + } + } +} + +// GetRegion implements gRPC PDServer. 
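// Illustrative sketch (not part of this commit): the timeout pattern used by
// heartbeatServer.Send above, restated over a toy stream type. A gRPC Send can
// block indefinitely, so the real code runs it in a goroutine and races it
// against a timer; on timeout or error it flips an atomic "closed" flag so
// later Send/Recv calls fail fast with io.EOF instead of piling up goroutines.
// The 100ms timeout and slowStream type are stand-ins for demonstration.
package main

import (
	"errors"
	"fmt"
	"io"
	"sync/atomic"
	"time"
)

type stream interface{ Send(msg string) error }

// slowStream simulates a peer that takes too long to accept a message.
type slowStream struct{ delay time.Duration }

func (s slowStream) Send(string) error { time.Sleep(s.delay); return nil }

type guardedStream struct {
	stream stream
	closed int32
}

func (g *guardedStream) Send(msg string) error {
	if atomic.LoadInt32(&g.closed) == 1 {
		return io.EOF
	}
	done := make(chan error, 1) // buffered so the goroutine never leaks on timeout
	go func() { done <- g.stream.Send(msg) }()
	select {
	case err := <-done:
		if err != nil {
			atomic.StoreInt32(&g.closed, 1)
		}
		return err
	case <-time.After(100 * time.Millisecond): // stands in for regionHeartbeatSendTimeout
		atomic.StoreInt32(&g.closed, 1)
		return errors.New("send timeout")
	}
}

func main() {
	g := &guardedStream{stream: slowStream{delay: time.Second}}
	fmt.Println(g.Send("heartbeat")) // send timeout
	fmt.Println(g.Send("heartbeat")) // EOF: the stream is now treated as closed
}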
+func (s *Server) GetRegion(ctx context.Context, request *schedulerpb.GetRegionRequest) (*schedulerpb.GetRegionResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetRegionResponse{Header: s.notBootstrappedHeader()}, nil + } + region, leader := cluster.GetRegionByKey(request.GetRegionKey()) + return &schedulerpb.GetRegionResponse{ + Header: s.header(), + Region: region, + Leader: leader, + }, nil +} + +// GetPrevRegion implements gRPC PDServer +func (s *Server) GetPrevRegion(ctx context.Context, request *schedulerpb.GetRegionRequest) (*schedulerpb.GetRegionResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetRegionResponse{Header: s.notBootstrappedHeader()}, nil + } + + region, leader := cluster.GetPrevRegionByKey(request.GetRegionKey()) + return &schedulerpb.GetRegionResponse{ + Header: s.header(), + Region: region, + Leader: leader, + }, nil +} + +// GetRegionByID implements gRPC PDServer. +func (s *Server) GetRegionByID(ctx context.Context, request *schedulerpb.GetRegionByIDRequest) (*schedulerpb.GetRegionResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetRegionResponse{Header: s.notBootstrappedHeader()}, nil + } + id := request.GetRegionId() + region, leader := cluster.GetRegionByID(id) + return &schedulerpb.GetRegionResponse{ + Header: s.header(), + Region: region, + Leader: leader, + }, nil +} + +// ScanRegions implements gRPC PDServer. +func (s *Server) ScanRegions(ctx context.Context, request *schedulerpb.ScanRegionsRequest) (*schedulerpb.ScanRegionsResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.ScanRegionsResponse{Header: s.notBootstrappedHeader()}, nil + } + regions := cluster.ScanRegions(request.GetStartKey(), request.GetEndKey(), int(request.GetLimit())) + resp := &schedulerpb.ScanRegionsResponse{Header: s.header()} + for _, r := range regions { + leader := r.GetLeader() + if leader == nil { + leader = &metapb.Peer{} + } + resp.Regions = append(resp.Regions, r.GetMeta()) + resp.Leaders = append(resp.Leaders, leader) + } + return resp, nil +} + +// AskSplit implements gRPC PDServer. +func (s *Server) AskSplit(ctx context.Context, request *schedulerpb.AskSplitRequest) (*schedulerpb.AskSplitResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.AskSplitResponse{Header: s.notBootstrappedHeader()}, nil + } + if request.GetRegion() == nil { + return nil, errors.New("missing region for split") + } + req := &schedulerpb.AskSplitRequest{ + Region: request.Region, + } + split, err := cluster.handleAskSplit(req) + if err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + return &schedulerpb.AskSplitResponse{ + Header: s.header(), + NewRegionId: split.NewRegionId, + NewPeerIds: split.NewPeerIds, + }, nil +} + +// ReportSplit implements gRPC PDServer. 
+func (s *Server) ReportSplit(ctx context.Context, request *schedulerpb.ReportSplitRequest) (*schedulerpb.ReportSplitResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.ReportSplitResponse{Header: s.notBootstrappedHeader()}, nil + } + _, err := cluster.handleReportSplit(request) + if err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + return &schedulerpb.ReportSplitResponse{ + Header: s.header(), + }, nil +} + +// GetClusterConfig implements gRPC PDServer. +func (s *Server) GetClusterConfig(ctx context.Context, request *schedulerpb.GetClusterConfigRequest) (*schedulerpb.GetClusterConfigResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetClusterConfigResponse{Header: s.notBootstrappedHeader()}, nil + } + return &schedulerpb.GetClusterConfigResponse{ + Header: s.header(), + Cluster: cluster.GetConfig(), + }, nil +} + +// PutClusterConfig implements gRPC PDServer. +func (s *Server) PutClusterConfig(ctx context.Context, request *schedulerpb.PutClusterConfigRequest) (*schedulerpb.PutClusterConfigResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.PutClusterConfigResponse{Header: s.notBootstrappedHeader()}, nil + } + conf := request.GetCluster() + if err := cluster.putConfig(conf); err != nil { + return nil, status.Errorf(codes.Unknown, err.Error()) + } + + log.Info("put cluster config ok", zap.Reflect("config", conf)) + + return &schedulerpb.PutClusterConfigResponse{ + Header: s.header(), + }, nil +} + +// ScatterRegion implements gRPC PDServer. +func (s *Server) ScatterRegion(ctx context.Context, request *schedulerpb.ScatterRegionRequest) (*schedulerpb.ScatterRegionResponse, error) { + // if err := s.validateRequest(request.GetHeader()); err != nil { + // return nil, err + // } + + // cluster := s.GetRaftCluster() + // if cluster == nil { + // return &schedulerpb.ScatterRegionResponse{Header: s.notBootstrappedHeader()}, nil + // } + + // region := cluster.GetRegion(request.GetRegionId()) + // if region == nil { + // if request.GetRegion() == nil { + // return nil, errors.Errorf("region %d not found", request.GetRegionId()) + // } + // region = core.NewRegionInfo(request.GetRegion(), request.GetLeader()) + // } + + // if cluster.IsRegionHot(region) { + // return nil, errors.Errorf("region %d is a hot region", region.GetID()) + // } + + // co := cluster.GetCoordinator() + // op, err := co.regionScatterer.Scatter(region) + // if err != nil { + // return nil, err + // } + // if op != nil { + // co.opController.AddOperator(op) + // } + + return &schedulerpb.ScatterRegionResponse{ + Header: s.header(), + }, nil +} + +// GetGCSafePoint implements gRPC PDServer. 
+func (s *Server) GetGCSafePoint(ctx context.Context, request *schedulerpb.GetGCSafePointRequest) (*schedulerpb.GetGCSafePointResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetGCSafePointResponse{Header: s.notBootstrappedHeader()}, nil + } + + safePoint, err := s.storage.LoadGCSafePoint() + if err != nil { + return nil, err + } + + return &schedulerpb.GetGCSafePointResponse{ + Header: s.header(), + SafePoint: safePoint, + }, nil +} + +// UpdateGCSafePoint implements gRPC PDServer. +func (s *Server) UpdateGCSafePoint(ctx context.Context, request *schedulerpb.UpdateGCSafePointRequest) (*schedulerpb.UpdateGCSafePointResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.UpdateGCSafePointResponse{Header: s.notBootstrappedHeader()}, nil + } + + oldSafePoint, err := s.storage.LoadGCSafePoint() + if err != nil { + return nil, err + } + + newSafePoint := request.SafePoint + + // Only save the safe point if it's greater than the previous one + if newSafePoint > oldSafePoint { + if err := s.storage.SaveGCSafePoint(newSafePoint); err != nil { + return nil, err + } + log.Info("updated gc safe point", + zap.Uint64("safe-point", newSafePoint)) + } else if newSafePoint < oldSafePoint { + log.Warn("trying to update gc safe point", + zap.Uint64("old-safe-point", oldSafePoint), + zap.Uint64("new-safe-point", newSafePoint)) + newSafePoint = oldSafePoint + } + + return &schedulerpb.UpdateGCSafePointResponse{ + Header: s.header(), + NewSafePoint: newSafePoint, + }, nil +} + +// GetOperator gets information about the operator belonging to the speicfy region. +func (s *Server) GetOperator(ctx context.Context, request *schedulerpb.GetOperatorRequest) (*schedulerpb.GetOperatorResponse, error) { + if err := s.validateRequest(request.GetHeader()); err != nil { + return nil, err + } + + cluster := s.GetRaftCluster() + if cluster == nil { + return &schedulerpb.GetOperatorResponse{Header: s.notBootstrappedHeader()}, nil + } + + opController := cluster.coordinator.opController + requestID := request.GetRegionId() + r := opController.GetOperatorStatus(requestID) + if r == nil { + header := s.errorHeader(&schedulerpb.Error{ + Type: schedulerpb.ErrorType_REGION_NOT_FOUND, + Message: "Not Found", + }) + return &schedulerpb.GetOperatorResponse{Header: header}, nil + } + + return &schedulerpb.GetOperatorResponse{ + Header: s.header(), + RegionId: requestID, + Desc: []byte(r.Op.Desc()), + Kind: []byte(r.Op.Kind().String()), + Status: r.Status, + }, nil +} + +// validateRequest checks if Server is leader and clusterID is matched. +// TODO: Call it in gRPC intercepter. 
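// Illustrative sketch (not part of this commit): the monotonic-update rule
// enforced by UpdateGCSafePoint above, pulled out as a pure function. The
// handler persists a new GC safe point only when it is strictly greater than
// the stored one; a smaller value is ignored (and logged) and the caller gets
// the current safe point back in NewSafePoint.
package main

import "fmt"

// advanceSafePoint returns the safe point that should be reported back, plus
// whether a save to storage is needed.
func advanceSafePoint(old, proposed uint64) (uint64, bool) {
	if proposed > old {
		return proposed, true // persist the larger value
	}
	return old, false // never move the safe point backwards
}

func main() {
	sp := uint64(100)
	if next, save := advanceSafePoint(sp, 120); save {
		sp = next // the real handler calls storage.SaveGCSafePoint here
	}
	fmt.Println(sp) // 120

	next, save := advanceSafePoint(sp, 90) // stale request
	fmt.Println(next, save)                // 120 false
}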
+func (s *Server) validateRequest(header *schedulerpb.RequestHeader) error { + if s.IsClosed() || !s.member.IsLeader() { + return errors.WithStack(notLeaderError) + } + if header.GetClusterId() != s.clusterID { + return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, header.GetClusterId()) + } + return nil +} + +func (s *Server) header() *schedulerpb.ResponseHeader { + return &schedulerpb.ResponseHeader{ClusterId: s.clusterID} +} + +func (s *Server) errorHeader(err *schedulerpb.Error) *schedulerpb.ResponseHeader { + return &schedulerpb.ResponseHeader{ + ClusterId: s.clusterID, + Error: err, + } +} + +func (s *Server) notBootstrappedHeader() *schedulerpb.ResponseHeader { + return s.errorHeader(&schedulerpb.Error{ + Type: schedulerpb.ErrorType_NOT_BOOTSTRAPPED, + Message: "cluster is not bootstrapped", + }) +} diff --git a/scheduler/server/heartbeat_streams.go b/scheduler/server/heartbeat_streams.go new file mode 100644 index 00000000..075eca43 --- /dev/null +++ b/scheduler/server/heartbeat_streams.go @@ -0,0 +1,171 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +const heartbeatStreamKeepAliveInterval = time.Minute + +type heartbeatStream interface { + Send(*schedulerpb.RegionHeartbeatResponse) error +} + +type streamUpdate struct { + storeID uint64 + stream heartbeatStream +} + +type heartbeatStreams struct { + wg sync.WaitGroup + hbStreamCtx context.Context + hbStreamCancel context.CancelFunc + clusterID uint64 + streams map[uint64]heartbeatStream + msgCh chan *schedulerpb.RegionHeartbeatResponse + streamCh chan streamUpdate + cluster *RaftCluster +} + +func newHeartbeatStreams(ctx context.Context, clusterID uint64, cluster *RaftCluster) *heartbeatStreams { + hbStreamCtx, hbStreamCancel := context.WithCancel(ctx) + hs := &heartbeatStreams{ + hbStreamCtx: hbStreamCtx, + hbStreamCancel: hbStreamCancel, + clusterID: clusterID, + streams: make(map[uint64]heartbeatStream), + msgCh: make(chan *schedulerpb.RegionHeartbeatResponse, regionheartbeatSendChanCap), + streamCh: make(chan streamUpdate, 1), + cluster: cluster, + } + hs.wg.Add(1) + go hs.run() + return hs +} + +func (s *heartbeatStreams) run() { + defer logutil.LogPanic() + + defer s.wg.Done() + + keepAliveTicker := time.NewTicker(heartbeatStreamKeepAliveInterval) + defer keepAliveTicker.Stop() + + keepAlive := &schedulerpb.RegionHeartbeatResponse{Header: &schedulerpb.ResponseHeader{ClusterId: s.clusterID}} + + for { + select { + case update := <-s.streamCh: + s.streams[update.storeID] = update.stream + case msg := <-s.msgCh: + storeID := msg.GetTargetPeer().GetStoreId() + store := s.cluster.GetStore(storeID) + if store == nil { + log.Error("failed to get store", + 
zap.Uint64("region-id", msg.RegionId), + zap.Uint64("store-id", storeID)) + delete(s.streams, storeID) + continue + } + if stream, ok := s.streams[storeID]; ok { + if err := stream.Send(msg); err != nil { + log.Error("send heartbeat message fail", + zap.Uint64("region-id", msg.RegionId), zap.Error(err)) + delete(s.streams, storeID) + } + } else { + log.Debug("heartbeat stream not found, skip send message", + zap.Uint64("region-id", msg.RegionId), + zap.Uint64("store-id", storeID)) + } + case <-keepAliveTicker.C: + for storeID, stream := range s.streams { + store := s.cluster.GetStore(storeID) + if store == nil { + log.Error("failed to get store", zap.Uint64("store-id", storeID)) + delete(s.streams, storeID) + continue + } + if err := stream.Send(keepAlive); err != nil { + log.Error("send keepalive message fail", + zap.Uint64("target-store-id", storeID), + zap.Error(err)) + delete(s.streams, storeID) + } + } + case <-s.hbStreamCtx.Done(): + return + } + } +} + +func (s *heartbeatStreams) Close() { + s.hbStreamCancel() + s.wg.Wait() +} + +func (s *heartbeatStreams) bindStream(storeID uint64, stream heartbeatStream) { + update := streamUpdate{ + storeID: storeID, + stream: stream, + } + select { + case s.streamCh <- update: + case <-s.hbStreamCtx.Done(): + } +} + +func (s *heartbeatStreams) SendMsg(region *core.RegionInfo, msg *schedulerpb.RegionHeartbeatResponse) { + if region.GetLeader() == nil { + return + } + + msg.Header = &schedulerpb.ResponseHeader{ClusterId: s.clusterID} + msg.RegionId = region.GetID() + msg.RegionEpoch = region.GetRegionEpoch() + msg.TargetPeer = region.GetLeader() + + select { + case s.msgCh <- msg: + case <-s.hbStreamCtx.Done(): + } +} + +func (s *heartbeatStreams) sendErr(errType schedulerpb.ErrorType, errMsg string, targetPeer *metapb.Peer) { + msg := &schedulerpb.RegionHeartbeatResponse{ + Header: &schedulerpb.ResponseHeader{ + ClusterId: s.clusterID, + Error: &schedulerpb.Error{ + Type: errType, + Message: errMsg, + }, + }, + TargetPeer: targetPeer, + } + + select { + case s.msgCh <- msg: + case <-s.hbStreamCtx.Done(): + } +} diff --git a/scheduler/server/id/id.go b/scheduler/server/id/id.go new file mode 100644 index 00000000..97c11e7d --- /dev/null +++ b/scheduler/server/id/id.go @@ -0,0 +1,116 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package id + +import ( + "path" + "sync" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/etcdutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +// Allocator is the allocator to generate unique ID. +type Allocator interface { + Alloc() (uint64, error) +} + +const allocStep = uint64(1000) + +// AllocatorImpl is used to allocate ID. +type AllocatorImpl struct { + mu sync.Mutex + base uint64 + end uint64 + + client *clientv3.Client + rootPath string + member string +} + +// NewAllocatorImpl creates a new IDAllocator. 
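// Illustrative sketch (not part of this commit): the concurrency shape of
// heartbeatStreams above, reduced to one channel. A single run goroutine owns
// all mutable state (the real loop also funnels bindStream registrations and a
// keep-alive ticker through the same select), so no mutex is needed, and every
// producer selects on the context as well so it can never block after Close.
package main

import (
	"context"
	"fmt"
	"sync"
)

type dispatcher struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
	msgCh  chan string
}

func newDispatcher() *dispatcher {
	ctx, cancel := context.WithCancel(context.Background())
	d := &dispatcher{ctx: ctx, cancel: cancel, msgCh: make(chan string)}
	d.wg.Add(1)
	go d.run()
	return d
}

func (d *dispatcher) run() {
	defer d.wg.Done()
	for {
		select {
		case msg := <-d.msgCh:
			fmt.Println("deliver:", msg) // the real loop looks up the target store's stream here
		case <-d.ctx.Done():
			return
		}
	}
}

// send mirrors SendMsg/sendErr: enqueue, but give up once the context is cancelled.
func (d *dispatcher) send(msg string) {
	select {
	case d.msgCh <- msg:
	case <-d.ctx.Done():
	}
}

// close mirrors Close: cancel the context, then wait for run to exit.
func (d *dispatcher) close() { d.cancel(); d.wg.Wait() }

func main() {
	d := newDispatcher()
	d.send("region heartbeat response")
	d.close()
	d.send("sent after close: dropped without blocking")
}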
+func NewAllocatorImpl(client *clientv3.Client, rootPath string, member string) *AllocatorImpl { + return &AllocatorImpl{client: client, rootPath: rootPath, member: member} +} + +// Alloc returns a new id. +func (alloc *AllocatorImpl) Alloc() (uint64, error) { + alloc.mu.Lock() + defer alloc.mu.Unlock() + + if alloc.base == alloc.end { + end, err := alloc.generate() + if err != nil { + return 0, err + } + + alloc.end = end + alloc.base = alloc.end - allocStep + } + + alloc.base++ + + return alloc.base, nil +} + +func (alloc *AllocatorImpl) generate() (uint64, error) { + key := alloc.getAllocIDPath() + value, err := etcdutil.GetValue(alloc.client, key) + if err != nil { + return 0, err + } + + var ( + cmp clientv3.Cmp + end uint64 + ) + + if value == nil { + // create the key + cmp = clientv3.Compare(clientv3.CreateRevision(key), "=", 0) + } else { + // update the key + end, err = typeutil.BytesToUint64(value) + if err != nil { + return 0, err + } + + cmp = clientv3.Compare(clientv3.Value(key), "=", string(value)) + } + + end += allocStep + value = typeutil.Uint64ToBytes(end) + txn := kv.NewSlowLogTxn(alloc.client) + leaderPath := path.Join(alloc.rootPath, "leader") + t := txn.If(append([]clientv3.Cmp{cmp}, clientv3.Compare(clientv3.Value(leaderPath), "=", alloc.member))...) + resp, err := t.Then(clientv3.OpPut(key, string(value))).Commit() + if err != nil { + return 0, err + } + if !resp.Succeeded { + return 0, errors.New("generate id failed, we may not leader") + } + + log.Info("idAllocator allocates a new id", zap.Uint64("alloc-id", end)) + return end, nil +} + +func (alloc *AllocatorImpl) getAllocIDPath() string { + return path.Join(alloc.rootPath, "alloc_id") +} diff --git a/scheduler/server/kv/etcd_kv.go b/scheduler/server/kv/etcd_kv.go new file mode 100644 index 00000000..76f393d6 --- /dev/null +++ b/scheduler/server/kv/etcd_kv.go @@ -0,0 +1,166 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "context" + "path" + "strings" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/etcdutil" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +const ( + kvRequestTimeout = time.Second * 10 + kvSlowRequestTime = time.Second * 1 + requestTimeout = 10 * time.Second + slowRequestTime = 1 * time.Second +) + +var ( + errTxnFailed = errors.New("failed to commit transaction") +) + +type etcdKVBase struct { + client *clientv3.Client + rootPath string +} + +// NewEtcdKVBase creates a new etcd kv. 
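// Illustrative sketch (not part of this commit): the windowing scheme behind
// AllocatorImpl. IDs are handed out from an in-memory window (base, end];
// only when the window is exhausted does Alloc pay for a persisted update,
// which bumps the stored high-water mark by allocStep (in the real code this
// is an etcd transaction that also checks leadership). Persistence here is a
// stubbed counter so the sketch runs standalone.
package main

import (
	"fmt"
	"sync"
)

const step = 1000 // stands in for allocStep

type windowedAllocator struct {
	mu        sync.Mutex
	base, end uint64
	persisted uint64 // stands in for the value kept under the alloc_id key in etcd
}

// persistNextWindow plays the role of generate(): bump the durable high-water
// mark and return it.
func (a *windowedAllocator) persistNextWindow() uint64 {
	a.persisted += step
	return a.persisted
}

func (a *windowedAllocator) Alloc() (uint64, error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.base == a.end { // window exhausted, fetch a new one
		a.end = a.persistNextWindow()
		a.base = a.end - step
	}
	a.base++
	return a.base, nil
}

func main() {
	alloc := &windowedAllocator{}
	for i := 0; i < 3; i++ {
		id, _ := alloc.Alloc()
		fmt.Println(id) // 1, 2, 3 -- only the first call touched "storage"
	}
}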
+func NewEtcdKVBase(client *clientv3.Client, rootPath string) *etcdKVBase { + return &etcdKVBase{ + client: client, + rootPath: rootPath, + } +} + +func (kv *etcdKVBase) Load(key string) (string, error) { + key = path.Join(kv.rootPath, key) + + resp, err := etcdutil.EtcdKVGet(kv.client, key) + if err != nil { + return "", err + } + if n := len(resp.Kvs); n == 0 { + return "", nil + } else if n > 1 { + return "", errors.Errorf("load more than one kvs: key %v kvs %v", key, n) + } + return string(resp.Kvs[0].Value), nil +} + +func (kv *etcdKVBase) LoadRange(key, endKey string, limit int) ([]string, []string, error) { + key = path.Join(kv.rootPath, key) + endKey = path.Join(kv.rootPath, endKey) + + withRange := clientv3.WithRange(endKey) + withLimit := clientv3.WithLimit(int64(limit)) + resp, err := etcdutil.EtcdKVGet(kv.client, key, withRange, withLimit) + if err != nil { + return nil, nil, err + } + keys := make([]string, 0, len(resp.Kvs)) + values := make([]string, 0, len(resp.Kvs)) + for _, item := range resp.Kvs { + keys = append(keys, strings.TrimPrefix(strings.TrimPrefix(string(item.Key), kv.rootPath), "/")) + values = append(values, string(item.Value)) + } + return keys, values, nil +} + +func (kv *etcdKVBase) Save(key, value string) error { + key = path.Join(kv.rootPath, key) + + txn := NewSlowLogTxn(kv.client) + resp, err := txn.Then(clientv3.OpPut(key, value)).Commit() + if err != nil { + log.Error("save to etcd meet error", zap.Error(err)) + return errors.WithStack(err) + } + if !resp.Succeeded { + return errors.WithStack(errTxnFailed) + } + return nil +} + +func (kv *etcdKVBase) Remove(key string) error { + key = path.Join(kv.rootPath, key) + + txn := NewSlowLogTxn(kv.client) + resp, err := txn.Then(clientv3.OpDelete(key)).Commit() + if err != nil { + log.Error("remove from etcd meet error", zap.Error(err)) + return errors.WithStack(err) + } + if !resp.Succeeded { + return errors.WithStack(errTxnFailed) + } + return nil +} + +// SlowLogTxn wraps etcd transaction and log slow one. +type SlowLogTxn struct { + clientv3.Txn + cancel context.CancelFunc +} + +// NewSlowLogTxn create a SlowLogTxn. +func NewSlowLogTxn(client *clientv3.Client) clientv3.Txn { + ctx, cancel := context.WithTimeout(client.Ctx(), requestTimeout) + return &SlowLogTxn{ + Txn: client.Txn(ctx), + cancel: cancel, + } +} + +// If takes a list of comparison. If all comparisons passed in succeed, +// the operations passed into Then() will be executed. Or the operations +// passed into Else() will be executed. +func (t *SlowLogTxn) If(cs ...clientv3.Cmp) clientv3.Txn { + return &SlowLogTxn{ + Txn: t.Txn.If(cs...), + cancel: t.cancel, + } +} + +// Then takes a list of operations. The Ops list will be executed, if the +// comparisons passed in If() succeed. +func (t *SlowLogTxn) Then(ops ...clientv3.Op) clientv3.Txn { + return &SlowLogTxn{ + Txn: t.Txn.Then(ops...), + cancel: t.cancel, + } +} + +// Commit implements Txn Commit interface. +func (t *SlowLogTxn) Commit() (*clientv3.TxnResponse, error) { + start := time.Now() + resp, err := t.Txn.Commit() + t.cancel() + + cost := time.Since(start) + if cost > slowRequestTime { + log.Warn("txn runs too slow", + zap.Error(err), + zap.Reflect("response", resp), + zap.Duration("cost", cost)) + } + + return resp, errors.WithStack(err) +} diff --git a/scheduler/server/kv/etcd_kv_test.go b/scheduler/server/kv/etcd_kv_test.go new file mode 100644 index 00000000..16e61808 --- /dev/null +++ b/scheduler/server/kv/etcd_kv_test.go @@ -0,0 +1,118 @@ +// Copyright 2017 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "strconv" + "testing" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tempurl" + . "github.com/pingcap/check" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" +) + +func TestKV(t *testing.T) { + TestingT(t) +} + +type testEtcdKVSuite struct{} + +var _ = Suite(&testEtcdKVSuite{}) + +func (s *testEtcdKVSuite) TestEtcdKV(c *C) { + cfg := newTestSingleConfig() + etcd, err := embed.StartEtcd(cfg) + c.Assert(err, IsNil) + + ep := cfg.LCUrls[0].String() + client, err := clientv3.New(clientv3.Config{ + Endpoints: []string{ep}, + }) + c.Assert(err, IsNil) + rootPath := path.Join("/pd", strconv.FormatUint(100, 10)) + + kv := NewEtcdKVBase(client, rootPath) + + keys := []string{"test/key1", "test/key2", "test/key3", "test/key4", "test/key5"} + vals := []string{"val1", "val2", "val3", "val4", "val5"} + + v, err := kv.Load(keys[0]) + c.Assert(err, IsNil) + c.Assert(v, Equals, "") + + for i := range keys { + err = kv.Save(keys[i], vals[i]) + c.Assert(err, IsNil) + } + for i := range keys { + v, err = kv.Load(keys[i]) + c.Assert(err, IsNil) + c.Assert(v, Equals, vals[i]) + } + ks, vs, err := kv.LoadRange(keys[0], "test/zzz", 100) + c.Assert(err, IsNil) + c.Assert(ks, DeepEquals, keys) + c.Assert(vs, DeepEquals, vals) + ks, vs, err = kv.LoadRange(keys[0], "test/zzz", 3) + c.Assert(err, IsNil) + c.Assert(ks, DeepEquals, keys[:3]) + c.Assert(vs, DeepEquals, vals[:3]) + ks, vs, err = kv.LoadRange(keys[0], keys[3], 100) + c.Assert(err, IsNil) + c.Assert(ks, DeepEquals, keys[:3]) + c.Assert(vs, DeepEquals, vals[:3]) + + v, err = kv.Load(keys[1]) + c.Assert(err, IsNil) + c.Assert(v, Equals, "val2") + c.Assert(kv.Remove(keys[1]), IsNil) + v, err = kv.Load(keys[1]) + c.Assert(err, IsNil) + c.Assert(v, Equals, "") + + etcd.Close() + cleanConfig(cfg) +} + +func newTestSingleConfig() *embed.Config { + cfg := embed.NewConfig() + cfg.Name = "test_etcd" + cfg.Dir, _ = ioutil.TempDir("/tmp", "test_etcd") + cfg.WalDir = "" + cfg.Logger = "zap" + cfg.LogOutputs = []string{"stdout"} + + pu, _ := url.Parse(tempurl.Alloc()) + cfg.LPUrls = []url.URL{*pu} + cfg.APUrls = cfg.LPUrls + cu, _ := url.Parse(tempurl.Alloc()) + cfg.LCUrls = []url.URL{*cu} + cfg.ACUrls = cfg.LCUrls + + cfg.StrictReconfigCheck = false + cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, &cfg.LPUrls[0]) + cfg.ClusterState = embed.ClusterStateFlagNew + return cfg +} + +func cleanConfig(cfg *embed.Config) { + // Clean data directory + os.RemoveAll(cfg.Dir) +} diff --git a/scheduler/server/kv/kv.go b/scheduler/server/kv/kv.go new file mode 100644 index 00000000..768eb703 --- /dev/null +++ b/scheduler/server/kv/kv.go @@ -0,0 +1,22 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +// Base is an abstract interface for load/save pd cluster data. +type Base interface { + Load(key string) (string, error) + LoadRange(key, endKey string, limit int) (keys []string, values []string, err error) + Save(key, value string) error + Remove(key string) error +} diff --git a/scheduler/server/kv/mem_kv.go b/scheduler/server/kv/mem_kv.go new file mode 100644 index 00000000..a32d3bc5 --- /dev/null +++ b/scheduler/server/kv/mem_kv.go @@ -0,0 +1,78 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "sync" + + "github.com/google/btree" +) + +type memoryKV struct { + sync.RWMutex + tree *btree.BTree +} + +// NewMemoryKV returns an in-memory kvBase for testing. +func NewMemoryKV() Base { + return &memoryKV{ + tree: btree.New(2), + } +} + +type memoryKVItem struct { + key, value string +} + +func (s memoryKVItem) Less(than btree.Item) bool { + return s.key < than.(memoryKVItem).key +} + +func (kv *memoryKV) Load(key string) (string, error) { + kv.RLock() + defer kv.RUnlock() + item := kv.tree.Get(memoryKVItem{key, ""}) + if item == nil { + return "", nil + } + return item.(memoryKVItem).value, nil +} + +func (kv *memoryKV) LoadRange(key, endKey string, limit int) ([]string, []string, error) { + kv.RLock() + defer kv.RUnlock() + keys := make([]string, 0, limit) + values := make([]string, 0, limit) + kv.tree.AscendRange(memoryKVItem{key, ""}, memoryKVItem{endKey, ""}, func(item btree.Item) bool { + keys = append(keys, item.(memoryKVItem).key) + values = append(values, item.(memoryKVItem).value) + return len(keys) < limit + }) + return keys, values, nil +} + +func (kv *memoryKV) Save(key, value string) error { + kv.Lock() + defer kv.Unlock() + kv.tree.ReplaceOrInsert(memoryKVItem{key, value}) + return nil +} + +func (kv *memoryKV) Remove(key string) error { + kv.Lock() + defer kv.Unlock() + + kv.tree.Delete(memoryKVItem{key, ""}) + return nil +} diff --git a/scheduler/server/member/leader.go b/scheduler/server/member/leader.go new file mode 100644 index 00000000..99f6f35d --- /dev/null +++ b/scheduler/server/member/leader.go @@ -0,0 +1,400 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
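// Illustrative sketch (not part of this commit): code that persists scheduler
// state is written against kv.Base, so it can run on the etcd-backed
// etcdKVBase in production and on NewMemoryKV in tests. In both
// implementations LoadRange scans [key, endKey) and returns at most limit
// pairs, and Load of a missing key yields "" with a nil error. The key names
// below are placeholders.
package main

import (
	"fmt"

	"github.com/pingcap-incubator/tinykv/scheduler/server/kv"
)

func main() {
	var store kv.Base = kv.NewMemoryKV() // swap in NewEtcdKVBase(client, rootPath) in production

	_ = store.Save("raft/s/1", "store-1-meta")
	_ = store.Save("raft/s/2", "store-2-meta")

	v, _ := store.Load("raft/s/1")
	fmt.Println(v) // store-1-meta

	keys, values, _ := store.LoadRange("raft/s/", "raft/s/~", 10) // '~' sorts after the digits
	fmt.Println(keys, values)

	_ = store.Remove("raft/s/2")
	v, _ = store.Load("raft/s/2")
	fmt.Println(v == "") // true: missing keys load as the empty string
}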
+ +package member + +import ( + "context" + "fmt" + "math/rand" + "path" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/etcdutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.uber.org/zap" +) + +const ( + // The timeout to wait transfer etcd leader to complete. + moveLeaderTimeout = 5 * time.Second + requestTimeout = etcdutil.DefaultRequestTimeout + slowRequestTime = etcdutil.DefaultSlowRequestTime +) + +// Member is used for the election related logic. +type Member struct { + leader atomic.Value + // Etcd and cluster information. + etcd *embed.Etcd + client *clientv3.Client + id uint64 // etcd server id. + member *schedulerpb.Member // current PD's info. + rootPath string + // memberValue is the serialized string of `member`. It will be save in + // etcd leader key when the PD node is successfully elected as the leader + // of the cluster. Every write will use it to check leadership. + memberValue string +} + +// NewMember create a new Member. +func NewMember(etcd *embed.Etcd, client *clientv3.Client, id uint64) *Member { + return &Member{ + etcd: etcd, + client: client, + id: id, + } +} + +// ID returns the unique etcd ID for this server in etcd cluster. +func (m *Member) ID() uint64 { + return m.id +} + +// MemberValue returns the member value. +func (m *Member) MemberValue() string { + return m.memberValue +} + +// Member returns the member. +func (m *Member) Member() *schedulerpb.Member { + return m.member +} + +// Etcd returns etcd related information. +func (m *Member) Etcd() *embed.Etcd { + return m.etcd +} + +// IsLeader returns whether the server is leader or not. +func (m *Member) IsLeader() bool { + // If server is not started. Both leaderID and ID could be 0. + return m.GetLeaderID() == m.ID() +} + +// GetLeaderID returns current leader's member ID. +func (m *Member) GetLeaderID() uint64 { + return m.GetLeader().GetMemberId() +} + +// GetLeader returns current leader of PD cluster. +func (m *Member) GetLeader() *schedulerpb.Member { + leader := m.leader.Load() + if leader == nil { + return nil + } + member := leader.(*schedulerpb.Member) + if member.GetMemberId() == 0 { + return nil + } + return member +} + +// EnableLeader sets the member to leader. +func (m *Member) EnableLeader() { + m.leader.Store(m.member) +} + +// DisableLeader reset the leader value. +func (m *Member) DisableLeader() { + m.leader.Store(&schedulerpb.Member{}) +} + +// GetLeaderPath returns the path of the leader. +func (m *Member) GetLeaderPath() string { + return path.Join(m.rootPath, "leader") +} + +// CheckLeader checks returns true if it is needed to check later. +func (m *Member) CheckLeader(name string) (*schedulerpb.Member, int64, bool) { + if m.GetEtcdLeader() == 0 { + log.Error("no etcd leader, check leader later") + time.Sleep(200 * time.Millisecond) + return nil, 0, true + } + + leader, rev, err := getLeader(m.client, m.GetLeaderPath()) + if err != nil { + log.Error("get leader meet error", zap.Error(err)) + time.Sleep(200 * time.Millisecond) + return nil, 0, true + } + if leader != nil { + if m.isSameLeader(leader) { + // oh, we are already leader, we may meet something wrong + // in previous CampaignLeader. we can delete and campaign again. 
+ log.Warn("the leader has not changed, delete and campaign again", zap.Stringer("old-leader", leader)) + if err = m.deleteLeaderKey(); err != nil { + log.Error("delete leader key meet error", zap.Error(err)) + time.Sleep(200 * time.Millisecond) + return nil, 0, true + } + } + } + return leader, rev, false +} + +// CheckPriority if the leader will be moved according to the priority. +func (m *Member) CheckPriority(ctx context.Context) { + etcdLeader := m.GetEtcdLeader() + if etcdLeader == m.ID() || etcdLeader == 0 { + return + } + myPriority, err := m.GetMemberLeaderPriority(m.ID()) + if err != nil { + log.Error("failed to load leader priority", zap.Error(err)) + return + } + leaderPriority, err := m.GetMemberLeaderPriority(etcdLeader) + if err != nil { + log.Error("failed to load etcd leader priority", zap.Error(err)) + return + } + if myPriority > leaderPriority { + err := m.MoveEtcdLeader(ctx, etcdLeader, m.ID()) + if err != nil { + log.Error("failed to transfer etcd leader", zap.Error(err)) + } else { + log.Info("transfer etcd leader", + zap.Uint64("from", etcdLeader), + zap.Uint64("to", m.ID())) + } + } +} + +// MoveEtcdLeader tries to transfer etcd leader. +func (m *Member) MoveEtcdLeader(ctx context.Context, old, new uint64) error { + moveCtx, cancel := context.WithTimeout(ctx, moveLeaderTimeout) + defer cancel() + return errors.WithStack(m.etcd.Server.MoveLeader(moveCtx, old, new)) +} + +// getLeader gets server leader from etcd. +func getLeader(c *clientv3.Client, leaderPath string) (*schedulerpb.Member, int64, error) { + leader := &schedulerpb.Member{} + ok, rev, err := etcdutil.GetProtoMsgWithModRev(c, leaderPath, leader) + if err != nil { + return nil, 0, err + } + if !ok { + return nil, 0, nil + } + + return leader, rev, nil +} + +// GetEtcdLeader returns the etcd leader ID. +func (m *Member) GetEtcdLeader() uint64 { + return m.etcd.Server.Lead() +} + +func (m *Member) isSameLeader(leader *schedulerpb.Member) bool { + return leader.GetMemberId() == m.ID() +} + +// MemberInfo initializes the member info. +func (m *Member) MemberInfo(cfg *config.Config, name string, rootPath string) { + leader := &schedulerpb.Member{ + Name: name, + MemberId: m.ID(), + ClientUrls: strings.Split(cfg.AdvertiseClientUrls, ","), + PeerUrls: strings.Split(cfg.AdvertisePeerUrls, ","), + } + + data, err := leader.Marshal() + if err != nil { + // can't fail, so panic here. + log.Fatal("marshal leader meet error", zap.Stringer("leader", leader), zap.Error(err)) + } + m.member = leader + m.memberValue = string(data) + m.rootPath = rootPath +} + +// CampaignLeader is used to campaign the leader. +func (m *Member) CampaignLeader(lease *LeaderLease, leaseTimeout int64) error { + err := lease.Grant(leaseTimeout) + if err != nil { + return err + } + + leaderKey := m.GetLeaderPath() + // The leader key must not exist, so the CreateRevision is 0. + resp, err := kv.NewSlowLogTxn(m.client). + If(clientv3.Compare(clientv3.CreateRevision(leaderKey), "=", 0)). + Then(clientv3.OpPut(leaderKey, m.memberValue, clientv3.WithLease(lease.ID))). + Commit() + if err != nil { + return errors.WithStack(err) + } + if !resp.Succeeded { + return errors.New("failed to campaign leader, other server may campaign ok") + } + return nil +} + +// ResignLeader resigns current PD's leadership. If nextLeader is empty, all +// other pd-servers can campaign. 
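+// Sketch (assuming m is this server's elected *Member and ctx a valid context;
+// the member names "pd-1" and "pd-2" below are illustrative only):
+//
+//   // hand leadership to the member named "pd-2"
+//   _ = m.ResignLeader(ctx, "pd-1", "pd-2")
+//   // or pass "" to let any other member campaign
+//   _ = m.ResignLeader(ctx, "pd-1", "")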
+func (m *Member) ResignLeader(ctx context.Context, from string, nextLeader string) error { + log.Info("try to resign leader to next leader", zap.String("from", from), zap.String("to", nextLeader)) + // Determine next leaders. + var leaderIDs []uint64 + res, err := etcdutil.ListEtcdMembers(m.client) + if err != nil { + return err + } + for _, member := range res.Members { + if (nextLeader == "" && member.ID != m.id) || (nextLeader != "" && member.Name == nextLeader) { + leaderIDs = append(leaderIDs, member.GetID()) + } + } + if len(leaderIDs) == 0 { + return errors.New("no valid pd to transfer leader") + } + nextLeaderID := leaderIDs[rand.Intn(len(leaderIDs))] + return m.MoveEtcdLeader(ctx, m.ID(), nextLeaderID) +} + +// LeaderTxn returns txn() with a leader comparison to guarantee that +// the transaction can be executed only if the server is leader. +func (m *Member) LeaderTxn(cs ...clientv3.Cmp) clientv3.Txn { + txn := kv.NewSlowLogTxn(m.client) + return txn.If(append(cs, m.leaderCmp())...) +} + +func (m *Member) getMemberLeaderPriorityPath(id uint64) string { + return path.Join(m.rootPath, fmt.Sprintf("member/%d/leader_priority", id)) +} + +// SetMemberLeaderPriority saves a member's priority to be elected as the etcd leader. +func (m *Member) SetMemberLeaderPriority(id uint64, priority int) error { + key := m.getMemberLeaderPriorityPath(id) + res, err := m.LeaderTxn().Then(clientv3.OpPut(key, strconv.Itoa(priority))).Commit() + if err != nil { + return errors.WithStack(err) + } + if !res.Succeeded { + return errors.New("save leader priority failed, maybe not leader") + } + return nil +} + +// DeleteMemberLeaderPriority removes a member's priority config. +func (m *Member) DeleteMemberLeaderPriority(id uint64) error { + key := m.getMemberLeaderPriorityPath(id) + res, err := m.LeaderTxn().Then(clientv3.OpDelete(key)).Commit() + if err != nil { + return errors.WithStack(err) + } + if !res.Succeeded { + return errors.New("delete leader priority failed, maybe not leader") + } + return nil +} + +// GetMemberLeaderPriority loads a member's priority to be elected as the etcd leader. +func (m *Member) GetMemberLeaderPriority(id uint64) (int, error) { + key := m.getMemberLeaderPriorityPath(id) + res, err := etcdutil.EtcdKVGet(m.client, key) + if err != nil { + return 0, err + } + if len(res.Kvs) == 0 { + return 0, nil + } + priority, err := strconv.ParseInt(string(res.Kvs[0].Value), 10, 32) + if err != nil { + return 0, errors.WithStack(err) + } + return int(priority), nil +} + +func (m *Member) deleteLeaderKey() error { + // delete leader itself and let others start a new election again. + leaderKey := m.GetLeaderPath() + resp, err := m.LeaderTxn().Then(clientv3.OpDelete(leaderKey)).Commit() + if err != nil { + return errors.WithStack(err) + } + if !resp.Succeeded { + return errors.New("resign leader failed, we are not leader already") + } + + return nil +} + +func (m *Member) leaderCmp() clientv3.Cmp { + return clientv3.Compare(clientv3.Value(m.GetLeaderPath()), "=", m.memberValue) +} + +// WatchLeader is used to watch the changes of the leader. +func (m *Member) WatchLeader(serverCtx context.Context, leader *schedulerpb.Member, revision int64) { + m.leader.Store(leader) + defer m.leader.Store(&schedulerpb.Member{}) + + watcher := clientv3.NewWatcher(m.client) + defer watcher.Close() + + ctx, cancel := context.WithCancel(serverCtx) + defer cancel() + + // The revision is the revision of last modification on this key. 
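+ // Watching from that revision means every change made to the key after the
+ // initial read is still delivered, so a leadership change cannot be missed.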
+ // If the revision is compacted, will meet required revision has been compacted error. + // In this case, use the compact revision to re-watch the key. + for { + rch := watcher.Watch(ctx, m.GetLeaderPath(), clientv3.WithRev(revision)) + for wresp := range rch { + // meet compacted error, use the compact revision. + if wresp.CompactRevision != 0 { + log.Warn("required revision has been compacted, use the compact revision", + zap.Int64("required-revision", revision), + zap.Int64("compact-revision", wresp.CompactRevision)) + revision = wresp.CompactRevision + break + } + if wresp.Canceled { + log.Error("leader watcher is canceled with", zap.Int64("revision", revision), zap.Error(wresp.Err())) + return + } + + for _, ev := range wresp.Events { + if ev.Type == mvccpb.DELETE { + log.Info("leader is deleted") + return + } + } + } + + select { + case <-ctx.Done(): + // server closed, return + return + default: + } + } +} + +// Close gracefully shuts down all servers/listeners. +func (m *Member) Close() { + m.Etcd().Close() +} diff --git a/scheduler/server/member/lease.go b/scheduler/server/member/lease.go new file mode 100644 index 00000000..3a0bef64 --- /dev/null +++ b/scheduler/server/member/lease.go @@ -0,0 +1,140 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package member + +import ( + "context" + "sync/atomic" + "time" + + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +// LeaderLease is used for renewing leadership of PD server. +type LeaderLease struct { + client *clientv3.Client + lease clientv3.Lease + ID clientv3.LeaseID + leaseTimeout time.Duration + + expireTime atomic.Value +} + +// NewLeaderLease creates a lease. +func NewLeaderLease(client *clientv3.Client) *LeaderLease { + return &LeaderLease{ + client: client, + lease: clientv3.NewLease(client), + } +} + +// Grant uses `lease.Grant` to initialize the lease and expireTime. +func (l *LeaderLease) Grant(leaseTimeout int64) error { + start := time.Now() + ctx, cancel := context.WithTimeout(l.client.Ctx(), requestTimeout) + leaseResp, err := l.lease.Grant(ctx, leaseTimeout) + cancel() + if err != nil { + return errors.WithStack(err) + } + if cost := time.Since(start); cost > slowRequestTime { + log.Warn("lease grants too slow", zap.Duration("cost", cost)) + } + l.ID = leaseResp.ID + l.leaseTimeout = time.Duration(leaseTimeout) * time.Second + l.expireTime.Store(start.Add(time.Duration(leaseResp.TTL) * time.Second)) + return nil +} + +const revokeLeaseTimeout = time.Second + +// Close releases the lease. +func (l *LeaderLease) Close() error { + // Reset expire time. + l.expireTime.Store(time.Time{}) + // Try to revoke lease to make subsequent elections faster. + ctx, cancel := context.WithTimeout(l.client.Ctx(), revokeLeaseTimeout) + defer cancel() + l.lease.Revoke(ctx, l.ID) + return l.lease.Close() +} + +// IsExpired checks if the lease is expired. If it returns true, current PD +// server should step down and try to re-elect again. 
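+//
+// Typical lifecycle sketch (assuming client is the etcd client, m is this
+// server's *Member, and ctx and leaseTimeout come from the server):
+//
+//   lease := NewLeaderLease(client)
+//   if err := m.CampaignLeader(lease, leaseTimeout); err != nil { /* lost the campaign */ }
+//   go lease.KeepAlive(ctx)                         // renew in the background
+//   for !lease.IsExpired() { /* still leader, keep serving */ }
+//   // expired: step down and let others campaign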
+func (l *LeaderLease) IsExpired() bool { + return time.Now().After(l.expireTime.Load().(time.Time)) +} + +// KeepAlive auto renews the lease and update expireTime. +func (l *LeaderLease) KeepAlive(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + timeCh := l.keepAliveWorker(ctx, l.leaseTimeout/3) + + var maxExpire time.Time + for { + select { + case t := <-timeCh: + if t.After(maxExpire) { + maxExpire = t + l.expireTime.Store(t) + } + case <-time.After(l.leaseTimeout): + return + case <-ctx.Done(): + return + } + } +} + +// Periodically call `lease.KeepAliveOnce` and post back latest received expire time into the channel. +func (l *LeaderLease) keepAliveWorker(ctx context.Context, interval time.Duration) <-chan time.Time { + ch := make(chan time.Time) + + go func() { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + go func() { + start := time.Now() + ctx1, cancel := context.WithTimeout(ctx, time.Duration(l.leaseTimeout)) + defer cancel() + res, err := l.lease.KeepAliveOnce(ctx1, l.ID) + if err != nil { + log.Warn("leader lease keep alive failed", zap.Error(err)) + return + } + if res.TTL > 0 { + expire := start.Add(time.Duration(res.TTL) * time.Second) + select { + case ch <- expire: + case <-ctx1.Done(): + } + } + }() + + select { + case <-ctx.Done(): + return + case <-ticker.C: + } + } + }() + + return ch +} diff --git a/scheduler/server/schedule/checker/replica_checker.go b/scheduler/server/schedule/checker/replica_checker.go new file mode 100644 index 00000000..98e87140 --- /dev/null +++ b/scheduler/server/schedule/checker/replica_checker.go @@ -0,0 +1,204 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "fmt" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/filter" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/selector" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +const replicaCheckerName = "replica-checker" + +const ( + offlineStatus = "offline" + downStatus = "down" +) + +// ReplicaChecker ensures region has the best replicas. +// Including the following: +// Replica number management. +// Unhealthy replica management, mainly used for disaster recovery of TiKV. +// Location management, mainly used for cross data center deployment. +type ReplicaChecker struct { + name string + cluster opt.Cluster + filters []filter.Filter +} + +// NewReplicaChecker creates a replica checker. 
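+// Usage sketch (cluster is anything implementing opt.Cluster, region a region
+// reported by heartbeat):
+//
+//   rc := NewReplicaChecker(cluster)
+//   if op := rc.Check(region); op != nil {
+//       // hand the operator to the operator controller
+//   }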
+func NewReplicaChecker(cluster opt.Cluster, n ...string) *ReplicaChecker { + name := replicaCheckerName + if len(n) != 0 { + name = n[0] + } + filters := []filter.Filter{ + filter.NewHealthFilter(name), + } + + return &ReplicaChecker{ + name: name, + cluster: cluster, + filters: filters, + } +} + +// Check verifies a region's replicas, creating an operator.Operator if need. +func (r *ReplicaChecker) Check(region *core.RegionInfo) *operator.Operator { + if op := r.checkOfflinePeer(region); op != nil { + op.SetPriorityLevel(core.HighPriority) + return op + } + + if len(region.GetPeers()) < r.cluster.GetMaxReplicas() { + log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) + newPeer := r.selectBestPeerToAddReplica(region) + if newPeer == nil { + return nil + } + return operator.CreateAddPeerOperator("make-up-replica", region, newPeer.GetId(), newPeer.GetStoreId(), operator.OpReplica) + } + + // when add learner peer, the number of peer will exceed max replicas for a while, + // just comparing the the number of voters to avoid too many cancel add operator log. + if len(region.GetVoters()) > r.cluster.GetMaxReplicas() { + log.Debug("region has more than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) + oldPeer := r.selectWorstPeer(region) + if oldPeer == nil { + return nil + } + op, err := operator.CreateRemovePeerOperator("remove-extra-replica", r.cluster, operator.OpReplica, region, oldPeer.GetStoreId()) + if err != nil { + return nil + } + return op + } + + return nil +} + +// SelectBestReplacementStore returns a store id that to be used to replace the old peer and distinct score. +func (r *ReplicaChecker) SelectBestReplacementStore(region *core.RegionInfo, oldPeer *metapb.Peer, filters ...filter.Filter) uint64 { + filters = append(filters, filter.NewExcludedFilter(r.name, nil, region.GetStoreIds())) + newRegion := region.Clone(core.WithRemoveStorePeer(oldPeer.GetStoreId())) + return r.selectBestStoreToAddReplica(newRegion, filters...) +} + +// selectBestPeerToAddReplica returns a new peer that to be used to add a replica and distinct score. +func (r *ReplicaChecker) selectBestPeerToAddReplica(region *core.RegionInfo, filters ...filter.Filter) *metapb.Peer { + storeID := r.selectBestStoreToAddReplica(region, filters...) + if storeID == 0 { + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) + return nil + } + newPeer, err := r.cluster.AllocPeer(storeID) + if err != nil { + return nil + } + return newPeer +} + +// selectBestStoreToAddReplica returns the store to add a replica. +func (r *ReplicaChecker) selectBestStoreToAddReplica(region *core.RegionInfo, filters ...filter.Filter) uint64 { + // Add some must have filters. + newFilters := []filter.Filter{ + filter.NewStateFilter(r.name), + filter.NewExcludedFilter(r.name, nil, region.GetStoreIds()), + } + filters = append(filters, r.filters...) + filters = append(filters, newFilters...) + regionStores := r.cluster.GetRegionStores(region) + s := selector.NewReplicaSelector(regionStores, r.filters...) + target := s.SelectTarget(r.cluster, r.cluster.GetStores(), filters...) + if target == nil { + return 0 + } + return target.GetID() +} + +// selectWorstPeer returns the worst peer in the region. +func (r *ReplicaChecker) selectWorstPeer(region *core.RegionInfo) *metapb.Peer { + regionStores := r.cluster.GetRegionStores(region) + s := selector.NewReplicaSelector(regionStores, r.filters...) 
+ worstStore := s.SelectSource(r.cluster, regionStores) + if worstStore == nil { + log.Debug("no worst store", zap.Uint64("region-id", region.GetID())) + return nil + } + return region.GetStorePeer(worstStore.GetID()) +} + +func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *operator.Operator { + // just skip learner + if len(region.GetLearners()) != 0 { + return nil + } + + for _, peer := range region.GetPeers() { + storeID := peer.GetStoreId() + store := r.cluster.GetStore(storeID) + if store == nil { + log.Warn("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", storeID)) + return nil + } + if store.IsUp() { + continue + } + + return r.fixPeer(region, peer, offlineStatus) + } + + return nil +} + +func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, peer *metapb.Peer, status string) *operator.Operator { + removeExtra := fmt.Sprintf("remove-extra-%s-replica", status) + // Check the number of replicas first. + if len(region.GetPeers()) > r.cluster.GetMaxReplicas() { + op, err := operator.CreateRemovePeerOperator(removeExtra, r.cluster, operator.OpReplica, region, peer.GetStoreId()) + if err != nil { + return nil + } + return op + } + + storeID := r.SelectBestReplacementStore(region, peer) + if storeID == 0 { + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) + return nil + } + newPeer, err := r.cluster.AllocPeer(storeID) + if err != nil { + return nil + } + + replace := fmt.Sprintf("replace-%s-replica", status) + var op *operator.Operator + if status == offlineStatus { + op, err = operator.CreateOfflinePeerOperator(replace, r.cluster, region, operator.OpReplica, peer.GetStoreId(), newPeer.GetStoreId(), newPeer.GetId()) + } else { + op, err = operator.CreateMovePeerOperator(replace, r.cluster, region, operator.OpReplica, peer.GetStoreId(), newPeer.GetStoreId(), newPeer.GetId()) + } + if err != nil { + return nil + } + return op +} diff --git a/scheduler/server/schedule/checker_controller.go b/scheduler/server/schedule/checker_controller.go new file mode 100644 index 00000000..6c80d627 --- /dev/null +++ b/scheduler/server/schedule/checker_controller.go @@ -0,0 +1,55 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedule + +import ( + "context" + + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/checker" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" +) + +// CheckerController is used to manage all checkers. +type CheckerController struct { + cluster opt.Cluster + opController *OperatorController + replicaChecker *checker.ReplicaChecker +} + +// NewCheckerController create a new CheckerController. +// TODO: isSupportMerge should be removed. 
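+// Wiring sketch (cluster, hbStreams and region are illustrative names):
+//
+//   oc := NewOperatorController(ctx, cluster, hbStreams)
+//   cc := NewCheckerController(ctx, cluster, oc)
+//   if busy, ops := cc.CheckRegion(region); !busy && len(ops) > 0 {
+//       oc.AddOperator(ops...)
+//   }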
+func NewCheckerController(ctx context.Context, cluster opt.Cluster, opController *OperatorController) *CheckerController { + return &CheckerController{ + cluster: cluster, + opController: opController, + replicaChecker: checker.NewReplicaChecker(cluster), + } +} + +// CheckRegion will check the region and add a new operator if needed. +func (c *CheckerController) CheckRegion(region *core.RegionInfo) (bool, []*operator.Operator) { //return checkerIsBusy,ops + // If PD has restarted, it need to check learners added before and promote them. + // Don't check isRaftLearnerEnabled cause it maybe disable learner feature but there are still some learners to promote. + opController := c.opController + checkerIsBusy := true + if opController.OperatorCount(operator.OpReplica) < c.cluster.GetReplicaScheduleLimit() { + checkerIsBusy = false + if op := c.replicaChecker.Check(region); op != nil { + return checkerIsBusy, []*operator.Operator{op} + } + } + return checkerIsBusy, nil +} diff --git a/scheduler/server/schedule/filter/filters.go b/scheduler/server/schedule/filter/filters.go new file mode 100644 index 00000000..1709ef30 --- /dev/null +++ b/scheduler/server/schedule/filter/filters.go @@ -0,0 +1,234 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package filter + +import ( + "github.com/pingcap-incubator/tinykv/scheduler/pkg/slice" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" +) + +//revive:disable:unused-parameter + +// SelectSourceStores selects stores that be selected as source store from the list. +func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, opt opt.Options) []*core.StoreInfo { + return filterStoresBy(stores, func(s *core.StoreInfo) bool { + return slice.NoneOf(filters, func(i int) bool { return filters[i].Source(opt, s) }) + }) +} + +// SelectTargetStores selects stores that be selected as target store from the list. +func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, opt opt.Options) []*core.StoreInfo { + return filterStoresBy(stores, func(s *core.StoreInfo) bool { + return slice.NoneOf(filters, func(i int) bool { return filters[i].Target(opt, s) }) + }) +} + +func filterStoresBy(stores []*core.StoreInfo, keepPred func(*core.StoreInfo) bool) (selected []*core.StoreInfo) { + for _, s := range stores { + if keepPred(s) { + selected = append(selected, s) + } + } + return +} + +// Filter is an interface to filter source and target store. +type Filter interface { + // Scope is used to indicate where the filter will act on. + Scope() string + Type() string + // Return true if the store should not be used as a source store. + Source(opt opt.Options, store *core.StoreInfo) bool + // Return true if the store should not be used as a target store. + Target(opt opt.Options, store *core.StoreInfo) bool +} + +// Source checks if store can pass all Filters as source store. 
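+// A true result means at least one filter rejected the store, i.e. the store
+// must be skipped as a source; only a store that passes every filter returns false.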
+func Source(opt opt.Options, store *core.StoreInfo, filters []Filter) bool { + for _, filter := range filters { + if filter.Source(opt, store) { + return true + } + } + return false +} + +// Target checks if store can pass all Filters as target store. +func Target(opt opt.Options, store *core.StoreInfo, filters []Filter) bool { + for _, filter := range filters { + if filter.Target(opt, store) { + return true + } + } + return false +} + +type excludedFilter struct { + scope string + sources map[uint64]struct{} + targets map[uint64]struct{} +} + +// NewExcludedFilter creates a Filter that filters all specified stores. +func NewExcludedFilter(scope string, sources, targets map[uint64]struct{}) Filter { + return &excludedFilter{ + scope: scope, + sources: sources, + targets: targets, + } +} + +func (f *excludedFilter) Scope() string { + return f.scope +} + +func (f *excludedFilter) Type() string { + return "exclude-filter" +} + +func (f *excludedFilter) Source(opt opt.Options, store *core.StoreInfo) bool { + _, ok := f.sources[store.GetID()] + return ok +} + +func (f *excludedFilter) Target(opt opt.Options, store *core.StoreInfo) bool { + _, ok := f.targets[store.GetID()] + return ok +} + +type stateFilter struct{ scope string } + +// NewStateFilter creates a Filter that filters all stores that are not UP. +func NewStateFilter(scope string) Filter { + return &stateFilter{scope: scope} +} + +func (f *stateFilter) Scope() string { + return f.scope +} + +func (f *stateFilter) Type() string { + return "state-filter" +} + +func (f *stateFilter) Source(opt opt.Options, store *core.StoreInfo) bool { + return store.IsTombstone() +} + +func (f *stateFilter) Target(opt opt.Options, store *core.StoreInfo) bool { + return !store.IsUp() +} + +type healthFilter struct{ scope string } + +// NewHealthFilter creates a Filter that filters all stores that are Busy or Down. +func NewHealthFilter(scope string) Filter { + return &healthFilter{scope: scope} +} + +func (f *healthFilter) Scope() string { + return f.scope +} + +func (f *healthFilter) Type() string { + return "health-filter" +} + +func (f *healthFilter) filter(opt opt.Options, store *core.StoreInfo) bool { + if store.IsBusy() { + return true + } + return store.DownTime() > opt.GetMaxStoreDownTime() +} + +func (f *healthFilter) Source(opt opt.Options, store *core.StoreInfo) bool { + return f.filter(opt, store) +} + +func (f *healthFilter) Target(opt opt.Options, store *core.StoreInfo) bool { + return f.filter(opt, store) +} + +// StoreStateFilter is used to determine whether a store can be selected as the +// source or target of the schedule based on the store's state. +type StoreStateFilter struct { + ActionScope string + // Set true if the schedule involves any transfer leader operation. + TransferLeader bool + // Set true if the schedule involves any move region operation. + MoveRegion bool +} + +// Scope returns the scheduler or the checker which the filter acts on. +func (f StoreStateFilter) Scope() string { + return f.ActionScope +} + +// Type returns the type of the Filter. +func (f StoreStateFilter) Type() string { + return "store-state-filter" +} + +// Source returns true when the store cannot be selected as the schedule +// source. 
+func (f StoreStateFilter) Source(opt opt.Options, store *core.StoreInfo) bool { + if store.IsTombstone() || + store.DownTime() > opt.GetMaxStoreDownTime() { + return true + } + if f.TransferLeader && (store.IsDisconnected() || store.IsBlocked()) { + return true + } + + if f.MoveRegion && f.filterMoveRegion(opt, store) { + return true + } + return false +} + +// Target returns true when the store cannot be selected as the schedule +// target. +func (f StoreStateFilter) Target(opts opt.Options, store *core.StoreInfo) bool { + if store.IsTombstone() || + store.IsOffline() || + store.DownTime() > opts.GetMaxStoreDownTime() { + return true + } + if f.TransferLeader && + (store.IsDisconnected() || + store.IsBlocked() || + store.IsBusy()) { + return true + } + + if f.MoveRegion { + if f.filterMoveRegion(opts, store) { + return true + } + } + return false +} + +func (f StoreStateFilter) filterMoveRegion(opt opt.Options, store *core.StoreInfo) bool { + if store.IsBusy() { + return true + } + + if !store.IsAvailable() { + return true + } + + return false +} diff --git a/scheduler/server/schedule/operator/operator.go b/scheduler/server/schedule/operator/operator.go new file mode 100644 index 00000000..c9ce396c --- /dev/null +++ b/scheduler/server/schedule/operator/operator.go @@ -0,0 +1,434 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package operator + +import ( + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +const ( + // LeaderOperatorWaitTime is the duration that when a leader operator lives + // longer than it, the operator will be considered timeout. + LeaderOperatorWaitTime = 10 * time.Second + // RegionOperatorWaitTime is the duration that when a region operator lives + // longer than it, the operator will be considered timeout. + RegionOperatorWaitTime = 10 * time.Minute +) + +// Cluster provides an overview of a cluster's regions distribution. +type Cluster interface { + GetStore(id uint64) *core.StoreInfo + AllocPeer(storeID uint64) (*metapb.Peer, error) +} + +// OpStep describes the basic scheduling steps that can not be subdivided. +type OpStep interface { + fmt.Stringer + ConfVerChanged(region *core.RegionInfo) bool + IsFinish(region *core.RegionInfo) bool +} + +// TransferLeader is an OpStep that transfers a region's leader. +type TransferLeader struct { + FromStore, ToStore uint64 +} + +// ConfVerChanged returns true if the conf version has been changed by this step +func (tl TransferLeader) ConfVerChanged(region *core.RegionInfo) bool { + return false // transfer leader never change the conf version +} + +func (tl TransferLeader) String() string { + return fmt.Sprintf("transfer leader from store %v to store %v", tl.FromStore, tl.ToStore) +} + +// IsFinish checks if current step is finished. 
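+// The check is purely observational: the step counts as finished once the
+// region reports its leader on ToStore, regardless of who performed the switch.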
+func (tl TransferLeader) IsFinish(region *core.RegionInfo) bool { + return region.GetLeader().GetStoreId() == tl.ToStore +} + +// AddPeer is an OpStep that adds a region peer. +type AddPeer struct { + ToStore, PeerID uint64 +} + +// ConfVerChanged returns true if the conf version has been changed by this step +func (ap AddPeer) ConfVerChanged(region *core.RegionInfo) bool { + if p := region.GetStoreVoter(ap.ToStore); p != nil { + return p.GetId() == ap.PeerID + } + return false +} +func (ap AddPeer) String() string { + return fmt.Sprintf("add peer %v on store %v", ap.PeerID, ap.ToStore) +} + +// IsFinish checks if current step is finished. +func (ap AddPeer) IsFinish(region *core.RegionInfo) bool { + if p := region.GetStoreVoter(ap.ToStore); p != nil { + if p.GetId() != ap.PeerID { + log.Warn("obtain unexpected peer", zap.String("expect", ap.String()), zap.Uint64("obtain-voter", p.GetId())) + return false + } + return region.GetPendingVoter(p.GetId()) == nil + } + return false +} + +// RemovePeer is an OpStep that removes a region peer. +type RemovePeer struct { + FromStore uint64 +} + +// ConfVerChanged returns true if the conf version has been changed by this step +func (rp RemovePeer) ConfVerChanged(region *core.RegionInfo) bool { + return region.GetStorePeer(rp.FromStore) == nil +} + +func (rp RemovePeer) String() string { + return fmt.Sprintf("remove peer on store %v", rp.FromStore) +} + +// IsFinish checks if current step is finished. +func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool { + return region.GetStorePeer(rp.FromStore) == nil +} + +// Operator contains execution steps generated by scheduler. +type Operator struct { + desc string + brief string + regionID uint64 + regionEpoch *metapb.RegionEpoch + kind OpKind + steps []OpStep + currentStep int32 + createTime time.Time + // startTime is used to record the start time of an operator which is added into running operators. + startTime time.Time + stepTime int64 + level core.PriorityLevel +} + +// NewOperator creates a new operator. +func NewOperator(desc, brief string, regionID uint64, regionEpoch *metapb.RegionEpoch, kind OpKind, steps ...OpStep) *Operator { + level := core.NormalPriority + if kind&OpAdmin != 0 { + level = core.HighPriority + } + return &Operator{ + desc: desc, + brief: brief, + regionID: regionID, + regionEpoch: regionEpoch, + kind: kind, + steps: steps, + createTime: time.Now(), + stepTime: time.Now().UnixNano(), + level: level, + } +} + +func (o *Operator) String() string { + stepStrs := make([]string, len(o.steps)) + for i := range o.steps { + stepStrs[i] = o.steps[i].String() + } + s := fmt.Sprintf("%s {%s} (kind:%s, region:%v(%v,%v), createAt:%s, startAt:%s, currentStep:%v, steps:[%s])", o.desc, o.brief, o.kind, o.regionID, o.regionEpoch.GetVersion(), o.regionEpoch.GetConfVer(), o.createTime, o.startTime, atomic.LoadInt32(&o.currentStep), strings.Join(stepStrs, ", ")) + if o.IsTimeout() { + s = s + " timeout" + } + if o.IsFinish() { + s = s + " finished" + } + return s +} + +// MarshalJSON serializes custom types to JSON. +func (o *Operator) MarshalJSON() ([]byte, error) { + return []byte(`"` + o.String() + `"`), nil +} + +// Desc returns the operator's short description. +func (o *Operator) Desc() string { + return o.desc +} + +// SetDesc sets the description for the operator. +func (o *Operator) SetDesc(desc string) { + o.desc = desc +} + +// AttachKind attaches an operator kind for the operator. 
+func (o *Operator) AttachKind(kind OpKind) { + o.kind |= kind +} + +// RegionID returns the region that operator is targeted. +func (o *Operator) RegionID() uint64 { + return o.regionID +} + +// RegionEpoch returns the region's epoch that is attached to the operator. +func (o *Operator) RegionEpoch() *metapb.RegionEpoch { + return o.regionEpoch +} + +// Kind returns operator's kind. +func (o *Operator) Kind() OpKind { + return o.kind +} + +// ElapsedTime returns duration since it was created. +func (o *Operator) ElapsedTime() time.Duration { + return time.Since(o.createTime) +} + +// RunningTime returns duration since it was promoted. +func (o *Operator) RunningTime() time.Duration { + return time.Since(o.startTime) +} + +// SetStartTime sets the start time for operator. +func (o *Operator) SetStartTime(t time.Time) { + o.startTime = t +} + +// GetStartTime ges the start time for operator. +func (o *Operator) GetStartTime() time.Time { + return o.startTime +} + +// Len returns the operator's steps count. +func (o *Operator) Len() int { + return len(o.steps) +} + +// Step returns the i-th step. +func (o *Operator) Step(i int) OpStep { + if i >= 0 && i < len(o.steps) { + return o.steps[i] + } + return nil +} + +// Check checks if current step is finished, returns next step to take action. +// It's safe to be called by multiple goroutine concurrently. +func (o *Operator) Check(region *core.RegionInfo) OpStep { + for step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ { + if o.steps[int(step)].IsFinish(region) { + atomic.StoreInt32(&o.currentStep, step+1) + atomic.StoreInt64(&o.stepTime, time.Now().UnixNano()) + } else { + return o.steps[int(step)] + } + } + return nil +} + +// ConfVerChanged returns the number of confver has consumed by steps +func (o *Operator) ConfVerChanged(region *core.RegionInfo) int { + total := 0 + current := atomic.LoadInt32(&o.currentStep) + if current == int32(len(o.steps)) { + current-- + } + // including current step, it may has taken effects in this heartbeat + for _, step := range o.steps[0 : current+1] { + if step.ConfVerChanged(region) { + total++ + } + } + return total +} + +// SetPriorityLevel sets the priority level for operator. +func (o *Operator) SetPriorityLevel(level core.PriorityLevel) { + o.level = level +} + +// GetPriorityLevel gets the priority level. +func (o *Operator) GetPriorityLevel() core.PriorityLevel { + return o.level +} + +// IsFinish checks if all steps are finished. +func (o *Operator) IsFinish() bool { + return atomic.LoadInt32(&o.currentStep) >= int32(len(o.steps)) +} + +// IsTimeout checks the operator's create time and determines if it is timeout. +func (o *Operator) IsTimeout() bool { + var timeout bool + if o.IsFinish() { + return false + } + if o.startTime.IsZero() { + return false + } + if o.kind&OpRegion != 0 { + timeout = time.Since(o.startTime) > RegionOperatorWaitTime + } else { + timeout = time.Since(o.startTime) > LeaderOperatorWaitTime + } + if timeout { + return true + } + return false +} + +// CreateAddPeerOperator creates an operator that adds a new peer. +func CreateAddPeerOperator(desc string, region *core.RegionInfo, peerID uint64, toStoreID uint64, kind OpKind) *Operator { + steps := CreateAddPeerSteps(toStoreID, peerID) + brief := fmt.Sprintf("add peer: store %v", toStoreID) + return NewOperator(desc, brief, region.GetID(), region.GetRegionEpoch(), kind|OpRegion, steps...) +} + +// CreateRemovePeerOperator creates an operator that removes a peer from region. 
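+// If the peer to remove currently holds the leadership, a TransferLeader step
+// is prepended so the leader is never removed directly. Sketch (storeID is the
+// store whose peer should go):
+//
+//   op, err := CreateRemovePeerOperator("remove-extra-replica", cluster,
+//       OpReplica, region, storeID)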
+func CreateRemovePeerOperator(desc string, cluster Cluster, kind OpKind, region *core.RegionInfo, storeID uint64) (*Operator, error) { + removeKind, steps, err := removePeerSteps(cluster, region, storeID, getRegionFollowerIDs(region)) + if err != nil { + return nil, err + } + brief := fmt.Sprintf("rm peer: store %v", storeID) + return NewOperator(desc, brief, region.GetID(), region.GetRegionEpoch(), removeKind|kind, steps...), nil +} + +// CreateAddPeerSteps creates an OpStep list that add a new peer. +func CreateAddPeerSteps(newStore uint64, peerID uint64) []OpStep { + st := []OpStep{ + AddPeer{ToStore: newStore, PeerID: peerID}, + } + return st +} + +// CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store. +func CreateTransferLeaderOperator(desc string, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) *Operator { + step := TransferLeader{FromStore: sourceStoreID, ToStore: targetStoreID} + brief := fmt.Sprintf("transfer leader: store %v to %v", sourceStoreID, targetStoreID) + return NewOperator(desc, brief, region.GetID(), region.GetRegionEpoch(), kind|OpLeader, step) +} + +// interleaveStepGroups interleaves two slice of step groups. For example: +// +// a = [[opA1, opA2], [opA3], [opA4, opA5, opA6]] +// b = [[opB1], [opB2], [opB3, opB4], [opB5, opB6]] +// c = interleaveStepGroups(a, b, 0) +// c == [opA1, opA2, opB1, opA3, opB2, opA4, opA5, opA6, opB3, opB4, opB5, opB6] +// +// sizeHint is a hint for the capacity of returned slice. +func interleaveStepGroups(a, b [][]OpStep, sizeHint int) []OpStep { + steps := make([]OpStep, 0, sizeHint) + i, j := 0, 0 + for ; i < len(a) && j < len(b); i, j = i+1, j+1 { + steps = append(steps, a[i]...) + steps = append(steps, b[j]...) + } + for ; i < len(a); i++ { + steps = append(steps, a[i]...) + } + for ; j < len(b); j++ { + steps = append(steps, b[j]...) + } + return steps +} + +// CreateMovePeerOperator creates an operator that replaces an old peer with a new peer. +func CreateMovePeerOperator(desc string, cluster Cluster, region *core.RegionInfo, kind OpKind, oldStore, newStore uint64, peerID uint64) (*Operator, error) { + removeKind, steps, err := removePeerSteps(cluster, region, oldStore, append(getRegionFollowerIDs(region), newStore)) + if err != nil { + return nil, err + } + st := CreateAddPeerSteps(newStore, peerID) + steps = append(st, steps...) + brief := fmt.Sprintf("mv peer: store %v to %v", oldStore, newStore) + return NewOperator(desc, brief, region.GetID(), region.GetRegionEpoch(), removeKind|kind|OpRegion, steps...), nil +} + +// CreateOfflinePeerOperator creates an operator that replaces an old peer with a new peer when offline a store. +func CreateOfflinePeerOperator(desc string, cluster Cluster, region *core.RegionInfo, kind OpKind, oldStore, newStore uint64, peerID uint64) (*Operator, error) { + k, steps, err := transferLeaderStep(cluster, region, oldStore, append(getRegionFollowerIDs(region))) + if err != nil { + return nil, err + } + kind |= k + st := CreateAddPeerSteps(newStore, peerID) + steps = append(steps, st...) 
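+ // Resulting step order: transfer leadership away from the old store if it
+ // holds the leader, add the replacement peer, then remove the old peer.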
+ steps = append(steps, RemovePeer{FromStore: oldStore}) + brief := fmt.Sprintf("mv peer: store %v to %v", oldStore, newStore) + return NewOperator(desc, brief, region.GetID(), region.GetRegionEpoch(), kind|OpRegion, steps...), nil +} + +func getRegionFollowerIDs(region *core.RegionInfo) []uint64 { + var ids []uint64 + for id := range region.GetFollowers() { + ids = append(ids, id) + } + return ids +} + +// removePeerSteps returns the steps to safely remove a peer. It prevents removing leader by transfer its leadership first. +func removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64, followerIDs []uint64) (kind OpKind, steps []OpStep, err error) { + kind, steps, err = transferLeaderStep(cluster, region, storeID, followerIDs) + if err != nil { + return + } + + steps = append(steps, RemovePeer{FromStore: storeID}) + kind |= OpRegion + return +} + +func transferLeaderStep(cluster Cluster, region *core.RegionInfo, storeID uint64, followerIDs []uint64) (kind OpKind, steps []OpStep, err error) { + if region.GetLeader() != nil && region.GetLeader().GetStoreId() == storeID { + kind, steps, err = transferLeaderToSuitableSteps(cluster, storeID, followerIDs) + if err != nil { + log.Debug("failed to create transfer leader step", zap.Uint64("region-id", region.GetID()), zap.Error(err)) + return + } + } + return +} + +// findAvailableStore finds the first available store. +func findAvailableStore(cluster Cluster, storeIDs []uint64) (int, uint64) { + for i, id := range storeIDs { + store := cluster.GetStore(id) + if store != nil { + return i, id + } else { + log.Debug("nil store", zap.Uint64("store-id", id)) + } + } + return -1, 0 +} + +// transferLeaderToSuitableSteps returns the first suitable store to become region leader. +// Returns an error if there is no suitable store. +func transferLeaderToSuitableSteps(cluster Cluster, leaderID uint64, storeIDs []uint64) (OpKind, []OpStep, error) { + _, id := findAvailableStore(cluster, storeIDs) + if id != 0 { + return OpLeader, []OpStep{TransferLeader{FromStore: leaderID, ToStore: id}}, nil + } + return 0, nil, errors.New("no suitable store to become region leader") +} diff --git a/scheduler/server/schedule/operator/operator_kind.go b/scheduler/server/schedule/operator/operator_kind.go new file mode 100644 index 00000000..8cd8370c --- /dev/null +++ b/scheduler/server/schedule/operator/operator_kind.go @@ -0,0 +1,85 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package operator + +import ( + "strings" + + "github.com/pkg/errors" +) + +// OpKind is a bit field to identify operator types. +type OpKind uint32 + +// Flags for operators. +const ( + OpLeader OpKind = 1 << iota // Include leader transfer. + OpRegion // Include peer movement. + OpAdmin // Initiated by admin. + OpAdjacent // Initiated by adjacent region scheduler. + OpReplica // Initiated by replica checkers. + OpBalance // Initiated by balancers. + OpMerge // Initiated by merge checkers or merge schedulers. + OpRange // Initiated by range scheduler. 
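+
+ // The kinds above are bit flags and are usually combined, e.g.
+ // OpReplica|OpRegion marks a peer movement issued by the replica checker.
+ // opMax below is only a sentinel used when iterating over the flags.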
+ opMax +) + +var flagToName = map[OpKind]string{ + OpLeader: "leader", + OpRegion: "region", + OpAdmin: "admin", + OpAdjacent: "adjacent", + OpReplica: "replica", + OpBalance: "balance", + OpMerge: "merge", + OpRange: "range", +} + +var nameToFlag = map[string]OpKind{ + "leader": OpLeader, + "region": OpRegion, + "admin": OpAdmin, + "adjacent": OpAdjacent, + "replica": OpReplica, + "balance": OpBalance, + "merge": OpMerge, + "range": OpRange, +} + +func (k OpKind) String() string { + var flagNames []string + for flag := OpKind(1); flag < opMax; flag <<= 1 { + if k&flag != 0 { + flagNames = append(flagNames, flagToName[flag]) + } + } + if len(flagNames) == 0 { + return "unknown" + } + return strings.Join(flagNames, ",") +} + +// ParseOperatorKind converts string (flag name list concat by ',') to OpKind. +func ParseOperatorKind(str string) (OpKind, error) { + var k OpKind + for _, flagName := range strings.Split(str, ",") { + flag, ok := nameToFlag[flagName] + if !ok { + return 0, errors.Errorf("unknown flag name: %s", flagName) + } + k |= flag + } + return k, nil + +} diff --git a/scheduler/server/schedule/operator/operator_test.go b/scheduler/server/schedule/operator/operator_test.go new file mode 100644 index 00000000..aaf6042c --- /dev/null +++ b/scheduler/server/schedule/operator/operator_test.go @@ -0,0 +1,177 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package operator + +import ( + "encoding/json" + "sync/atomic" + "testing" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + . 
"github.com/pingcap/check" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testOperatorSuite{}) + +type testOperatorSuite struct { + cluster *mockcluster.Cluster +} + +func (s *testOperatorSuite) SetUpTest(c *C) { + cfg := mockoption.NewScheduleOptions() + cfg.MaxMergeRegionSize = 2 + cfg.MaxMergeRegionKeys = 2 + s.cluster = mockcluster.NewCluster(cfg) + stores := []uint64{1, 2, 3, 4, 5, 6, 7, 8} + for _, storeID := range stores { + s.cluster.PutStore(core.NewStoreInfo(&metapb.Store{Id: storeID})) + } +} + +func (s *testOperatorSuite) newTestRegion(regionID uint64, leaderPeer uint64, peers ...[2]uint64) *core.RegionInfo { + var ( + region metapb.Region + leader *metapb.Peer + ) + region.Id = regionID + for i := range peers { + peer := &metapb.Peer{ + Id: peers[i][1], + StoreId: peers[i][0], + } + region.Peers = append(region.Peers, peer) + if peer.GetId() == leaderPeer { + leader = peer + } + } + regionInfo := core.NewRegionInfo(®ion, leader, core.SetApproximateSize(50)) + return regionInfo +} + +func genAddPeers(store uint64, groups [][]uint64) [][]OpStep { + ret := make([][]OpStep, len(groups)) + for i, grp := range groups { + steps := make([]OpStep, len(grp)) + for j, id := range grp { + steps[j] = AddPeer{ToStore: store, PeerID: id} + } + ret[i] = steps + } + return ret +} + +func (s *testOperatorSuite) TestInterleaveStepGroups(c *C) { + a := genAddPeers(1, [][]uint64{{1, 2}, {3}, {4, 5, 6}}) + b := genAddPeers(1, [][]uint64{{11}, {12}, {13, 14}, {15, 16}}) + ans := genAddPeers(1, [][]uint64{{1, 2, 11, 3, 12, 4, 5, 6, 13, 14, 15, 16}}) + res := interleaveStepGroups(a, b, 12) + c.Assert(res, DeepEquals, ans[0]) +} + +func (s *testOperatorSuite) TestFindAvailableStore(c *C) { + stores := []uint64{8, 7, 3, 4, 7, 3, 1, 5, 6} + i, id := findAvailableStore(s.cluster, stores) + c.Assert(i, Equals, 0) + c.Assert(id, Equals, uint64(8)) + i, id = findAvailableStore(s.cluster, stores[2:]) + c.Assert(i, Equals, 0) + c.Assert(id, Equals, uint64(3)) +} + +func (s *testOperatorSuite) TestOperatorStep(c *C) { + region := s.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + c.Assert(TransferLeader{FromStore: 1, ToStore: 2}.IsFinish(region), IsFalse) + c.Assert(TransferLeader{FromStore: 2, ToStore: 1}.IsFinish(region), IsTrue) + c.Assert(AddPeer{ToStore: 3, PeerID: 3}.IsFinish(region), IsFalse) + c.Assert(AddPeer{ToStore: 1, PeerID: 1}.IsFinish(region), IsTrue) + c.Assert(RemovePeer{FromStore: 1}.IsFinish(region), IsFalse) + c.Assert(RemovePeer{FromStore: 3}.IsFinish(region), IsTrue) +} + +func (s *testOperatorSuite) newTestOperator(regionID uint64, kind OpKind, steps ...OpStep) *Operator { + return NewOperator("test", "test", regionID, &metapb.RegionEpoch{}, OpAdmin|kind, steps...) +} + +func (s *testOperatorSuite) checkSteps(c *C, op *Operator, steps []OpStep) { + c.Assert(op.Len(), Equals, len(steps)) + for i := range steps { + c.Assert(op.Step(i), Equals, steps[i]) + } +} + +func (s *testOperatorSuite) TestOperator(c *C) { + region := s.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + // addPeer1, transferLeader1, removePeer3 + steps := []OpStep{ + AddPeer{ToStore: 1, PeerID: 1}, + TransferLeader{FromStore: 3, ToStore: 1}, + RemovePeer{FromStore: 3}, + } + op := s.newTestOperator(1, OpLeader|OpRegion, steps...) 
+ c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) + s.checkSteps(c, op, steps) + c.Assert(op.Check(region), IsNil) + c.Assert(op.IsFinish(), IsTrue) + op.startTime = time.Now() + op.startTime = op.startTime.Add(-RegionOperatorWaitTime - time.Second) + c.Assert(op.IsTimeout(), IsFalse) + + // addPeer1, transferLeader1, removePeer2 + steps = []OpStep{ + AddPeer{ToStore: 1, PeerID: 1}, + TransferLeader{FromStore: 2, ToStore: 1}, + RemovePeer{FromStore: 2}, + } + op = s.newTestOperator(1, OpLeader|OpRegion, steps...) + s.checkSteps(c, op, steps) + c.Assert(op.Check(region), Equals, RemovePeer{FromStore: 2}) + c.Assert(atomic.LoadInt32(&op.currentStep), Equals, int32(2)) + op.startTime = time.Now() + c.Assert(op.IsTimeout(), IsFalse) + op.startTime = op.startTime.Add(-LeaderOperatorWaitTime - time.Second) + c.Assert(op.IsTimeout(), IsFalse) + op.startTime = op.startTime.Add(-RegionOperatorWaitTime - time.Second) + c.Assert(op.IsTimeout(), IsTrue) + res, err := json.Marshal(op) + c.Assert(err, IsNil) + c.Assert(len(res), Equals, len(op.String())+2) + + // check short timeout for transfer leader only operators. + steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}} + op = s.newTestOperator(1, OpLeader, steps...) + op.startTime = time.Now() + c.Assert(op.IsTimeout(), IsFalse) + op.startTime = op.startTime.Add(-LeaderOperatorWaitTime - time.Second) + c.Assert(op.IsTimeout(), IsTrue) +} + +func (s *testOperatorSuite) TestOperatorKind(c *C) { + c.Assert((OpLeader | OpReplica).String(), Equals, "leader,replica") + c.Assert(OpKind(0).String(), Equals, "unknown") + k, err := ParseOperatorKind("balance,region,leader") + c.Assert(err, IsNil) + c.Assert(k, Equals, OpBalance|OpRegion|OpLeader) + _, err = ParseOperatorKind("leader,region") + c.Assert(err, IsNil) + _, err = ParseOperatorKind("foobar") + c.Assert(err, NotNil) +} diff --git a/scheduler/server/schedule/operator_controller.go b/scheduler/server/schedule/operator_controller.go new file mode 100644 index 00000000..f3b749c8 --- /dev/null +++ b/scheduler/server/schedule/operator_controller.go @@ -0,0 +1,363 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedule + +import ( + "container/heap" + "context" + "fmt" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/cache" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +// The source of dispatched region. +const ( + DispatchFromHeartBeat = "heartbeat" + DispatchFromNotifierQueue = "active push" + DispatchFromCreate = "create" +) + +var ( + slowNotifyInterval = 5 * time.Second + fastNotifyInterval = 2 * time.Second +) + +// HeartbeatStreams is an interface of async region heartbeat. 
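+// The operator controller uses it to piggyback scheduling commands (transfer
+// leader, add or remove a peer) on region heartbeat responses; tests plug in
+// the in-memory mockhbstream implementation.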
+type HeartbeatStreams interface { + SendMsg(region *core.RegionInfo, msg *schedulerpb.RegionHeartbeatResponse) +} + +// OperatorController is used to limit the speed of scheduling. +type OperatorController struct { + sync.RWMutex + ctx context.Context + cluster opt.Cluster + operators map[uint64]*operator.Operator + hbStreams HeartbeatStreams + counts map[operator.OpKind]uint64 + opRecords *OperatorRecords + opNotifierQueue operatorQueue +} + +// NewOperatorController creates a OperatorController. +func NewOperatorController(ctx context.Context, cluster opt.Cluster, hbStreams HeartbeatStreams) *OperatorController { + return &OperatorController{ + ctx: ctx, + cluster: cluster, + operators: make(map[uint64]*operator.Operator), + hbStreams: hbStreams, + counts: make(map[operator.OpKind]uint64), + opRecords: NewOperatorRecords(ctx), + opNotifierQueue: make(operatorQueue, 0), + } +} + +// Ctx returns a context which will be canceled once RaftCluster is stopped. +// For now, it is only used to control the lifetime of TTL cache in schedulers. +func (oc *OperatorController) Ctx() context.Context { + return oc.ctx +} + +// Dispatch is used to dispatch the operator of a region. +func (oc *OperatorController) Dispatch(region *core.RegionInfo, source string) { + // Check existed operator. + if op := oc.GetOperator(region.GetID()); op != nil { + timeout := op.IsTimeout() + if step := op.Check(region); step != nil && !timeout { + // When the "source" is heartbeat, the region may have a newer + // confver than the region that the operator holds. In this case, + // the operator is stale, and will not be executed even we would + // have sent it to TiKV servers. Here, we just cancel it. + origin := op.RegionEpoch() + latest := region.GetRegionEpoch() + changes := latest.GetConfVer() - origin.GetConfVer() + if source == DispatchFromHeartBeat && + changes > uint64(op.ConfVerChanged(region)) { + + if oc.RemoveOperator(op) { + log.Info("stale operator", zap.Uint64("region-id", region.GetID()), zap.Duration("takes", op.RunningTime()), + zap.Reflect("operator", op), zap.Uint64("diff", changes)) + oc.opRecords.Put(op, schedulerpb.OperatorStatus_CANCEL) + } + + return + } + + oc.SendScheduleCommand(region, step, source) + return + } + if op.IsFinish() && oc.RemoveOperator(op) { + log.Info("operator finish", zap.Uint64("region-id", region.GetID()), zap.Duration("takes", op.RunningTime()), zap.Reflect("operator", op)) + oc.opRecords.Put(op, schedulerpb.OperatorStatus_SUCCESS) + } else if timeout && oc.RemoveOperator(op) { + log.Info("operator timeout", zap.Uint64("region-id", region.GetID()), zap.Duration("takes", op.RunningTime()), zap.Reflect("operator", op)) + oc.opRecords.Put(op, schedulerpb.OperatorStatus_TIMEOUT) + } + } +} + +func (oc *OperatorController) getNextPushOperatorTime(step operator.OpStep, now time.Time) time.Time { + nextTime := slowNotifyInterval + switch step.(type) { + case operator.TransferLeader: + nextTime = fastNotifyInterval + } + return now.Add(nextTime) +} + +// AddOperator adds operators to the running operators. +func (oc *OperatorController) AddOperator(ops ...*operator.Operator) bool { + oc.Lock() + defer oc.Unlock() + + if !oc.checkAddOperator(ops...) { + for _, op := range ops { + oc.opRecords.Put(op, schedulerpb.OperatorStatus_CANCEL) + } + return false + } + for _, op := range ops { + oc.addOperatorLocked(op) + } + return true +} + +// checkAddOperator checks if the operator can be added. 
+// There are several situations that cannot be added: +// - There is no such region in the cluster +// - The epoch of the operator and the epoch of the corresponding region are no longer consistent. +// - The region already has a higher priority or same priority operator. +func (oc *OperatorController) checkAddOperator(ops ...*operator.Operator) bool { + for _, op := range ops { + region := oc.cluster.GetRegion(op.RegionID()) + if region == nil { + log.Debug("region not found, cancel add operator", zap.Uint64("region-id", op.RegionID())) + return false + } + if region.GetRegionEpoch().GetVersion() != op.RegionEpoch().GetVersion() || region.GetRegionEpoch().GetConfVer() != op.RegionEpoch().GetConfVer() { + log.Debug("region epoch not match, cancel add operator", zap.Uint64("region-id", op.RegionID()), zap.Reflect("old", region.GetRegionEpoch()), zap.Reflect("new", op.RegionEpoch())) + return false + } + if old := oc.operators[op.RegionID()]; old != nil && !isHigherPriorityOperator(op, old) { + log.Debug("already have operator, cancel add operator", zap.Uint64("region-id", op.RegionID()), zap.Reflect("old", old)) + return false + } + } + return true +} + +func isHigherPriorityOperator(new, old *operator.Operator) bool { + return new.GetPriorityLevel() > old.GetPriorityLevel() +} + +func (oc *OperatorController) addOperatorLocked(op *operator.Operator) bool { + regionID := op.RegionID() + + log.Info("add operator", zap.Uint64("region-id", regionID), zap.Reflect("operator", op)) + + // If there is an old operator, replace it. The priority should be checked + // already. + if old, ok := oc.operators[regionID]; ok { + _ = oc.removeOperatorLocked(old) + log.Info("replace old operator", zap.Uint64("region-id", regionID), zap.Duration("takes", old.RunningTime()), zap.Reflect("operator", old)) + oc.opRecords.Put(old, schedulerpb.OperatorStatus_REPLACE) + } + + oc.operators[regionID] = op + op.SetStartTime(time.Now()) + oc.updateCounts(oc.operators) + + var step operator.OpStep + if region := oc.cluster.GetRegion(op.RegionID()); region != nil { + if step = op.Check(region); step != nil { + oc.SendScheduleCommand(region, step, DispatchFromCreate) + } + } + + heap.Push(&oc.opNotifierQueue, &operatorWithTime{op: op, time: oc.getNextPushOperatorTime(step, time.Now())}) + return true +} + +// RemoveOperator removes a operator from the running operators. +func (oc *OperatorController) RemoveOperator(op *operator.Operator) (found bool) { + oc.Lock() + defer oc.Unlock() + return oc.removeOperatorLocked(op) +} + +// GetOperatorStatus gets the operator and its status with the specify id. +func (oc *OperatorController) GetOperatorStatus(id uint64) *OperatorWithStatus { + oc.Lock() + defer oc.Unlock() + if op, ok := oc.operators[id]; ok { + return &OperatorWithStatus{ + Op: op, + Status: schedulerpb.OperatorStatus_RUNNING, + } + } + return oc.opRecords.Get(id) +} + +func (oc *OperatorController) removeOperatorLocked(op *operator.Operator) bool { + regionID := op.RegionID() + if cur := oc.operators[regionID]; cur == op { + delete(oc.operators, regionID) + oc.updateCounts(oc.operators) + return true + } + return false +} + +// GetOperator gets a operator from the given region. +func (oc *OperatorController) GetOperator(regionID uint64) *operator.Operator { + oc.RLock() + defer oc.RUnlock() + return oc.operators[regionID] +} + +// GetOperators gets operators from the running operators. 
+func (oc *OperatorController) GetOperators() []*operator.Operator { + oc.RLock() + defer oc.RUnlock() + + operators := make([]*operator.Operator, 0, len(oc.operators)) + for _, op := range oc.operators { + operators = append(operators, op) + } + + return operators +} + +// SendScheduleCommand sends a command to the region. +func (oc *OperatorController) SendScheduleCommand(region *core.RegionInfo, step operator.OpStep, source string) { + log.Info("send schedule command", zap.Uint64("region-id", region.GetID()), zap.Stringer("step", step), zap.String("source", source)) + switch st := step.(type) { + case operator.TransferLeader: + cmd := &schedulerpb.RegionHeartbeatResponse{ + TransferLeader: &schedulerpb.TransferLeader{ + Peer: region.GetStorePeer(st.ToStore), + }, + } + oc.hbStreams.SendMsg(region, cmd) + case operator.AddPeer: + if region.GetStorePeer(st.ToStore) != nil { + // The newly added peer is pending. + return + } + cmd := &schedulerpb.RegionHeartbeatResponse{ + ChangePeer: &schedulerpb.ChangePeer{ + ChangeType: eraftpb.ConfChangeType_AddNode, + Peer: &metapb.Peer{ + Id: st.PeerID, + StoreId: st.ToStore, + }, + }, + } + oc.hbStreams.SendMsg(region, cmd) + case operator.RemovePeer: + cmd := &schedulerpb.RegionHeartbeatResponse{ + ChangePeer: &schedulerpb.ChangePeer{ + ChangeType: eraftpb.ConfChangeType_RemoveNode, + Peer: region.GetStorePeer(st.FromStore), + }, + } + oc.hbStreams.SendMsg(region, cmd) + default: + log.Error("unknown operator step", zap.Reflect("step", step)) + } +} + +// updateCounts updates resource counts using current pending operators. +func (oc *OperatorController) updateCounts(operators map[uint64]*operator.Operator) { + for k := range oc.counts { + delete(oc.counts, k) + } + for _, op := range operators { + oc.counts[op.Kind()]++ + } +} + +// OperatorCount gets the count of operators filtered by mask. +func (oc *OperatorController) OperatorCount(mask operator.OpKind) uint64 { + oc.RLock() + defer oc.RUnlock() + var total uint64 + for k, count := range oc.counts { + if k&mask != 0 { + total += count + } + } + return total +} + +// SetOperator is only used for test. +func (oc *OperatorController) SetOperator(op *operator.Operator) { + oc.Lock() + defer oc.Unlock() + oc.operators[op.RegionID()] = op +} + +// OperatorWithStatus records the operator and its status. +type OperatorWithStatus struct { + Op *operator.Operator + Status schedulerpb.OperatorStatus +} + +// MarshalJSON returns the status of operator as a JSON string +func (o *OperatorWithStatus) MarshalJSON() ([]byte, error) { + return []byte(`"` + fmt.Sprintf("status: %s, operator: %s", o.Status.String(), o.Op.String()) + `"`), nil +} + +// OperatorRecords remains the operator and its status for a while. +type OperatorRecords struct { + ttl *cache.TTL +} + +const operatorStatusRemainTime = 10 * time.Minute + +// NewOperatorRecords returns a OperatorRecords. +func NewOperatorRecords(ctx context.Context) *OperatorRecords { + return &OperatorRecords{ + ttl: cache.NewTTL(ctx, time.Minute, operatorStatusRemainTime), + } +} + +// Get gets the operator and its status. +func (o *OperatorRecords) Get(id uint64) *OperatorWithStatus { + v, exist := o.ttl.Get(id) + if !exist { + return nil + } + return v.(*OperatorWithStatus) +} + +// Put puts the operator and its status. 
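+// The record is kept in a TTL cache for operatorStatusRemainTime (10 minutes),
+// so a finished, timed-out, canceled or replaced operator can still be looked
+// up through Get or OperatorController.GetOperatorStatus for a while after it
+// leaves the set of running operators.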
+func (o *OperatorRecords) Put(op *operator.Operator, status schedulerpb.OperatorStatus) { + id := op.RegionID() + record := &OperatorWithStatus{ + Op: op, + Status: status, + } + o.ttl.Put(id, record) +} diff --git a/scheduler/server/schedule/operator_controller_test.go b/scheduler/server/schedule/operator_controller_test.go new file mode 100644 index 00000000..3a27b57c --- /dev/null +++ b/scheduler/server/schedule/operator_controller_test.go @@ -0,0 +1,134 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedule + +import ( + "context" + "testing" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockhbstream" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + . "github.com/pingcap/check" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testOperatorControllerSuite{}) + +type testOperatorControllerSuite struct { + ctx context.Context + cancel context.CancelFunc +} + +func (t *testOperatorControllerSuite) SetUpSuite(c *C) { + t.ctx, t.cancel = context.WithCancel(context.Background()) +} + +func (t *testOperatorControllerSuite) TearDownSuite(c *C) { + t.cancel() +} + +func (t *testOperatorControllerSuite) TestOperatorStatus(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + oc := NewOperatorController(t.ctx, tc, mockhbstream.NewHeartbeatStream()) + tc.AddLeaderStore(1, 2) + tc.AddLeaderStore(2, 0) + tc.AddLeaderRegion(1, 1, 2) + tc.AddLeaderRegion(2, 1, 2) + steps := []operator.OpStep{ + operator.RemovePeer{FromStore: 2}, + operator.AddPeer{ToStore: 2, PeerID: 4}, + } + op1 := operator.NewOperator("test", "test", 1, &metapb.RegionEpoch{}, operator.OpRegion, steps...) + op2 := operator.NewOperator("test", "test", 2, &metapb.RegionEpoch{}, operator.OpRegion, steps...) 
+ region1 := tc.GetRegion(1) + region2 := tc.GetRegion(2) + op1.SetStartTime(time.Now()) + oc.SetOperator(op1) + op2.SetStartTime(time.Now()) + oc.SetOperator(op2) + c.Assert(oc.GetOperatorStatus(1).Status, Equals, schedulerpb.OperatorStatus_RUNNING) + c.Assert(oc.GetOperatorStatus(2).Status, Equals, schedulerpb.OperatorStatus_RUNNING) + op1.SetStartTime(time.Now().Add(-10 * time.Minute)) + region2 = ApplyOperatorStep(region2, op2) + tc.PutRegion(region2) + oc.Dispatch(region1, "test") + oc.Dispatch(region2, "test") + c.Assert(oc.GetOperatorStatus(1).Status, Equals, schedulerpb.OperatorStatus_TIMEOUT) + c.Assert(oc.GetOperatorStatus(2).Status, Equals, schedulerpb.OperatorStatus_RUNNING) + ApplyOperator(tc, op2) + oc.Dispatch(region2, "test") + c.Assert(oc.GetOperatorStatus(2).Status, Equals, schedulerpb.OperatorStatus_SUCCESS) +} + +// #1652 +func (t *testOperatorControllerSuite) TestDispatchOutdatedRegion(c *C) { + cluster := mockcluster.NewCluster(mockoption.NewScheduleOptions()) + stream := mockhbstream.NewHeartbeatStreams(cluster.ID) + controller := NewOperatorController(t.ctx, cluster, stream) + + cluster.AddLeaderStore(1, 2) + cluster.AddLeaderStore(2, 0) + cluster.AddLeaderRegion(1, 1, 2) + steps := []operator.OpStep{ + operator.TransferLeader{FromStore: 1, ToStore: 2}, + operator.RemovePeer{FromStore: 1}, + } + + op := operator.NewOperator("test", "test", 1, + &metapb.RegionEpoch{ConfVer: 0, Version: 0}, + operator.OpRegion, steps...) + c.Assert(controller.AddOperator(op), Equals, true) + c.Assert(len(stream.MsgCh()), Equals, 1) + + // report the result of transferring leader + region := cluster.MockRegionInfo(1, 2, []uint64{1, 2}, + &metapb.RegionEpoch{ConfVer: 0, Version: 0}) + + controller.Dispatch(region, DispatchFromHeartBeat) + c.Assert(op.ConfVerChanged(region), Equals, 0) + c.Assert(len(stream.MsgCh()), Equals, 2) + + // report the result of removing peer + region = cluster.MockRegionInfo(1, 2, []uint64{2}, + &metapb.RegionEpoch{ConfVer: 0, Version: 0}) + + controller.Dispatch(region, DispatchFromHeartBeat) + c.Assert(op.ConfVerChanged(region), Equals, 1) + c.Assert(len(stream.MsgCh()), Equals, 2) + + // add and disaptch op again, the op should be stale + op = operator.NewOperator("test", "test", 1, + &metapb.RegionEpoch{ConfVer: 0, Version: 0}, + operator.OpRegion, steps...) + c.Assert(controller.AddOperator(op), Equals, true) + c.Assert(op.ConfVerChanged(region), Equals, 0) + c.Assert(len(stream.MsgCh()), Equals, 3) + + // report region with an abnormal confver + region = cluster.MockRegionInfo(1, 1, []uint64{1, 2}, + &metapb.RegionEpoch{ConfVer: 1, Version: 0}) + controller.Dispatch(region, DispatchFromHeartBeat) + c.Assert(op.ConfVerChanged(region), Equals, 0) + // no new step + c.Assert(len(stream.MsgCh()), Equals, 3) +} diff --git a/scheduler/server/schedule/operator_queue.go b/scheduler/server/schedule/operator_queue.go new file mode 100644 index 00000000..3d1aefb3 --- /dev/null +++ b/scheduler/server/schedule/operator_queue.go @@ -0,0 +1,53 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package schedule + +import ( + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" +) + +type operatorWithTime struct { + op *operator.Operator + time time.Time +} + +type operatorQueue []*operatorWithTime + +func (opn operatorQueue) Len() int { return len(opn) } + +func (opn operatorQueue) Less(i, j int) bool { + return opn[i].time.Before(opn[j].time) +} + +func (opn operatorQueue) Swap(i, j int) { + opn[i], opn[j] = opn[j], opn[i] +} + +func (opn *operatorQueue) Push(x interface{}) { + item := x.(*operatorWithTime) + *opn = append(*opn, item) +} + +func (opn *operatorQueue) Pop() interface{} { + old := *opn + n := len(old) + if n == 0 { + return nil + } + item := old[n-1] + *opn = old[0 : n-1] + return item +} diff --git a/scheduler/server/schedule/opt/opts.go b/scheduler/server/schedule/opt/opts.go new file mode 100644 index 00000000..1f705f24 --- /dev/null +++ b/scheduler/server/schedule/opt/opts.go @@ -0,0 +1,46 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package opt + +import ( + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" +) + +// Options for schedulers. +type Options interface { + GetLeaderScheduleLimit() uint64 + GetRegionScheduleLimit() uint64 + GetReplicaScheduleLimit() uint64 + + GetMaxStoreDownTime() time.Duration + + GetMaxReplicas() int +} + +// Cluster provides an overview of a cluster's regions distribution. +// TODO: This interface should be moved to a better place. +type Cluster interface { + core.RegionSetInformer + core.StoreSetInformer + core.StoreSetController + + Options + + // TODO: it should be removed. Schedulers don't need to know anything + // about peers. + AllocPeer(storeID uint64) (*metapb.Peer, error) +} diff --git a/scheduler/server/schedule/scheduler.go b/scheduler/server/schedule/scheduler.go new file mode 100644 index 00000000..90322f5c --- /dev/null +++ b/scheduler/server/schedule/scheduler.go @@ -0,0 +1,139 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedule + +import ( + "encoding/json" + "net/http" + "strings" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +// Scheduler is an interface to schedule resources. 
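+// A scheduler is typically created through CreateScheduler, prepared against a
+// cluster with Prepare, and then polled periodically; a rough sketch of one
+// polling step (the surrounding coordinator loop is not part of this file) is:
+//
+//	if s.IsScheduleAllowed(cluster) {
+//		if op := s.Schedule(cluster); op != nil {
+//			opController.AddOperator(op)
+//		}
+//	}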
+type Scheduler interface {
+	http.Handler
+	GetName() string
+	// GetType should be in accordance with the name passed to schedule.RegisterScheduler()
+	GetType() string
+	EncodeConfig() ([]byte, error)
+	GetMinInterval() time.Duration
+	GetNextInterval(interval time.Duration) time.Duration
+	Prepare(cluster opt.Cluster) error
+	Cleanup(cluster opt.Cluster)
+	Schedule(cluster opt.Cluster) *operator.Operator
+	IsScheduleAllowed(cluster opt.Cluster) bool
+}
+
+// EncodeConfig encodes the custom config for each scheduler.
+func EncodeConfig(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// DecodeConfig decodes the custom config for each scheduler.
+func DecodeConfig(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// ConfigDecoder is used to decode the config.
+type ConfigDecoder func(v interface{}) error
+
+// ConfigSliceDecoderBuilder is used to build a slice decoder of the config.
+type ConfigSliceDecoderBuilder func([]string) ConfigDecoder
+
+// ConfigJSONDecoder is used to build a JSON decoder of the config.
+func ConfigJSONDecoder(data []byte) ConfigDecoder {
+	return func(v interface{}) error {
+		return DecodeConfig(data, v)
+	}
+}
+
+// ConfigSliceDecoder is the default decoder for the config.
+func ConfigSliceDecoder(name string, args []string) ConfigDecoder {
+	builder, ok := schedulerArgsToDecoder[name]
+	if !ok {
+		return func(v interface{}) error {
+			return errors.Errorf("the config decoder is not registered for %s", name)
+		}
+	}
+	return builder(args)
+}
+
+// CreateSchedulerFunc is for creating a scheduler.
+type CreateSchedulerFunc func(opController *OperatorController, storage *core.Storage, dec ConfigDecoder) (Scheduler, error)
+
+var schedulerMap = make(map[string]CreateSchedulerFunc)
+var schedulerArgsToDecoder = make(map[string]ConfigSliceDecoderBuilder)
+
+// RegisterScheduler binds a scheduler creator. It should be called in the init()
+// func of a package.
+func RegisterScheduler(typ string, createFn CreateSchedulerFunc) {
+	if _, ok := schedulerMap[typ]; ok {
+		log.Fatal("duplicated scheduler", zap.String("type", typ))
+	}
+	schedulerMap[typ] = createFn
+}
+
+// RegisterSliceDecoderBuilder binds a builder that converts arguments to a config.
+// It should be called in the init() func of a package.
+func RegisterSliceDecoderBuilder(typ string, builder ConfigSliceDecoderBuilder) {
+	if _, ok := schedulerArgsToDecoder[typ]; ok {
+		log.Fatal("duplicated scheduler", zap.String("type", typ))
+	}
+	schedulerArgsToDecoder[typ] = builder
+}
+
+// IsSchedulerRegistered checks whether the named scheduler type is registered.
+func IsSchedulerRegistered(name string) bool {
+	_, ok := schedulerMap[name]
+	return ok
+}
+
+// CreateScheduler creates a scheduler with a registered creator func.
+func CreateScheduler(typ string, opController *OperatorController, storage *core.Storage, dec ConfigDecoder) (Scheduler, error) {
+	fn, ok := schedulerMap[typ]
+	if !ok {
+		return nil, errors.Errorf("create func of %v is not registered", typ)
+	}
+
+	s, err := fn(opController, storage, dec)
+	if err != nil {
+		return nil, err
+	}
+	data, err := s.EncodeConfig()
+	if err != nil {
+		return nil, err
+	}
+	err = storage.SaveScheduleConfig(s.GetName(), data)
+	return s, err
+}
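+
+// A typical registration, as done by the schedulers shipped in this repo (see
+// schedulers/balance_leader.go), looks roughly like:
+//
+//	func init() {
+//		schedule.RegisterSliceDecoderBuilder("balance-leader", func(args []string) schedule.ConfigDecoder {
+//			return func(v interface{}) error { return nil }
+//		})
+//		schedule.RegisterScheduler("balance-leader", func(opController *schedule.OperatorController,
+//			storage *core.Storage, dec schedule.ConfigDecoder) (schedule.Scheduler, error) {
+//			return newBalanceLeaderScheduler(opController), nil
+//		})
+//	}
+
+// FindSchedulerTypeByName finds the type of the specified name.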
+func FindSchedulerTypeByName(name string) string { + var typ string + for registerdType := range schedulerMap { + if strings.Index(name, registerdType) != -1 { + if len(registerdType) > len(typ) { + typ = registerdType + } + } + } + return typ +} diff --git a/scheduler/server/schedule/selector/selector.go b/scheduler/server/schedule/selector/selector.go new file mode 100644 index 00000000..cd2623f2 --- /dev/null +++ b/scheduler/server/schedule/selector/selector.go @@ -0,0 +1,180 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package selector + +import ( + "math/rand" + + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/filter" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" +) + +// BalanceSelector selects source/target from store candidates based on their +// resource scores. +type BalanceSelector struct { + kind core.ScheduleKind + filters []filter.Filter +} + +// NewBalanceSelector creates a BalanceSelector instance. +func NewBalanceSelector(kind core.ScheduleKind, filters []filter.Filter) *BalanceSelector { + return &BalanceSelector{ + kind: kind, + filters: filters, + } +} + +// SelectSource selects the store that can pass all filters and has the maximal +// resource score. +func (s *BalanceSelector) SelectSource(opt opt.Options, stores []*core.StoreInfo, filters ...filter.Filter) *core.StoreInfo { + filters = append(filters, s.filters...) + var result *core.StoreInfo + for _, store := range stores { + if filter.Source(opt, store, filters) { + continue + } + if result == nil || + result.ResourceScore(s.kind, 0) < + store.ResourceScore(s.kind, 0) { + result = store + } + } + return result +} + +// SelectTarget selects the store that can pass all filters and has the minimal +// resource score. +func (s *BalanceSelector) SelectTarget(opt opt.Options, stores []*core.StoreInfo, filters ...filter.Filter) *core.StoreInfo { + filters = append(filters, s.filters...) + var result *core.StoreInfo + for _, store := range stores { + if filter.Target(opt, store, filters) { + continue + } + if result == nil || + result.ResourceScore(s.kind, 0) > + store.ResourceScore(s.kind, 0) { + result = store + } + } + return result +} + +// ReplicaSelector selects source/target store candidates based on their +// distinct scores based on a region's peer stores. +type ReplicaSelector struct { + regionStores []*core.StoreInfo + filters []filter.Filter +} + +// NewReplicaSelector creates a ReplicaSelector instance. +func NewReplicaSelector(regionStores []*core.StoreInfo, filters ...filter.Filter) *ReplicaSelector { + return &ReplicaSelector{ + regionStores: regionStores, + filters: filters, + } +} + +// SelectSource selects the store that can pass all filters and has the minimal +// distinct score. 
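+// Note that, unlike BalanceSelector, the filters are only applied to the final
+// candidate: the store with the best score is picked first, and the selection
+// returns nil if that store fails any source filter.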
+func (s *ReplicaSelector) SelectSource(opt opt.Options, stores []*core.StoreInfo) *core.StoreInfo { + var ( + best *core.StoreInfo + ) + for _, store := range stores { + if best == nil || compareStoreScore(store, best) < 0 { + best = store + } + } + if best == nil || filter.Source(opt, best, s.filters) { + return nil + } + return best +} + +// SelectTarget selects the store that can pass all filters and has the maximal +// distinct score. +func (s *ReplicaSelector) SelectTarget(opt opt.Options, stores []*core.StoreInfo, filters ...filter.Filter) *core.StoreInfo { + var ( + best *core.StoreInfo + ) + for _, store := range stores { + if filter.Target(opt, store, filters) { + continue + } + if best == nil || compareStoreScore(store, best) > 0 { + best = store + } + } + if best == nil || filter.Target(opt, best, s.filters) { + return nil + } + return best +} + +// compareStoreScore compares which store is better for replication. +// Returns 0 if store A is as good as store B. +// Returns 1 if store A is better than store B. +// Returns -1 if store B is better than store A. +func compareStoreScore(storeA *core.StoreInfo, storeB *core.StoreInfo) int { + // The store with lower region score is better. + if storeA.RegionScore() < + storeB.RegionScore() { + return 1 + } + if storeA.RegionScore() > + storeB.RegionScore() { + return -1 + } + return 0 +} + +// RandomSelector selects source/target store randomly. +type RandomSelector struct { + filters []filter.Filter +} + +func (s *RandomSelector) randStore(stores []*core.StoreInfo) *core.StoreInfo { + if len(stores) == 0 { + return nil + } + return stores[rand.Int()%len(stores)] +} + +// SelectSource randomly selects a source store from those can pass all filters. +func (s *RandomSelector) SelectSource(opt opt.Options, stores []*core.StoreInfo) *core.StoreInfo { + var candidates []*core.StoreInfo + for _, store := range stores { + if filter.Source(opt, store, s.filters) { + continue + } + candidates = append(candidates, store) + } + return s.randStore(candidates) +} + +// SelectTarget randomly selects a target store from those can pass all filters. +func (s *RandomSelector) SelectTarget(opt opt.Options, stores []*core.StoreInfo, filters ...filter.Filter) *core.StoreInfo { + filters = append(filters, s.filters...) + + var candidates []*core.StoreInfo + for _, store := range stores { + if filter.Target(opt, store, filters) { + continue + } + candidates = append(candidates, store) + } + return s.randStore(candidates) +} diff --git a/scheduler/server/schedule/selector/selector_test.go b/scheduler/server/schedule/selector/selector_test.go new file mode 100644 index 00000000..75bdb986 --- /dev/null +++ b/scheduler/server/schedule/selector/selector_test.go @@ -0,0 +1,83 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package selector + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/filter" + . "github.com/pingcap/check" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testSelectorSuite{}) + +type testSelectorSuite struct { + tc *mockcluster.Cluster +} + +func (s *testSelectorSuite) SetUpSuite(c *C) { + opt := mockoption.NewScheduleOptions() + s.tc = mockcluster.NewCluster(opt) +} + +func (s *testSelectorSuite) TestCompareStoreScore(c *C) { + store1 := core.NewStoreInfoWithIdAndCount(1, 1) + store2 := core.NewStoreInfoWithIdAndCount(2, 1) + store3 := core.NewStoreInfoWithIdAndCount(3, 3) + + c.Assert(compareStoreScore(store1, store2), Equals, 0) + + c.Assert(compareStoreScore(store1, store3), Equals, 1) +} + +func (s *testSelectorSuite) TestScheduleConfig(c *C) { + filters := make([]filter.Filter, 0) + testScheduleConfig := func(selector *BalanceSelector, stores []*core.StoreInfo, expectSourceID, expectTargetID uint64) { + c.Assert(selector.SelectSource(s.tc, stores).GetID(), Equals, expectSourceID) + c.Assert(selector.SelectTarget(s.tc, stores).GetID(), Equals, expectTargetID) + } + + kinds := []core.ScheduleKind{{ + Resource: core.RegionKind, + }} + + for _, kind := range kinds { + selector := NewBalanceSelector(kind, filters) + stores := []*core.StoreInfo{ + core.NewStoreInfoWithSizeCount(1, 2, 3, 10, 5), + core.NewStoreInfoWithSizeCount(2, 2, 3, 4, 5), + core.NewStoreInfoWithSizeCount(3, 2, 3, 4, 5), + core.NewStoreInfoWithSizeCount(4, 2, 3, 2, 5), + } + testScheduleConfig(selector, stores, 1, 4) + } + + selector := NewBalanceSelector(core.ScheduleKind{ + Resource: core.LeaderKind, + }, filters) + stores := []*core.StoreInfo{ + core.NewStoreInfoWithSizeCount(1, 2, 20, 10, 25), + core.NewStoreInfoWithSizeCount(2, 2, 66, 10, 5), + core.NewStoreInfoWithSizeCount(3, 2, 6, 10, 5), + core.NewStoreInfoWithSizeCount(4, 2, 20, 10, 1), + } + testScheduleConfig(selector, stores, 2, 3) +} diff --git a/scheduler/server/schedule/test_util.go b/scheduler/server/schedule/test_util.go new file mode 100644 index 00000000..d61bf0cd --- /dev/null +++ b/scheduler/server/schedule/test_util.go @@ -0,0 +1,67 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedule + +import ( + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" +) + +// ApplyOperatorStep applies operator step. Only for test purpose. 
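+// A test can advance a region one step at a time and feed the result back into
+// the mock cluster, e.g. (as in operator_controller_test.go):
+//
+//	region = ApplyOperatorStep(region, op)
+//	tc.PutRegion(region)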
+func ApplyOperatorStep(region *core.RegionInfo, op *operator.Operator) *core.RegionInfo { + if step := op.Check(region); step != nil { + switch s := step.(type) { + case operator.TransferLeader: + region = region.Clone(core.WithLeader(region.GetStorePeer(s.ToStore))) + case operator.AddPeer: + if region.GetStorePeer(s.ToStore) != nil { + panic("Add peer that exists") + } + peer := &metapb.Peer{ + Id: s.PeerID, + StoreId: s.ToStore, + } + region = region.Clone(core.WithAddPeer(peer)) + case operator.RemovePeer: + if region.GetStorePeer(s.FromStore) == nil { + panic("Remove peer that doesn't exist") + } + if region.GetLeader().GetStoreId() == s.FromStore { + panic("Cannot remove the leader peer") + } + region = region.Clone(core.WithRemoveStorePeer(s.FromStore)) + default: + panic("Unknown operator step") + } + } + return region +} + +// ApplyOperator applies operator. Only for test purpose. +func ApplyOperator(mc *mockcluster.Cluster, op *operator.Operator) { + origin := mc.GetRegion(op.RegionID()) + region := origin + for !op.IsFinish() { + region = ApplyOperatorStep(region, op) + } + mc.PutRegion(region) + for id := range region.GetStoreIds() { + mc.UpdateStoreStatus(id) + } + for id := range origin.GetStoreIds() { + mc.UpdateStoreStatus(id) + } +} diff --git a/scheduler/server/schedulers/balance_leader.go b/scheduler/server/schedulers/balance_leader.go new file mode 100644 index 00000000..15d68868 --- /dev/null +++ b/scheduler/server/schedulers/balance_leader.go @@ -0,0 +1,184 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "sort" + + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/filter" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +func init() { + schedule.RegisterSliceDecoderBuilder("balance-leader", func(args []string) schedule.ConfigDecoder { + return func(v interface{}) error { + return nil + } + }) + + schedule.RegisterScheduler("balance-leader", func(opController *schedule.OperatorController, storage *core.Storage, mapper schedule.ConfigDecoder) (schedule.Scheduler, error) { + return newBalanceLeaderScheduler(opController), nil + }) +} + +// balanceLeaderRetryLimit is the limit to retry schedule for selected source store and target store. +const balanceLeaderRetryLimit = 10 + +type balanceLeaderScheduler struct { + *baseScheduler + name string + opController *schedule.OperatorController + filters []filter.Filter +} + +// newBalanceLeaderScheduler creates a scheduler that tends to keep leaders on +// each store balanced. 
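+// On every Schedule call it walks the stores ordered by leader score, trying to
+// transfer a leader out of the highest-scored source and, alternately, into the
+// lowest-scored target, retrying each side up to balanceLeaderRetryLimit times
+// before giving up for the round.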
+func newBalanceLeaderScheduler(opController *schedule.OperatorController, opts ...BalanceLeaderCreateOption) schedule.Scheduler { + base := newBaseScheduler(opController) + + s := &balanceLeaderScheduler{ + baseScheduler: base, + opController: opController, + } + for _, opt := range opts { + opt(s) + } + s.filters = []filter.Filter{filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true}} + return s +} + +// BalanceLeaderCreateOption is used to create a scheduler with an option. +type BalanceLeaderCreateOption func(s *balanceLeaderScheduler) + +func (l *balanceLeaderScheduler) GetName() string { + if l.name != "" { + return l.name + } + return "balance-leader-scheduler" +} + +func (l *balanceLeaderScheduler) GetType() string { + return "balance-leader" +} + +func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster opt.Cluster) bool { + return l.opController.OperatorCount(operator.OpLeader) < cluster.GetLeaderScheduleLimit() +} + +func (l *balanceLeaderScheduler) Schedule(cluster opt.Cluster) *operator.Operator { + stores := cluster.GetStores() + sources := filter.SelectSourceStores(stores, l.filters, cluster) + targets := filter.SelectTargetStores(stores, l.filters, cluster) + sort.Slice(sources, func(i, j int) bool { + return sources[i].LeaderScore(0) > sources[j].LeaderScore(0) + }) + sort.Slice(targets, func(i, j int) bool { + return targets[i].LeaderScore(0) < targets[j].LeaderScore(0) + }) + + for i := 0; i < len(sources) || i < len(targets); i++ { + if i < len(sources) { + source := sources[i] + sourceID := source.GetID() + log.Debug("store leader score", zap.String("scheduler", l.GetName()), zap.Uint64("source-store", sourceID)) + for j := 0; j < balanceLeaderRetryLimit; j++ { + if op := l.transferLeaderOut(cluster, source); op != nil { + return op + } + } + log.Debug("no operator created for selected stores", zap.String("scheduler", l.GetName()), zap.Uint64("source", sourceID)) + } + if i < len(targets) { + target := targets[i] + targetID := target.GetID() + log.Debug("store leader score", zap.String("scheduler", l.GetName()), zap.Uint64("target-store", targetID)) + + for j := 0; j < balanceLeaderRetryLimit; j++ { + if op := l.transferLeaderIn(cluster, target); op != nil { + return op + } + } + log.Debug("no operator created for selected stores", zap.String("scheduler", l.GetName()), zap.Uint64("target", targetID)) + } + } + return nil +} + +// transferLeaderOut transfers leader from the source store. +// It randomly selects a health region from the source store, then picks +// the best follower peer and transfers the leader. +func (l *balanceLeaderScheduler) transferLeaderOut(cluster opt.Cluster, source *core.StoreInfo) *operator.Operator { + sourceID := source.GetID() + region := cluster.RandLeaderRegion(sourceID, core.HealthRegion()) + if region == nil { + log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", sourceID)) + return nil + } + targets := cluster.GetFollowerStores(region) + targets = filter.SelectTargetStores(targets, l.filters, cluster) + sort.Slice(targets, func(i, j int) bool { + return targets[i].LeaderScore(0) < targets[j].LeaderScore(0) + }) + for _, target := range targets { + if op := l.createOperator(cluster, region, source, target); op != nil { + return op + } + } + log.Debug("region has no target store", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) + return nil +} + +// transferLeaderIn transfers leader to the target store. 
+// It randomly selects a health region from the target store, then picks +// the worst follower peer and transfers the leader. +func (l *balanceLeaderScheduler) transferLeaderIn(cluster opt.Cluster, target *core.StoreInfo) *operator.Operator { + targetID := target.GetID() + region := cluster.RandFollowerRegion(targetID, core.HealthRegion()) + if region == nil { + log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", targetID)) + return nil + } + leaderStoreID := region.GetLeader().GetStoreId() + source := cluster.GetStore(leaderStoreID) + if source == nil { + log.Debug("region has no leader or leader store cannot be found", + zap.String("scheduler", l.GetName()), + zap.Uint64("region-id", region.GetID()), + zap.Uint64("store-id", leaderStoreID), + ) + return nil + } + return l.createOperator(cluster, region, source, target) +} + +// createOperator creates the operator according to the source and target store. +// If the difference between the two stores is tolerable, then +// no new operator need to be created, otherwise create an operator that transfers +// the leader from the source store to the target store for the region. +func (l *balanceLeaderScheduler) createOperator(cluster opt.Cluster, region *core.RegionInfo, source, target *core.StoreInfo) *operator.Operator { + targetID := target.GetID() + + kind := core.NewScheduleKind(core.LeaderKind) + if !shouldBalance(cluster, source, target, region, kind, l.GetName()) { + return nil + } + + op := operator.CreateTransferLeaderOperator("balance-leader", region, region.GetLeader().GetStoreId(), targetID, operator.OpBalance) + return op +} diff --git a/scheduler/server/schedulers/balance_region.go b/scheduler/server/schedulers/balance_region.go new file mode 100644 index 00000000..ccdc4028 --- /dev/null +++ b/scheduler/server/schedulers/balance_region.go @@ -0,0 +1,80 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" +) + +func init() { + schedule.RegisterSliceDecoderBuilder("balance-region", func(args []string) schedule.ConfigDecoder { + return func(v interface{}) error { + return nil + } + }) + schedule.RegisterScheduler("balance-region", func(opController *schedule.OperatorController, storage *core.Storage, decoder schedule.ConfigDecoder) (schedule.Scheduler, error) { + return newBalanceRegionScheduler(opController), nil + }) +} + +const ( + // balanceRegionRetryLimit is the limit to retry schedule for selected store. + balanceRegionRetryLimit = 10 + balanceRegionName = "balance-region-scheduler" +) + +type balanceRegionScheduler struct { + *baseScheduler + name string + opController *schedule.OperatorController +} + +// newBalanceRegionScheduler creates a scheduler that tends to keep regions on +// each store balanced. 
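+// Schedule is left returning nil here; the expected region-balancing behavior
+// is exercised by the *3C tests in balance_test.go.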
+func newBalanceRegionScheduler(opController *schedule.OperatorController, opts ...BalanceRegionCreateOption) schedule.Scheduler { + base := newBaseScheduler(opController) + s := &balanceRegionScheduler{ + baseScheduler: base, + opController: opController, + } + for _, opt := range opts { + opt(s) + } + return s +} + +// BalanceRegionCreateOption is used to create a scheduler with an option. +type BalanceRegionCreateOption func(s *balanceRegionScheduler) + +func (s *balanceRegionScheduler) GetName() string { + if s.name != "" { + return s.name + } + return balanceRegionName +} + +func (s *balanceRegionScheduler) GetType() string { + return "balance-region" +} + +func (s *balanceRegionScheduler) IsScheduleAllowed(cluster opt.Cluster) bool { + return s.opController.OperatorCount(operator.OpRegion) < cluster.GetRegionScheduleLimit() +} + +func (s *balanceRegionScheduler) Schedule(cluster opt.Cluster) *operator.Operator { + return nil +} diff --git a/scheduler/server/schedulers/balance_test.go b/scheduler/server/schedulers/balance_test.go new file mode 100644 index 00000000..5c980d00 --- /dev/null +++ b/scheduler/server/schedulers/balance_test.go @@ -0,0 +1,610 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "context" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockcluster" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/mock/mockoption" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/checker" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/operator" + . "github.com/pingcap/check" +) + +func newTestReplication(mso *mockoption.ScheduleOptions, maxReplicas int) { + mso.MaxReplicas = maxReplicas +} + +var _ = Suite(&testBalanceRegionSchedulerSuite{}) + +type testBalanceRegionSchedulerSuite struct { + ctx context.Context + cancel context.CancelFunc +} + +func (s *testBalanceRegionSchedulerSuite) SetUpSuite(c *C) { + s.ctx, s.cancel = context.WithCancel(context.Background()) +} + +func (s *testBalanceRegionSchedulerSuite) TearDownSuite(c *C) { + s.cancel() +} + +func (s *testBalanceRegionSchedulerSuite) TestReplicas13C(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + oc := schedule.NewOperatorController(s.ctx, nil, nil) + + sb, err := schedule.CreateScheduler("balance-region", oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + + opt.SetMaxReplicas(1) + + // Add stores 1,2,3,4. + tc.AddRegionStore(1, 6) + tc.AddRegionStore(2, 8) + tc.AddRegionStore(3, 8) + tc.AddRegionStore(4, 16) + // Add region 1 with leader in store 4. + tc.AddLeaderRegion(1, 4) + testutil.CheckTransferPeerWithLeaderTransfer(c, sb.Schedule(tc), operator.OpBalance, 4, 1) + + // Test stateFilter. 
+ tc.SetStoreOffline(1) + tc.UpdateRegionCount(2, 6) + + // When store 1 is offline, it will be filtered, + // store 2 becomes the store with least regions. + testutil.CheckTransferPeerWithLeaderTransfer(c, sb.Schedule(tc), operator.OpBalance, 4, 2) + opt.SetMaxReplicas(3) + c.Assert(sb.Schedule(tc), IsNil) + + opt.SetMaxReplicas(1) + c.Assert(sb.Schedule(tc), NotNil) +} + +func (s *testBalanceRegionSchedulerSuite) TestReplicas33C(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + oc := schedule.NewOperatorController(s.ctx, nil, nil) + + newTestReplication(opt, 3) + + sb, err := schedule.CreateScheduler("balance-region", oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + + // Store 1 has the largest region score, so the balancer try to replace peer in store 1. + tc.AddRegionStore(1, 16) + tc.AddRegionStore(2, 15) + tc.AddRegionStore(3, 14) + + tc.AddLeaderRegion(1, 1, 2, 3) + // This schedule try to replace peer in store 1, but we have no other stores. + c.Assert(sb.Schedule(tc), IsNil) + + // Store 4 has smaller region score than store 1. + tc.AddRegionStore(4, 2) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 4) + + // Store 5 has smaller region score than store 4. + tc.AddRegionStore(5, 1) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 5) + + // Store 6 has smaller region score with store 6. + tc.AddRegionStore(6, 0) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 6) + + // If store 6 is not available, will choose store 5. + tc.SetStoreDown(6) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 5) + + // Take down 4,5,6 + tc.SetStoreDown(4) + tc.SetStoreDown(5) + tc.SetStoreDown(6) + + // Store 7 has different zone with other stores but larger region score than store 1. + tc.AddRegionStore(7, 20) + c.Assert(sb.Schedule(tc), IsNil) +} + +func (s *testBalanceRegionSchedulerSuite) TestReplicas53C(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + oc := schedule.NewOperatorController(s.ctx, nil, nil) + + newTestReplication(opt, 5) + + sb, err := schedule.CreateScheduler("balance-region", oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + + tc.AddRegionStore(1, 4) + tc.AddRegionStore(2, 5) + tc.AddRegionStore(3, 6) + tc.AddRegionStore(4, 7) + tc.AddRegionStore(5, 28) + + tc.AddLeaderRegion(1, 1, 2, 3, 4, 5) + + // Store 6 has smaller region score. + tc.AddRegionStore(6, 1) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 5, 6) + + // Store 7 has larger region score and same distinct score with store 6. + tc.AddRegionStore(7, 5) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 5, 6) + + // Store 1 has smaller region score and higher distinct score. + tc.AddLeaderRegion(1, 2, 3, 4, 5, 6) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 5, 1) + + // Store 6 has smaller region score and higher distinct score. 
+ tc.AddRegionStore(11, 29) + tc.AddRegionStore(12, 8) + tc.AddRegionStore(13, 7) + tc.AddLeaderRegion(1, 2, 3, 11, 12, 13) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 11, 6) +} + +func (s *testBalanceRegionSchedulerSuite) TestStoreWeight3C(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + oc := schedule.NewOperatorController(s.ctx, nil, nil) + + sb, err := schedule.CreateScheduler("balance-region", oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + opt.SetMaxReplicas(1) + + tc.AddRegionStore(1, 10) + tc.AddRegionStore(2, 10) + tc.AddRegionStore(3, 10) + tc.AddRegionStore(4, 10) + tc.UpdateStoreRegionWeight(1, 0.5) + tc.UpdateStoreRegionWeight(2, 0.9) + tc.UpdateStoreRegionWeight(3, 1.0) + tc.UpdateStoreRegionWeight(4, 2.0) + + tc.AddLeaderRegion(1, 1) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 4) + + tc.UpdateRegionCount(4, 30) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 3) +} + +func (s *testBalanceRegionSchedulerSuite) TestReplacePendingRegion3C(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + oc := schedule.NewOperatorController(s.ctx, nil, nil) + + newTestReplication(opt, 3) + + sb, err := schedule.CreateScheduler("balance-region", oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + + // Store 1 has the largest region score, so the balancer try to replace peer in store 1. + tc.AddRegionStore(1, 16) + tc.AddRegionStore(2, 7) + tc.AddRegionStore(3, 15) + // Store 4 has smaller region score than store 1 and more better place than store 2. + tc.AddRegionStore(4, 10) + + // set pending peer + tc.AddLeaderRegion(1, 1, 2, 3) + tc.AddLeaderRegion(2, 1, 2, 3) + tc.AddLeaderRegion(3, 2, 1, 3) + region := tc.GetRegion(3) + region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetStorePeer(1)})) + tc.PutRegion(region) + + c.Assert(sb.Schedule(tc).RegionID(), Equals, uint64(3)) + testutil.CheckTransferPeer(c, sb.Schedule(tc), operator.OpBalance, 1, 4) +} + +var _ = Suite(&testBalanceSpeedSuite{}) + +type testBalanceSpeedSuite struct{} + +type testBalanceSpeedCase struct { + sourceCount uint64 + targetCount uint64 + regionSize int64 + expectedResult bool +} + +func (s *testBalanceSpeedSuite) TestShouldBalance3C(c *C) { + tests := []testBalanceSpeedCase{ + // all store capacity is 1024MB + // size = count * 10 + + // target size is zero + {2, 0, 1, true}, + {2, 0, 10, true}, + // all in high space stage + {10, 5, 1, true}, + {10, 5, 20, true}, + {10, 10, 1, false}, + {10, 10, 20, false}, + // all in transition stage + {70, 50, 1, true}, + {70, 50, 50, true}, + {70, 70, 1, false}, + // all in low space stage + {90, 80, 1, true}, + {90, 80, 50, true}, + {90, 90, 1, false}, + // one in high space stage, other in transition stage + {65, 55, 5, true}, + {65, 50, 50, true}, + // one in transition space stage, other in low space stage + {80, 70, 5, true}, + {80, 70, 50, true}, + } + + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + // create a region to control average region size. 
+ tc.AddLeaderRegion(1, 1, 2) + + for _, t := range tests { + tc.AddRegionStore(1, int(t.sourceCount)) + tc.AddRegionStore(2, int(t.targetCount)) + source := tc.GetStore(1) + target := tc.GetStore(2) + region := tc.GetRegion(1).Clone(core.SetApproximateSize(t.regionSize)) + tc.PutRegion(region) + kind := core.NewScheduleKind(core.RegionKind) + c.Assert(shouldBalance(tc, source, target, region, kind, ""), Equals, t.expectedResult) + } +} + +func (s *testBalanceSpeedSuite) TestTolerantRatio3C(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + // create a region to control average region size. + tc.AddLeaderRegion(1, 1, 2) + regionSize := int64(96 * 1024) + region := tc.GetRegion(1).Clone(core.SetApproximateSize(regionSize)) + + c.Assert(getTolerantResource(tc, region, core.ScheduleKind{Resource: core.LeaderKind}), Equals, int64(leaderTolerantSizeRatio)) + c.Assert(getTolerantResource(tc, region, core.ScheduleKind{Resource: core.RegionKind}), Equals, int64(getTolerantRatio(tc)*float64(regionSize))) +} + +var _ = Suite(&testBalanceLeaderSchedulerSuite{}) + +type testBalanceLeaderSchedulerSuite struct { + ctx context.Context + cancel context.CancelFunc + tc *mockcluster.Cluster + lb schedule.Scheduler + oc *schedule.OperatorController +} + +func (s *testBalanceLeaderSchedulerSuite) SetUpTest(c *C) { + s.ctx, s.cancel = context.WithCancel(context.Background()) + opt := mockoption.NewScheduleOptions() + s.tc = mockcluster.NewCluster(opt) + s.oc = schedule.NewOperatorController(s.ctx, nil, nil) + lb, err := schedule.CreateScheduler("balance-leader", s.oc, core.NewStorage(kv.NewMemoryKV()), nil) + c.Assert(err, IsNil) + s.lb = lb +} + +func (s *testBalanceLeaderSchedulerSuite) TearDownTest(c *C) { + s.cancel() +} + +func (s *testBalanceLeaderSchedulerSuite) schedule() *operator.Operator { + return s.lb.Schedule(s.tc) +} + +func (s *testBalanceLeaderSchedulerSuite) TestBalanceLimit(c *C) { + // Stores: 1 2 3 4 + // Leaders: 1 0 0 0 + // Region1: L F F F + s.tc.AddLeaderStore(1, 1) + s.tc.AddLeaderStore(2, 0) + s.tc.AddLeaderStore(3, 0) + s.tc.AddLeaderStore(4, 0) + s.tc.AddLeaderRegion(1, 1, 2, 3, 4) + c.Check(s.schedule(), IsNil) + + // Stores: 1 2 3 4 + // Leaders: 16 0 0 0 + // Region1: L F F F + s.tc.UpdateLeaderCount(1, 16) + c.Check(s.schedule(), NotNil) + + // Stores: 1 2 3 4 + // Leaders: 7 8 9 10 + // Region1: F F F L + s.tc.UpdateLeaderCount(1, 7) + s.tc.UpdateLeaderCount(2, 8) + s.tc.UpdateLeaderCount(3, 9) + s.tc.UpdateLeaderCount(4, 10) + s.tc.AddLeaderRegion(1, 4, 1, 2, 3) + c.Check(s.schedule(), IsNil) + + // Stores: 1 2 3 4 + // Leaders: 7 8 9 18 + // Region1: F F F L + s.tc.UpdateLeaderCount(4, 18) + c.Check(s.schedule(), NotNil) +} + +func (s *testBalanceLeaderSchedulerSuite) TestBalanceLeaderScheduleStrategy(c *C) { + // Stores: 1 2 3 4 + // Leader Count: 10 10 10 10 + // Leader Size : 10000 100 100 100 + // Region1: L F F F + s.tc.AddLeaderStore(1, 10, 10000) + s.tc.AddLeaderStore(2, 10, 100) + s.tc.AddLeaderStore(3, 10, 100) + s.tc.AddLeaderStore(4, 10, 100) + s.tc.AddLeaderRegion(1, 1, 2, 3, 4) + c.Check(s.schedule(), IsNil) +} + +func (s *testBalanceLeaderSchedulerSuite) TestBalanceLeaderTolerantRatio(c *C) { + // default leader tolerant ratio is 5, when schedule by count + // Stores: 1 2 3 4 + // Leader Count: 14->21 10 10 10 + // Leader Size : 100 100 100 100 + // Region1: L F F F + s.tc.AddLeaderStore(1, 14, 100) + s.tc.AddLeaderStore(2, 10, 100) + s.tc.AddLeaderStore(3, 10, 100) + s.tc.AddLeaderStore(4, 10, 100) + s.tc.AddLeaderRegion(1, 
1, 2, 3, 4) + c.Check(s.schedule(), IsNil) + c.Assert(s.tc.GetStore(1).GetLeaderCount(), Equals, 14) + s.tc.AddLeaderStore(1, 21, 100) + c.Assert(s.tc.GetStore(1).GetLeaderCount(), Equals, 21) + + c.Check(s.schedule(), NotNil) +} + +func (s *testBalanceLeaderSchedulerSuite) TestBalanceFilter(c *C) { + // Stores: 1 2 3 4 + // Leaders: 1 2 3 16 + // Region1: F F F L + s.tc.AddLeaderStore(1, 1) + s.tc.AddLeaderStore(2, 2) + s.tc.AddLeaderStore(3, 3) + s.tc.AddLeaderStore(4, 16) + s.tc.AddLeaderRegion(1, 4, 1, 2, 3) + + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 4, 1) + // Test stateFilter. + // if store 4 is offline, we should consider it + // because it still provides services + s.tc.SetStoreOffline(4) + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 4, 1) + // If store 1 is down, it will be filtered, + // store 2 becomes the store with least leaders. + s.tc.SetStoreDown(1) + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 4, 2) + + // Test healthFilter. + // If store 2 is busy, it will be filtered, + // store 3 becomes the store with least leaders. + s.tc.SetStoreBusy(2, true) + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 4, 3) + + // Test disconnectFilter. + // If store 3 is disconnected, no operator can be created. + s.tc.SetStoreDisconnect(3) + c.Assert(s.schedule(), IsNil) +} + +func (s *testBalanceLeaderSchedulerSuite) TestLeaderWeight(c *C) { + // Stores: 1 2 3 4 + // Leaders: 16 16 16 16->48 + // Weight: 0.5 0.9 1 2 + // Region1: L F F F + s.tc.AddLeaderStore(1, 16) + s.tc.AddLeaderStore(2, 16) + s.tc.AddLeaderStore(3, 16) + s.tc.AddLeaderStore(4, 16) + s.tc.UpdateStoreLeaderWeight(1, 0.5) + s.tc.UpdateStoreLeaderWeight(2, 0.9) + s.tc.UpdateStoreLeaderWeight(3, 1) + s.tc.UpdateStoreLeaderWeight(4, 2) + s.tc.AddLeaderRegion(1, 1, 2, 3, 4) + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 1, 4) + s.tc.UpdateLeaderCount(4, 48) + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 1, 3) +} + +func (s *testBalanceLeaderSchedulerSuite) TestBalanceSelector(c *C) { + // Stores: 1 2 3 4 + // Leaders: 1 2 3 16 + // Region1: - F F L + // Region2: F F L - + s.tc.AddLeaderStore(1, 1) + s.tc.AddLeaderStore(2, 2) + s.tc.AddLeaderStore(3, 3) + s.tc.AddLeaderStore(4, 16) + s.tc.AddLeaderRegion(1, 4, 2, 3) + s.tc.AddLeaderRegion(2, 3, 1, 2) + // store4 has max leader score, store1 has min leader score. + // The scheduler try to move a leader out of 16 first. + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 4, 2) + + // Stores: 1 2 3 4 + // Leaders: 1 14 15 16 + // Region1: - F F L + // Region2: F F L - + s.tc.UpdateLeaderCount(2, 14) + s.tc.UpdateLeaderCount(3, 15) + // Cannot move leader out of store4, move a leader into store1. + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 3, 1) + + // Stores: 1 2 3 4 + // Leaders: 1 2 15 16 + // Region1: - F L F + // Region2: L F F - + s.tc.AddLeaderStore(2, 2) + s.tc.AddLeaderRegion(1, 3, 2, 4) + s.tc.AddLeaderRegion(2, 1, 2, 3) + // No leader in store16, no follower in store1. Now source and target are store3 and store2. + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 3, 2) + + // Stores: 1 2 3 4 + // Leaders: 9 10 10 11 + // Region1: - F F L + // Region2: L F F - + s.tc.AddLeaderStore(1, 10) + s.tc.AddLeaderStore(2, 10) + s.tc.AddLeaderStore(3, 10) + s.tc.AddLeaderStore(4, 10) + s.tc.AddLeaderRegion(1, 4, 2, 3) + s.tc.AddLeaderRegion(2, 1, 2, 3) + // The cluster is balanced. 
+ c.Assert(s.schedule(), IsNil) + c.Assert(s.schedule(), IsNil) + + // store3's leader drops: + // Stores: 1 2 3 4 + // Leaders: 11 13 0 16 + // Region1: - F F L + // Region2: L F F - + s.tc.AddLeaderStore(1, 11) + s.tc.AddLeaderStore(2, 13) + s.tc.AddLeaderStore(3, 0) + s.tc.AddLeaderStore(4, 16) + testutil.CheckTransferLeader(c, s.schedule(), operator.OpBalance, 4, 3) +} + +var _ = Suite(&testReplicaCheckerSuite{}) + +type testReplicaCheckerSuite struct{} + +func (s *testReplicaCheckerSuite) TestBasic(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + + rc := checker.NewReplicaChecker(tc) + + opt.MaxSnapshotCount = 2 + + // Add stores 1,2,3,4. + tc.AddRegionStore(1, 4) + tc.AddRegionStore(2, 3) + tc.AddRegionStore(3, 2) + tc.AddRegionStore(4, 1) + // Add region 1 with leader in store 1 and follower in store 2. + tc.AddLeaderRegion(1, 1, 2) + + // Region has 2 peers, we need to add a new peer. + region := tc.GetRegion(1) + testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + + // Test healthFilter. + // If store 4 is down, we add to store 3. + tc.SetStoreDown(4) + testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + tc.SetStoreUp(4) + testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + + // Add peer in store 4, and we have enough replicas. + peer4, _ := tc.AllocPeer(4) + region = region.Clone(core.WithAddPeer(peer4)) + c.Assert(rc.Check(region), IsNil) + + // Add peer in store 3, and we have redundant replicas. + peer3, _ := tc.AllocPeer(3) + region = region.Clone(core.WithAddPeer(peer3)) + testutil.CheckRemovePeer(c, rc.Check(region), 1) + + region = region.Clone(core.WithRemoveStorePeer(1)) + + // Peer in store 3 is offline, transfer peer to store 1. + tc.SetStoreOffline(3) + testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 1) +} + +func (s *testReplicaCheckerSuite) TestLostStore(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + + tc.AddRegionStore(1, 1) + tc.AddRegionStore(2, 1) + + rc := checker.NewReplicaChecker(tc) + + // now region peer in store 1,2,3.but we just have store 1,2 + // This happens only in recovering the PD tc + // should not panic + tc.AddLeaderRegion(1, 1, 2, 3) + region := tc.GetRegion(1) + op := rc.Check(region) + c.Assert(op, IsNil) +} + +func (s *testReplicaCheckerSuite) TestOffline(c *C) { + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + + newTestReplication(opt, 3) + + rc := checker.NewReplicaChecker(tc) + + tc.AddRegionStore(1, 1) + tc.AddRegionStore(2, 2) + tc.AddRegionStore(3, 3) + tc.AddRegionStore(4, 4) + + tc.AddLeaderRegion(1, 1) + region := tc.GetRegion(1) + + // Store 2 has different zone and smallest region score. + testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + peer2, _ := tc.AllocPeer(2) + region = region.Clone(core.WithAddPeer(peer2)) + + // Store 3 has different zone and smallest region score. + testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + peer3, _ := tc.AllocPeer(3) + region = region.Clone(core.WithAddPeer(peer3)) + + // Store 4 has the same zone with store 3 and larger region score. + peer4, _ := tc.AllocPeer(4) + region = region.Clone(core.WithAddPeer(peer4)) + testutil.CheckRemovePeer(c, rc.Check(region), 4) + + // Test healthFilter. 
+ tc.SetStoreBusy(4, true) + c.Assert(rc.Check(region), IsNil) + tc.SetStoreBusy(4, false) + testutil.CheckRemovePeer(c, rc.Check(region), 4) + + // Test offline + // the number of region peers more than the maxReplicas + // remove the peer + tc.SetStoreOffline(3) + testutil.CheckRemovePeer(c, rc.Check(region), 3) + region = region.Clone(core.WithRemoveStorePeer(4)) + // the number of region peers equals the maxReplicas + // Transfer peer to store 4. + testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 4) + + // Store 5 has smaller region score than store 4, we will choose store 5. + tc.AddRegionStore(5, 3) + testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 5) +} diff --git a/scheduler/server/schedulers/base_scheduler.go b/scheduler/server/schedulers/base_scheduler.go new file mode 100644 index 00000000..b4361f21 --- /dev/null +++ b/scheduler/server/schedulers/base_scheduler.go @@ -0,0 +1,84 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "fmt" + "net/http" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap/log" +) + +// options for interval of schedulers +const ( + MaxScheduleInterval = time.Second * 5 + MinScheduleInterval = time.Millisecond * 10 + MinSlowScheduleInterval = time.Second * 3 + + ScheduleIntervalFactor = 1.3 +) + +type intervalGrowthType int + +const ( + exponentailGrowth intervalGrowthType = iota + linearGrowth + zeroGrowth +) + +// intervalGrow calculates the next interval of balance. 
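+// With the constants above, exponential growth multiplies the current interval
+// by ScheduleIntervalFactor and caps it at maxInterval, e.g. starting from
+// MinScheduleInterval: 10ms -> 13ms -> 16.9ms -> ... -> MaxScheduleInterval (5s);
+// linear growth instead adds MinSlowScheduleInterval (3s) per step up to the same cap.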
+func intervalGrow(x time.Duration, maxInterval time.Duration, typ intervalGrowthType) time.Duration { + switch typ { + case exponentailGrowth: + return minDuration(time.Duration(float64(x)*ScheduleIntervalFactor), maxInterval) + case linearGrowth: + return minDuration(x+MinSlowScheduleInterval, maxInterval) + case zeroGrowth: + return x + default: + log.Fatal("unknown interval growth type") + } + return 0 +} + +type baseScheduler struct { + opController *schedule.OperatorController +} + +func newBaseScheduler(opController *schedule.OperatorController) *baseScheduler { + return &baseScheduler{opController: opController} +} + +func (s *baseScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "not implements") +} + +func (s *baseScheduler) GetMinInterval() time.Duration { + return MinScheduleInterval +} + +func (s *baseScheduler) EncodeConfig() ([]byte, error) { + return schedule.EncodeConfig(nil) +} + +func (s *baseScheduler) GetNextInterval(interval time.Duration) time.Duration { + return intervalGrow(interval, MaxScheduleInterval, exponentailGrowth) +} + +func (s *baseScheduler) Prepare(cluster opt.Cluster) error { return nil } + +func (s *baseScheduler) Cleanup(cluster opt.Cluster) {} diff --git a/scheduler/server/schedulers/utils.go b/scheduler/server/schedulers/utils.go new file mode 100644 index 00000000..fd44b567 --- /dev/null +++ b/scheduler/server/schedulers/utils.go @@ -0,0 +1,114 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/schedule/opt" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +const ( + // adjustRatio is used to adjust TolerantSizeRatio according to region count. + adjustRatio float64 = 0.005 + leaderTolerantSizeRatio float64 = 5.0 + minTolerantSizeRatio float64 = 1.0 +) + +// ErrScheduleConfigNotExist the config is not correct. +var ErrScheduleConfigNotExist = errors.New("the config does not exist") + +func minUint64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func maxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func minDuration(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +func isRegionUnhealthy(region *core.RegionInfo) bool { + return len(region.GetLearners()) != 0 +} + +func shouldBalance(cluster opt.Cluster, source, target *core.StoreInfo, region *core.RegionInfo, kind core.ScheduleKind, scheduleName string) bool { + // The reason we use max(regionSize, averageRegionSize) to check is: + // 1. prevent moving small regions between stores with close scores, leading to unnecessary balance. + // 2. prevent moving huge regions, leading to over balance. 
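+	// The tolerant resource is subtracted from the source score and added to the
+	// target score, so a move is only proposed when the source would still score
+	// higher than the target after the region (or leader) had been moved.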
+ sourceID := source.GetID() + targetID := target.GetID() + tolerantResource := getTolerantResource(cluster, region, kind) + sourceScore := source.ResourceScore(kind, -tolerantResource) + targetScore := target.ResourceScore(kind, tolerantResource) + + // Make sure after move, source score is still greater than target score. + shouldBalance := sourceScore > targetScore + + if !shouldBalance { + log.Debug("skip balance "+kind.Resource.String(), + zap.String("scheduler", scheduleName), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID), + zap.Int64("source-size", source.GetRegionSize()), zap.Float64("source-score", sourceScore), + zap.Int64("target-size", target.GetRegionSize()), zap.Float64("target-score", targetScore), + zap.Int64("average-region-size", cluster.GetAverageRegionSize()), + zap.Int64("tolerant-resource", tolerantResource)) + } + return shouldBalance +} + +func getTolerantResource(cluster opt.Cluster, region *core.RegionInfo, kind core.ScheduleKind) int64 { + if kind.Resource == core.LeaderKind { + leaderCount := int64(1.0 * leaderTolerantSizeRatio) + return leaderCount + } + + regionSize := region.GetApproximateSize() + if regionSize < cluster.GetAverageRegionSize() { + regionSize = cluster.GetAverageRegionSize() + } + regionSize = int64(float64(regionSize) * getTolerantRatio(cluster)) + return regionSize +} + +func getTolerantRatio(cluster opt.Cluster) float64 { + var maxRegionCount float64 + stores := cluster.GetStores() + for _, store := range stores { + regionCount := float64(cluster.GetStoreRegionCount(store.GetID())) + if maxRegionCount < regionCount { + maxRegionCount = regionCount + } + } + tolerantSizeRatio := maxRegionCount * adjustRatio + if tolerantSizeRatio < minTolerantSizeRatio { + tolerantSizeRatio = minTolerantSizeRatio + } + + return tolerantSizeRatio +} diff --git a/scheduler/server/schedulers/utils_test.go b/scheduler/server/schedulers/utils_test.go new file mode 100644 index 00000000..f1c3aa0c --- /dev/null +++ b/scheduler/server/schedulers/utils_test.go @@ -0,0 +1,75 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "testing" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + . 
"github.com/pingcap/check" +) + +func TestSchedulers(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testMinMaxSuite{}) + +type testMinMaxSuite struct{} + +func (s *testMinMaxSuite) TestMinUint64(c *C) { + c.Assert(minUint64(1, 2), Equals, uint64(1)) + c.Assert(minUint64(2, 1), Equals, uint64(1)) + c.Assert(minUint64(1, 1), Equals, uint64(1)) +} + +func (s *testMinMaxSuite) TestMaxUint64(c *C) { + c.Assert(maxUint64(1, 2), Equals, uint64(2)) + c.Assert(maxUint64(2, 1), Equals, uint64(2)) + c.Assert(maxUint64(1, 1), Equals, uint64(1)) +} + +func (s *testMinMaxSuite) TestMinDuration(c *C) { + c.Assert(minDuration(time.Minute, time.Second), Equals, time.Second) + c.Assert(minDuration(time.Second, time.Minute), Equals, time.Second) + c.Assert(minDuration(time.Second, time.Second), Equals, time.Second) +} + +var _ = Suite(&testRegionUnhealthySuite{}) + +type testRegionUnhealthySuite struct{} + +func (s *testRegionUnhealthySuite) TestIsRegionUnhealthy(c *C) { + peers := make([]*metapb.Peer, 0, 3) + for i := uint64(0); i < 2; i++ { + p := &metapb.Peer{ + Id: i, + StoreId: i, + } + peers = append(peers, p) + } + peers = append(peers, &metapb.Peer{ + Id: 2, + StoreId: 2, + }) + + r1 := core.NewRegionInfo(&metapb.Region{Peers: peers[:2]}, peers[0], core.WithLearners([]*metapb.Peer{peers[1]})) + r2 := core.NewRegionInfo(&metapb.Region{Peers: peers[:2]}, peers[0], core.WithPendingPeers([]*metapb.Peer{peers[1]})) + r4 := core.NewRegionInfo(&metapb.Region{Peers: peers[:2]}, peers[0]) + c.Assert(isRegionUnhealthy(r1), IsTrue) + c.Assert(isRegionUnhealthy(r2), IsFalse) + c.Assert(isRegionUnhealthy(r4), IsFalse) +} diff --git a/scheduler/server/server.go b/scheduler/server/server.go new file mode 100644 index 00000000..440686ad --- /dev/null +++ b/scheduler/server/server.go @@ -0,0 +1,687 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "path" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/etcdutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/logutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/id" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap-incubator/tinykv/scheduler/server/member" + "github.com/pingcap-incubator/tinykv/scheduler/server/tso" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" + "go.etcd.io/etcd/pkg/types" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +var ( + // ErrNotBootstrapped is error info for cluster not bootstrapped. 
+ ErrNotBootstrapped = errors.New("TiKV cluster not bootstrapped, please start TiKV first") + // ErrServerNotStarted is error info for server not started. + ErrServerNotStarted = errors.New("The server has not been started") + // ErrOperatorNotFound is error info for operator not found. + ErrOperatorNotFound = errors.New("operator not found") + // ErrAddOperator is error info for already have an operator when adding operator. + ErrAddOperator = errors.New("failed to add operator, maybe already have one") + // ErrRegionNotAdjacent is error info for region not adjacent. + ErrRegionNotAdjacent = errors.New("two regions are not adjacent") + // ErrRegionNotFound is error info for region not found. + ErrRegionNotFound = func(regionID uint64) error { + return errors.Errorf("region %v not found", regionID) + } + // ErrRegionAbnormalPeer is error info for region has abonormal peer. + ErrRegionAbnormalPeer = func(regionID uint64) error { + return errors.Errorf("region %v has abnormal peer", regionID) + } + // ErrRegionIsStale is error info for region is stale. + ErrRegionIsStale = func(region *metapb.Region, origin *metapb.Region) error { + return errors.Errorf("region is stale: region %v origin %v", region, origin) + } + // ErrStoreNotFound is error info for store not found. + ErrStoreNotFound = func(storeID uint64) error { + return errors.Errorf("store %v not found", storeID) + } +) + +const ( + etcdTimeout = time.Second * 3 + etcdStartTimeout = time.Minute * 5 + leaderTickInterval = 50 * time.Millisecond + // pdRootPath for all pd servers. + pdRootPath = "/pd" + pdClusterIDPath = "/pd/cluster_id" +) + +// EnableZap enable the zap logger in embed etcd. +var EnableZap = false + +// Server is the pd server. +type Server struct { + // Server state. + isServing int64 + + // Configs and initial fields. + cfg *config.Config + etcdCfg *embed.Config + scheduleOpt *config.ScheduleOption + + serverLoopCtx context.Context + serverLoopCancel func() + serverLoopWg sync.WaitGroup + + member *member.Member + client *clientv3.Client + clusterID uint64 // pd cluster id. + rootPath string + + // Server services. + // for id allocator, we can use one allocator for + // store, region and peer, because we just need + // a unique ID. + idAllocator *id.AllocatorImpl + // for storage operation. + storage *core.Storage + // for tso. + tso *tso.TimestampOracle + // for raft cluster + cluster *RaftCluster + // For async region heartbeat. + hbStreams *heartbeatStreams + // Zap logger + lg *zap.Logger + logProps *log.ZapProperties +} + +// CreateServer creates the UNINITIALIZED pd server with given configuration. +func CreateServer(cfg *config.Config) (*Server, error) { + log.Info("PD Config", zap.Reflect("config", cfg)) + rand.Seed(time.Now().UnixNano()) + + s := &Server{ + cfg: cfg, + scheduleOpt: config.NewScheduleOption(cfg), + member: &member.Member{}, + } + + // Adjust etcd config. + etcdCfg, err := s.cfg.GenEmbedEtcdConfig() + if err != nil { + return nil, err + } + etcdCfg.ServiceRegister = func(gs *grpc.Server) { schedulerpb.RegisterSchedulerServer(gs, s) } + s.etcdCfg = etcdCfg + if EnableZap { + // The etcd master version has removed embed.Config.SetupLogging. + // Now logger is set up automatically based on embed.Config.Logger, + // Use zap logger in the test, otherwise will panic. 
+ // Reference: https://go.etcd.io/etcd/blob/master/embed/config_logging.go#L45 + s.etcdCfg.Logger = "zap" + s.etcdCfg.LogOutputs = []string{"stdout"} + } + s.lg = cfg.GetZapLogger() + s.logProps = cfg.GetZapLogProperties() + return s, nil +} + +func (s *Server) startEtcd(ctx context.Context) error { + log.Info("start embed etcd") + ctx, cancel := context.WithTimeout(ctx, etcdStartTimeout) + defer cancel() + + etcd, err := embed.StartEtcd(s.etcdCfg) + if err != nil { + return errors.WithStack(err) + } + + // Check cluster ID + urlmap, err := types.NewURLsMap(s.cfg.InitialCluster) + if err != nil { + return errors.WithStack(err) + } + tlsConfig, err := s.cfg.Security.ToTLSConfig() + if err != nil { + return err + } + if err = etcdutil.CheckClusterID(etcd.Server.Cluster().ID(), urlmap, tlsConfig); err != nil { + return err + } + + select { + // Wait etcd until it is ready to use + case <-etcd.Server.ReadyNotify(): + case <-ctx.Done(): + return errors.Errorf("canceled when waiting embed etcd to be ready") + } + + endpoints := []string{s.etcdCfg.ACUrls[0].String()} + log.Info("create etcd v3 client", zap.Strings("endpoints", endpoints)) + + client, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + DialTimeout: etcdTimeout, + TLS: tlsConfig, + }) + if err != nil { + return errors.WithStack(err) + } + + etcdServerID := uint64(etcd.Server.ID()) + + // update advertise peer urls. + etcdMembers, err := etcdutil.ListEtcdMembers(client) + if err != nil { + return err + } + for _, m := range etcdMembers.Members { + if etcdServerID == m.ID { + etcdPeerURLs := strings.Join(m.PeerURLs, ",") + if s.cfg.AdvertisePeerUrls != etcdPeerURLs { + log.Info("update advertise peer urls", zap.String("from", s.cfg.AdvertisePeerUrls), zap.String("to", etcdPeerURLs)) + s.cfg.AdvertisePeerUrls = etcdPeerURLs + } + } + } + s.client = client + s.member = member.NewMember(etcd, client, etcdServerID) + return nil +} + +func (s *Server) startServer(ctx context.Context) error { + var err error + if err = s.initClusterID(); err != nil { + return err + } + log.Info("init cluster id", zap.Uint64("cluster-id", s.clusterID)) + + s.rootPath = path.Join(pdRootPath, strconv.FormatUint(s.clusterID, 10)) + s.member.MemberInfo(s.cfg, s.Name(), s.rootPath) + + s.idAllocator = id.NewAllocatorImpl(s.client, s.rootPath, s.member.MemberValue()) + s.tso = tso.NewTimestampOracle( + s.client, + s.rootPath, + s.member.MemberValue(), + s.cfg.TsoSaveInterval.Duration, + func() time.Duration { return s.scheduleOpt.LoadPDServerConfig().MaxResetTSGap }, + ) + kvBase := kv.NewEtcdKVBase(s.client, s.rootPath) + s.storage = core.NewStorage(kvBase) + s.cluster = newRaftCluster(ctx, s, s.clusterID) + s.hbStreams = newHeartbeatStreams(ctx, s.clusterID, s.cluster) + // Server has started. + atomic.StoreInt64(&s.isServing, 1) + return nil +} + +func (s *Server) initClusterID() error { + // Get any cluster key to parse the cluster ID. + resp, err := etcdutil.EtcdKVGet(s.client, pdClusterIDPath) + if err != nil { + return err + } + + // If no key exist, generate a random cluster ID. + if len(resp.Kvs) == 0 { + s.clusterID, err = initOrGetClusterID(s.client, pdClusterIDPath) + return err + } + s.clusterID, err = typeutil.BytesToUint64(resp.Kvs[0].Value) + return err +} + +// Close closes the server. 
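+// It stops the server loops first and then releases the etcd client, the embedded
+// etcd member, the heartbeat streams and the backend storage, in that order.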
+func (s *Server) Close() { + if !atomic.CompareAndSwapInt64(&s.isServing, 1, 0) { + // server is already closed + return + } + + log.Info("closing server") + + s.stopServerLoop() + + if s.client != nil { + s.client.Close() + } + + if s.member.Etcd() != nil { + s.member.Close() + } + + if s.hbStreams != nil { + s.hbStreams.Close() + } + if err := s.storage.Close(); err != nil { + log.Error("close storage meet error", zap.Error(err)) + } + + log.Info("close server") +} + +// IsClosed checks whether server is closed or not. +func (s *Server) IsClosed() bool { + return atomic.LoadInt64(&s.isServing) == 0 +} + +// Run runs the pd server. +func (s *Server) Run(ctx context.Context) error { + go StartMonitor(ctx, time.Now, func() { + log.Error("system time jumps backward") + }) + + if err := s.startEtcd(ctx); err != nil { + return err + } + + if err := s.startServer(ctx); err != nil { + return err + } + + s.startServerLoop(ctx) + + return nil +} + +// Context returns the loop context of server. +func (s *Server) Context() context.Context { + return s.serverLoopCtx +} + +func (s *Server) startServerLoop(ctx context.Context) { + s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(ctx) + s.serverLoopWg.Add(2) + go s.leaderLoop() + go s.etcdLeaderLoop() +} + +func (s *Server) stopServerLoop() { + s.serverLoopCancel() + s.serverLoopWg.Wait() +} + +func (s *Server) bootstrapCluster(req *schedulerpb.BootstrapRequest) (*schedulerpb.BootstrapResponse, error) { + clusterID := s.clusterID + + log.Info("try to bootstrap raft cluster", + zap.Uint64("cluster-id", clusterID), + zap.String("request", fmt.Sprintf("%v", req))) + + if err := checkBootstrapRequest(clusterID, req); err != nil { + return nil, err + } + + clusterMeta := metapb.Cluster{ + Id: clusterID, + MaxPeerCount: uint32(s.scheduleOpt.GetReplication().GetMaxReplicas()), + } + + // Set cluster meta + clusterValue, err := clusterMeta.Marshal() + if err != nil { + return nil, errors.WithStack(err) + } + clusterRootPath := s.getClusterRootPath() + + var ops []clientv3.Op + ops = append(ops, clientv3.OpPut(clusterRootPath, string(clusterValue))) + + // Set bootstrap time + bootstrapKey := makeBootstrapTimeKey(clusterRootPath) + nano := time.Now().UnixNano() + + timeData := typeutil.Uint64ToBytes(uint64(nano)) + ops = append(ops, clientv3.OpPut(bootstrapKey, string(timeData))) + + // Set store meta + storeMeta := req.GetStore() + storePath := makeStoreKey(clusterRootPath, storeMeta.GetId()) + storeValue, err := storeMeta.Marshal() + if err != nil { + return nil, errors.WithStack(err) + } + ops = append(ops, clientv3.OpPut(storePath, string(storeValue))) + + // TODO: we must figure out a better way to handle bootstrap failed, maybe intervene manually. 
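+	// The transaction below only succeeds if the cluster root path has not been
+	// created yet (its CreateRevision is 0), so concurrent bootstrap requests
+	// cannot both write the cluster metadata; the losers see "already bootstrapped".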
+ bootstrapCmp := clientv3.Compare(clientv3.CreateRevision(clusterRootPath), "=", 0) + resp, err := kv.NewSlowLogTxn(s.client).If(bootstrapCmp).Then(ops...).Commit() + if err != nil { + return nil, errors.WithStack(err) + } + if !resp.Succeeded { + log.Warn("cluster already bootstrapped", zap.Uint64("cluster-id", clusterID)) + return nil, errors.Errorf("cluster %d already bootstrapped", clusterID) + } + + log.Info("bootstrap cluster ok", zap.Uint64("cluster-id", clusterID)) + if err := s.cluster.start(); err != nil { + return nil, err + } + + return &schedulerpb.BootstrapResponse{}, nil +} + +func (s *Server) createRaftCluster() error { + if s.cluster.isRunning() { + return nil + } + + return s.cluster.start() +} + +func (s *Server) stopRaftCluster() { + s.cluster.stop() +} + +// GetAddr returns the server urls for clients. +func (s *Server) GetAddr() string { + return s.cfg.AdvertiseClientUrls +} + +// GetMemberInfo returns the server member information. +func (s *Server) GetMemberInfo() *schedulerpb.Member { + return proto.Clone(s.member.Member()).(*schedulerpb.Member) +} + +// GetEndpoints returns the etcd endpoints for outer use. +func (s *Server) GetEndpoints() []string { + return s.client.Endpoints() +} + +// GetClient returns builtin etcd client. +func (s *Server) GetClient() *clientv3.Client { + return s.client +} + +// GetLeader returns leader of etcd. +func (s *Server) GetLeader() *schedulerpb.Member { + return s.member.GetLeader() +} + +// GetMember returns the member of server. +func (s *Server) GetMember() *member.Member { + return s.member +} + +// GetStorage returns the backend storage of server. +func (s *Server) GetStorage() *core.Storage { + return s.storage +} + +// GetAllocator returns the ID allocator of server. +func (s *Server) GetAllocator() *id.AllocatorImpl { + return s.idAllocator +} + +// Name returns the unique etcd Name for this server in etcd cluster. +func (s *Server) Name() string { + return s.cfg.Name +} + +// ClusterID returns the cluster ID of this server. +func (s *Server) ClusterID() uint64 { + return s.clusterID +} + +// GetConfig gets the config information. +func (s *Server) GetConfig() *config.Config { + cfg := s.cfg.Clone() + cfg.Schedule = *s.scheduleOpt.Load() + cfg.Replication = *s.scheduleOpt.GetReplication().Load() + cfg.PDServerCfg = *s.scheduleOpt.LoadPDServerConfig() + storage := s.GetStorage() + if storage == nil { + return cfg + } + sches, configs, err := storage.LoadAllScheduleConfig() + if err != nil { + return cfg + } + payload := make(map[string]string) + for i, sche := range sches { + payload[sche] = configs[i] + } + cfg.Schedule.SchedulersPayload = payload + return cfg +} + +// GetScheduleConfig gets the balance config information. +func (s *Server) GetScheduleConfig() *config.ScheduleConfig { + cfg := &config.ScheduleConfig{} + *cfg = *s.scheduleOpt.Load() + return cfg +} + +// GetReplicationConfig get the replication config. +func (s *Server) GetReplicationConfig() *config.ReplicationConfig { + cfg := &config.ReplicationConfig{} + *cfg = *s.scheduleOpt.GetReplication().Load() + return cfg +} + +// SetReplicationConfig sets the replication config. +func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { + old := s.scheduleOpt.GetReplication().Load() + s.scheduleOpt.GetReplication().Store(&cfg) + log.Info("replication config is updated", zap.Reflect("new", cfg), zap.Reflect("old", old)) + return nil +} + +// GetSecurityConfig get the security config. 
+func (s *Server) GetSecurityConfig() *config.SecurityConfig { + return &s.cfg.Security +} + +func (s *Server) getClusterRootPath() string { + return path.Join(s.rootPath, "raft") +} + +// GetRaftCluster gets Raft cluster. +// If cluster has not been bootstrapped, return nil. +func (s *Server) GetRaftCluster() *RaftCluster { + if s.IsClosed() || !s.cluster.isRunning() { + return nil + } + return s.cluster +} + +// GetCluster gets cluster. +func (s *Server) GetCluster() *metapb.Cluster { + return &metapb.Cluster{ + Id: s.clusterID, + MaxPeerCount: uint32(s.scheduleOpt.GetReplication().GetMaxReplicas()), + } +} + +// GetMetaRegions gets meta regions from cluster. +func (s *Server) GetMetaRegions() []*metapb.Region { + cluster := s.GetRaftCluster() + if cluster != nil { + return cluster.GetMetaRegions() + } + return nil +} + +// GetClusterStatus gets cluster status. +func (s *Server) GetClusterStatus() (*ClusterStatus, error) { + s.cluster.Lock() + defer s.cluster.Unlock() + return s.cluster.loadClusterStatus() +} + +// SetLogLevel sets log level. +func (s *Server) SetLogLevel(level string) { + s.cfg.Log.Level = level + log.SetLevel(logutil.StringToZapLogLevel(level)) + log.Warn("log level changed", zap.String("level", log.GetLevel().String())) +} + +var healthURL = "/pd/ping" + +// CheckHealth checks if members are healthy. +func (s *Server) CheckHealth(members []*schedulerpb.Member) map[uint64]*schedulerpb.Member { + unhealthMembers := make(map[uint64]*schedulerpb.Member) + for _, member := range members { + for _, cURL := range member.ClientUrls { + resp, err := dialClient.Get(fmt.Sprintf("%s%s", cURL, healthURL)) + if resp != nil { + resp.Body.Close() + } + if err != nil || resp.StatusCode != http.StatusOK { + unhealthMembers[member.GetMemberId()] = member + break + } + } + } + return unhealthMembers +} + +func (s *Server) leaderLoop() { + defer logutil.LogPanic() + defer s.serverLoopWg.Done() + + for { + if s.IsClosed() { + log.Info("server is closed, return leader loop") + return + } + + leader, rev, checkAgain := s.member.CheckLeader(s.Name()) + if checkAgain { + continue + } + if leader != nil { + log.Info("start watch leader", zap.Stringer("leader", leader)) + s.member.WatchLeader(s.serverLoopCtx, leader, rev) + log.Info("leader changed, try to campaign leader") + } + + etcdLeader := s.member.GetEtcdLeader() + if etcdLeader != s.member.ID() { + log.Info("skip campaign leader and check later", + zap.String("server-name", s.Name()), + zap.Uint64("etcd-leader-id", etcdLeader)) + time.Sleep(200 * time.Millisecond) + continue + } + s.campaignLeader() + } +} + +func (s *Server) campaignLeader() { + log.Info("start to campaign leader", zap.String("campaign-leader-name", s.Name())) + + lease := member.NewLeaderLease(s.client) + defer lease.Close() + if err := s.member.CampaignLeader(lease, s.cfg.LeaderLease); err != nil { + log.Error("campaign leader meet error", zap.Error(err)) + return + } + + // Start keepalive and enable TSO service. + // TSO service is strictly enabled/disabled by leader lease for 2 reasons: + // 1. lease based approach is not affected by thread pause, slow runtime schedule, etc. + // 2. load region could be slow. Based on lease we can recover TSO service faster. 
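+	// The lease is kept alive by the goroutine started just below; if it expires
+	// anyway, the leader ticker in the loop at the end of this function notices
+	// and the server steps down from leadership.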
+ + ctx, cancel := context.WithCancel(s.serverLoopCtx) + defer cancel() + go lease.KeepAlive(ctx) + log.Info("campaign leader ok", zap.String("campaign-leader-name", s.Name())) + + log.Debug("sync timestamp for tso") + if err := s.tso.SyncTimestamp(lease); err != nil { + log.Error("failed to sync timestamp", zap.Error(err)) + return + } + defer s.tso.ResetTimestamp() + + // Try to create raft cluster. + err := s.createRaftCluster() + if err != nil { + log.Error("failed to create raft cluster", zap.Error(err)) + return + } + defer s.stopRaftCluster() + + s.member.EnableLeader() + defer s.member.DisableLeader() + + log.Info("PD cluster leader is ready to serve", zap.String("leader-name", s.Name())) + + tsTicker := time.NewTicker(tso.UpdateTimestampStep) + defer tsTicker.Stop() + leaderTicker := time.NewTicker(leaderTickInterval) + defer leaderTicker.Stop() + + for { + select { + case <-leaderTicker.C: + if lease.IsExpired() { + log.Info("lease expired, leader step down") + return + } + etcdLeader := s.member.GetEtcdLeader() + if etcdLeader != s.member.ID() { + log.Info("etcd leader changed, resigns leadership", zap.String("old-leader-name", s.Name())) + return + } + case <-tsTicker.C: + if err = s.tso.UpdateTimestamp(); err != nil { + log.Error("failed to update timestamp", zap.Error(err)) + return + } + case <-ctx.Done(): + // Server is closed and it should return nil. + log.Info("server is closed") + return + } + } +} + +func (s *Server) etcdLeaderLoop() { + defer logutil.LogPanic() + defer s.serverLoopWg.Done() + + ctx, cancel := context.WithCancel(s.serverLoopCtx) + defer cancel() + for { + select { + case <-time.After(s.cfg.LeaderPriorityCheckInterval.Duration): + s.member.CheckPriority(ctx) + case <-ctx.Done(): + log.Info("server is closed, exit etcd leader loop") + return + } + } +} diff --git a/scheduler/server/server_test.go b/scheduler/server/server_test.go new file mode 100644 index 00000000..c3636c2c --- /dev/null +++ b/scheduler/server/server_test.go @@ -0,0 +1,166 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "testing" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + . 
"github.com/pingcap/check" +) + +func TestServer(t *testing.T) { + EnableZap = true + TestingT(t) +} + +func mustRunTestServer(c *C) (*Server, CleanupFunc) { + var err error + server, cleanup, err := NewTestServer(c) + c.Assert(err, IsNil) + mustWaitLeader(c, []*Server{server}) + return server, cleanup +} + +func mustWaitLeader(c *C, svrs []*Server) *Server { + var leader *Server + testutil.WaitUntil(c, func(c *C) bool { + for _, s := range svrs { + if !s.IsClosed() && s.member.IsLeader() { + leader = s + return true + } + } + return false + }) + return leader +} + +var _ = Suite(&testLeaderServerSuite{}) + +type testLeaderServerSuite struct { + ctx context.Context + cancel context.CancelFunc + svrs map[string]*Server + leaderPath string +} + +func (s *testLeaderServerSuite) SetUpSuite(c *C) { + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.svrs = make(map[string]*Server) + + cfgs := NewTestMultiConfig(c, 3) + + ch := make(chan *Server, 3) + for i := 0; i < 3; i++ { + cfg := cfgs[i] + + go func() { + svr, err := CreateServer(cfg) + c.Assert(err, IsNil) + err = svr.Run(s.ctx) + c.Assert(err, IsNil) + ch <- svr + }() + } + + for i := 0; i < 3; i++ { + svr := <-ch + s.svrs[svr.GetAddr()] = svr + s.leaderPath = svr.GetMember().GetLeaderPath() + } +} + +func (s *testLeaderServerSuite) TearDownSuite(c *C) { + s.cancel() + for _, svr := range s.svrs { + svr.Close() + testutil.CleanServer(svr.cfg) + } +} + +var _ = Suite(&testServerSuite{}) + +type testServerSuite struct{} + +func newTestServersWithCfgs(ctx context.Context, c *C, cfgs []*config.Config) ([]*Server, CleanupFunc) { + svrs := make([]*Server, 0, len(cfgs)) + + ch := make(chan *Server) + for _, cfg := range cfgs { + go func(cfg *config.Config) { + svr, err := CreateServer(cfg) + c.Assert(err, IsNil) + err = svr.Run(ctx) + c.Assert(err, IsNil) + ch <- svr + }(cfg) + } + + for i := 0; i < len(cfgs); i++ { + svrs = append(svrs, <-ch) + } + mustWaitLeader(c, svrs) + + cleanup := func() { + for _, svr := range svrs { + svr.Close() + } + for _, cfg := range cfgs { + testutil.CleanServer(cfg) + } + } + + return svrs, cleanup +} + +func (s *testServerSuite) TestCheckClusterID(c *C) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfgs := NewTestMultiConfig(c, 2) + for i, cfg := range cfgs { + cfg.DataDir = fmt.Sprintf("/tmp/test_pd_check_clusterID_%d", i) + // Clean up before testing. + testutil.CleanServer(cfg) + } + originInitial := cfgs[0].InitialCluster + for _, cfg := range cfgs { + cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, cfg.PeerUrls) + } + + cfgA, cfgB := cfgs[0], cfgs[1] + // Start a standalone cluster + // TODO: clean up. For now tests failed because: + // etcdserver: failed to purge snap file ... + svrsA, cleanA := newTestServersWithCfgs(ctx, c, []*config.Config{cfgA}) + defer cleanA() + // Close it. + for _, svr := range svrsA { + svr.Close() + } + + // Start another cluster. + _, cleanB := newTestServersWithCfgs(ctx, c, []*config.Config{cfgB}) + defer cleanB() + + // Start previous cluster, expect an error. + cfgA.InitialCluster = originInitial + svr, err := CreateServer(cfgA) + c.Assert(err, IsNil) + err = svr.Run(ctx) + c.Assert(err, NotNil) +} diff --git a/scheduler/server/systime_mon.go b/scheduler/server/systime_mon.go new file mode 100644 index 00000000..db39fed2 --- /dev/null +++ b/scheduler/server/systime_mon.go @@ -0,0 +1,41 @@ +// Copyright 2017 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "time" + + "github.com/pingcap/log" + "go.uber.org/zap" +) + +// StartMonitor calls systimeErrHandler if system time jump backward. +func StartMonitor(ctx context.Context, now func() time.Time, systimeErrHandler func()) { + log.Info("start system time monitor") + tick := time.NewTicker(100 * time.Millisecond) + defer tick.Stop() + for { + last := now().UnixNano() + select { + case <-tick.C: + if now().UnixNano() < last { + log.Error("system time jump backward", zap.Int64("last", last)) + systimeErrHandler() + } + case <-ctx.Done(): + return + } + } +} diff --git a/scheduler/server/systime_mon_test.go b/scheduler/server/systime_mon_test.go new file mode 100644 index 00000000..4bab0edd --- /dev/null +++ b/scheduler/server/systime_mon_test.go @@ -0,0 +1,46 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "sync/atomic" + "testing" + "time" +) + +func TestSystimeMonitor(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var jumpForward int32 + + trigged := false + go StartMonitor(ctx, + func() time.Time { + if !trigged { + trigged = true + return time.Now() + } + + return time.Now().Add(-2 * time.Second) + }, func() { + atomic.StoreInt32(&jumpForward, 1) + }) + + time.Sleep(1 * time.Second) + + if atomic.LoadInt32(&jumpForward) != 1 { + t.Error("should detect time error") + } +} diff --git a/scheduler/server/testutil.go b/scheduler/server/testutil.go new file mode 100644 index 00000000..bf659153 --- /dev/null +++ b/scheduler/server/testutil.go @@ -0,0 +1,117 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tempurl" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap/check" + "github.com/pingcap/log" + "go.etcd.io/etcd/embed" + + // Register schedulers + _ "github.com/pingcap-incubator/tinykv/scheduler/server/schedulers" +) + +// CleanupFunc closes test pd server(s) and deletes any files left behind. +type CleanupFunc func() + +// NewTestServer creates a pd server for testing. +func NewTestServer(c *check.C) (*Server, CleanupFunc, error) { + ctx, cancel := context.WithCancel(context.Background()) + cfg := NewTestSingleConfig(c) + s, err := CreateServer(cfg) + if err != nil { + cancel() + return nil, nil, err + } + if err = s.Run(ctx); err != nil { + cancel() + return nil, nil, err + } + + cleanup := func() { + cancel() + s.Close() + testutil.CleanServer(cfg) + } + return s, cleanup, nil +} + +var zapLogOnce sync.Once + +// NewTestSingleConfig is only for test to create one pd. +// Because PD client also needs this, so export here. +func NewTestSingleConfig(c *check.C) *config.Config { + cfg := &config.Config{ + Name: "pd", + ClientUrls: tempurl.Alloc(), + PeerUrls: tempurl.Alloc(), + + InitialClusterState: embed.ClusterStateFlagNew, + + LeaderLease: 1, + TsoSaveInterval: typeutil.NewDuration(200 * time.Millisecond), + } + + cfg.AdvertiseClientUrls = cfg.ClientUrls + cfg.AdvertisePeerUrls = cfg.PeerUrls + cfg.DataDir, _ = ioutil.TempDir("/tmp", "test_pd") + cfg.InitialCluster = fmt.Sprintf("pd=%s", cfg.PeerUrls) + cfg.DisableStrictReconfigCheck = true + cfg.TickInterval = typeutil.NewDuration(100 * time.Millisecond) + cfg.ElectionInterval = typeutil.NewDuration(3 * time.Second) + cfg.LeaderPriorityCheckInterval = typeutil.NewDuration(100 * time.Millisecond) + err := cfg.SetupLogger() + c.Assert(err, check.IsNil) + zapLogOnce.Do(func() { + log.ReplaceGlobals(cfg.GetZapLogger(), cfg.GetZapLogProperties()) + }) + + c.Assert(cfg.Adjust(nil), check.IsNil) + + return cfg +} + +// NewTestMultiConfig is only for test to create multiple pd configurations. +// Because PD client also needs this, so export here. +func NewTestMultiConfig(c *check.C, count int) []*config.Config { + cfgs := make([]*config.Config, count) + + var clusters []string + for i := 1; i <= count; i++ { + cfg := NewTestSingleConfig(c) + cfg.Name = fmt.Sprintf("pd%d", i) + + clusters = append(clusters, fmt.Sprintf("%s=%s", cfg.Name, cfg.PeerUrls)) + + cfgs[i-1] = cfg + } + + initialCluster := strings.Join(clusters, ",") + for _, cfg := range cfgs { + cfg.InitialCluster = initialCluster + } + + return cfgs +} diff --git a/scheduler/server/tso/tso.go b/scheduler/server/tso/tso.go new file mode 100644 index 00000000..5fbe7988 --- /dev/null +++ b/scheduler/server/tso/tso.go @@ -0,0 +1,264 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tso + +import ( + "path" + "sync/atomic" + "time" + "unsafe" + + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/etcdutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tsoutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/kv" + "github.com/pingcap-incubator/tinykv/scheduler/server/member" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +const ( + // UpdateTimestampStep is used to update timestamp. + UpdateTimestampStep = 50 * time.Millisecond + updateTimestampGuard = time.Millisecond + maxLogical = int64(1 << 18) +) + +// TimestampOracle is used to maintain the logic of tso. +type TimestampOracle struct { + // For tso, set after pd becomes leader. + ts unsafe.Pointer + lastSavedTime time.Time + lease *member.LeaderLease + + rootPath string + member string + client *clientv3.Client + saveInterval time.Duration + maxResetTsGap func() time.Duration +} + +// NewTimestampOracle creates a new TimestampOracle. +// TODO: remove saveInterval +func NewTimestampOracle(client *clientv3.Client, rootPath string, member string, saveInterval time.Duration, maxResetTsGap func() time.Duration) *TimestampOracle { + return &TimestampOracle{ + rootPath: rootPath, + client: client, + saveInterval: saveInterval, + maxResetTsGap: maxResetTsGap, + member: member, + } +} + +type atomicObject struct { + physical time.Time + logical int64 +} + +func (t *TimestampOracle) getTimestampPath() string { + return path.Join(t.rootPath, "timestamp") +} + +func (t *TimestampOracle) loadTimestamp() (time.Time, error) { + data, err := etcdutil.GetValue(t.client, t.getTimestampPath()) + if err != nil { + return typeutil.ZeroTime, err + } + if len(data) == 0 { + return typeutil.ZeroTime, nil + } + return typeutil.ParseTimestamp(data) +} + +// save timestamp, if lastTs is 0, we think the timestamp doesn't exist, so create it, +// otherwise, update it. +func (t *TimestampOracle) saveTimestamp(ts time.Time) error { + data := typeutil.Uint64ToBytes(uint64(ts.UnixNano())) + key := t.getTimestampPath() + + leaderPath := path.Join(t.rootPath, "leader") + txn := kv.NewSlowLogTxn(t.client).If(append([]clientv3.Cmp{}, clientv3.Compare(clientv3.Value(leaderPath), "=", t.member))...) + resp, err := txn.Then(clientv3.OpPut(key, string(data))).Commit() + if err != nil { + return errors.WithStack(err) + } + if !resp.Succeeded { + return errors.New("save timestamp failed, maybe we lost leader") + } + + t.lastSavedTime = ts + + return nil +} + +// SyncTimestamp is used to synchronize the timestamp. +func (t *TimestampOracle) SyncTimestamp(lease *member.LeaderLease) error { + last, err := t.loadTimestamp() + if err != nil { + return err + } + + next := time.Now() + + // If the current system time minus the saved etcd timestamp is less than `updateTimestampGuard`, + // the timestamp allocation will start from the saved etcd timestamp temporarily. 
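+	// In other words, if the wall clock is not comfortably ahead of the last
+	// persisted timestamp, allocation resumes from just after the persisted value
+	// rather than trusting the wall clock.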
+ if typeutil.SubTimeByWallClock(next, last) < updateTimestampGuard { + log.Error("system time may be incorrect", zap.Time("last", last), zap.Time("next", next)) + next = last.Add(updateTimestampGuard) + } + + save := next.Add(t.saveInterval) + if err = t.saveTimestamp(save); err != nil { + return err + } + + log.Info("sync and save timestamp", zap.Time("last", last), zap.Time("save", save), zap.Time("next", next)) + + current := &atomicObject{ + physical: next, + } + t.lease = lease + atomic.StorePointer(&t.ts, unsafe.Pointer(current)) + + return nil +} + +// ResetUserTimestamp update the physical part with specified tso. +func (t *TimestampOracle) ResetUserTimestamp(tso uint64) error { + if t.lease == nil || t.lease.IsExpired() { + return errors.New("Setup timestamp failed, lease expired") + } + physical, _ := tsoutil.ParseTS(tso) + next := physical.Add(time.Millisecond) + prev := (*atomicObject)(atomic.LoadPointer(&t.ts)) + + // do not update + if typeutil.SubTimeByWallClock(next, prev.physical) <= 3*updateTimestampGuard { + return errors.New("the specified ts too small than now") + } + + if typeutil.SubTimeByWallClock(next, prev.physical) >= t.maxResetTsGap() { + return errors.New("the specified ts too large than now") + } + + save := next.Add(t.saveInterval) + if err := t.saveTimestamp(save); err != nil { + return err + } + update := &atomicObject{ + physical: next, + } + atomic.CompareAndSwapPointer(&t.ts, unsafe.Pointer(prev), unsafe.Pointer(update)) + return nil +} + +// UpdateTimestamp is used to update the timestamp. +// This function will do two things: +// 1. When the logical time is going to be used up, the current physical time needs to increase. +// 2. If the time window is not enough, which means the saved etcd time minus the next physical time +// is less than or equal to `updateTimestampGuard`, it will need to be updated and save the +// next physical time plus `TsoSaveInterval` into etcd. +// +// Here is some constraints that this function must satisfy: +// 1. The physical time is monotonically increasing. +// 2. The saved time is monotonically increasing. +// 3. The physical time is always less than the saved timestamp. +func (t *TimestampOracle) UpdateTimestamp() error { + prev := (*atomicObject)(atomic.LoadPointer(&t.ts)) + now := time.Now() + + jetLag := typeutil.SubTimeByWallClock(now, prev.physical) + if jetLag > 3*UpdateTimestampStep { + log.Warn("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now)) + } + + var next time.Time + prevLogical := atomic.LoadInt64(&prev.logical) + // If the system time is greater, it will be synchronized with the system time. + if jetLag > updateTimestampGuard { + next = now + } else if prevLogical > maxLogical/2 { + // The reason choosing maxLogical/2 here is that it's big enough for common cases. + // Because there is enough timestamp can be allocated before next update. + log.Warn("the logical time may be not enough", zap.Int64("prev-logical", prevLogical)) + next = prev.physical.Add(time.Millisecond) + } else { + // It will still use the previous physical time to alloc the timestamp. + return nil + } + + // It is not safe to increase the physical time to `next`. + // The time window needs to be updated and saved to etcd. 
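+	// Persisting next plus saveInterval before the in-memory time advances keeps
+	// the saved timestamp ahead of every timestamp handed out (constraint 3 above).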
+ if typeutil.SubTimeByWallClock(t.lastSavedTime, next) <= updateTimestampGuard { + save := next.Add(t.saveInterval) + if err := t.saveTimestamp(save); err != nil { + return err + } + } + + current := &atomicObject{ + physical: next, + logical: 0, + } + + atomic.StorePointer(&t.ts, unsafe.Pointer(current)) + + return nil +} + +// ResetTimestamp is used to reset the timestamp. +func (t *TimestampOracle) ResetTimestamp() { + zero := &atomicObject{ + physical: typeutil.ZeroTime, + } + atomic.StorePointer(&t.ts, unsafe.Pointer(zero)) +} + +const maxRetryCount = 100 + +// GetRespTS is used to get a timestamp. +func (t *TimestampOracle) GetRespTS(count uint32) (schedulerpb.Timestamp, error) { + var resp schedulerpb.Timestamp + + if count == 0 { + return resp, errors.New("tso count should be positive") + } + + for i := 0; i < maxRetryCount; i++ { + current := (*atomicObject)(atomic.LoadPointer(&t.ts)) + if current.physical == typeutil.ZeroTime { + log.Error("we haven't synced timestamp ok, wait and retry", zap.Int("retry-count", i)) + time.Sleep(200 * time.Millisecond) + continue + } + + resp.Physical = current.physical.UnixNano() / int64(time.Millisecond) + resp.Logical = atomic.AddInt64(¤t.logical, int64(count)) + if resp.Logical >= maxLogical { + log.Error("logical part outside of max logical interval, please check ntp time", + zap.Reflect("response", resp), + zap.Int("retry-count", i)) + time.Sleep(UpdateTimestampStep) + continue + } + if t.lease == nil || t.lease.IsExpired() { + return schedulerpb.Timestamp{}, errors.New("alloc timestamp failed, lease expired") + } + return resp, nil + } + return resp, errors.New("can not get timestamp") +} diff --git a/scheduler/server/util.go b/scheduler/server/util.go new file mode 100644 index 00000000..73a78b3c --- /dev/null +++ b/scheduler/server/util.go @@ -0,0 +1,158 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/etcdutil" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/typeutil" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +const ( + clientTimeout = 3 * time.Second + requestTimeout = etcdutil.DefaultRequestTimeout +) + +// Version information. +var ( + PDReleaseVersion = "None" + PDBuildTS = "None" + PDGitHash = "None" + PDGitBranch = "None" +) + +// dialClient used to dail http request. +var dialClient = &http.Client{ + Timeout: clientTimeout, + Transport: &http.Transport{ + DisableKeepAlives: true, + }, +} + +// LogPDInfo prints the PD version information. 
+func LogPDInfo() { + log.Info("Welcome to Placement Driver (PD)") + log.Info("PD", zap.String("release-version", PDReleaseVersion)) + log.Info("PD", zap.String("git-hash", PDGitHash)) + log.Info("PD", zap.String("git-branch", PDGitBranch)) + log.Info("PD", zap.String("utc-build-time", PDBuildTS)) +} + +// PrintPDInfo prints the PD version information without log info. +func PrintPDInfo() { + fmt.Println("Release Version:", PDReleaseVersion) + fmt.Println("Git Commit Hash:", PDGitHash) + fmt.Println("Git Branch:", PDGitBranch) + fmt.Println("UTC Build Time: ", PDBuildTS) +} + +// PrintConfigCheckMsg prints the message about configuration checks. +func PrintConfigCheckMsg(cfg *config.Config) { + if len(cfg.WarningMsgs) == 0 { + fmt.Println("config check successful") + return + } + + for _, msg := range cfg.WarningMsgs { + fmt.Println(msg) + } +} + +func initOrGetClusterID(c *clientv3.Client, key string) (uint64, error) { + ctx, cancel := context.WithTimeout(c.Ctx(), requestTimeout) + defer cancel() + + // Generate a random cluster ID. + ts := uint64(time.Now().Unix()) + clusterID := (ts << 32) + uint64(rand.Uint32()) + value := typeutil.Uint64ToBytes(clusterID) + + // Multiple PDs may try to init the cluster ID at the same time. + // Only one PD can commit this transaction, then other PDs can get + // the committed cluster ID. + resp, err := c.Txn(ctx). + If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). + Then(clientv3.OpPut(key, string(value))). + Else(clientv3.OpGet(key)). + Commit() + if err != nil { + return 0, errors.WithStack(err) + } + + // Txn commits ok, return the generated cluster ID. + if resp.Succeeded { + return clusterID, nil + } + + // Otherwise, parse the committed cluster ID. + if len(resp.Responses) == 0 { + return 0, errors.Errorf("txn returns empty response: %v", resp) + } + + response := resp.Responses[0].GetResponseRange() + if response == nil || len(response.Kvs) != 1 { + return 0, errors.Errorf("txn returns invalid range response: %v", resp) + } + + return typeutil.BytesToUint64(response.Kvs[0].Value) +} + +// GetMembers return a slice of Members. +func GetMembers(etcdClient *clientv3.Client) ([]*schedulerpb.Member, error) { + listResp, err := etcdutil.ListEtcdMembers(etcdClient) + if err != nil { + return nil, err + } + + members := make([]*schedulerpb.Member, 0, len(listResp.Members)) + for _, m := range listResp.Members { + info := &schedulerpb.Member{ + Name: m.Name, + MemberId: m.ID, + ClientUrls: m.ClientURLs, + PeerUrls: m.PeerURLs, + } + members = append(members, info) + } + + return members, nil +} + +// InitHTTPClient initials a http client. +func InitHTTPClient(svr *Server) error { + tlsConfig, err := svr.GetSecurityConfig().ToTLSConfig() + if err != nil { + return err + } + + dialClient = &http.Client{ + Timeout: clientTimeout, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + DisableKeepAlives: true, + }, + } + return nil +} diff --git a/scheduler/tests/client/client_test.go b/scheduler/tests/client/client_test.go new file mode 100644 index 00000000..a837b771 --- /dev/null +++ b/scheduler/tests/client/client_test.go @@ -0,0 +1,180 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package client_test + +import ( + "context" + "path/filepath" + "sort" + "strconv" + "sync" + "testing" + "time" + + pd "github.com/pingcap-incubator/tinykv/scheduler/client" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server" + "github.com/pingcap-incubator/tinykv/scheduler/tests" + . "github.com/pingcap/check" + "go.etcd.io/etcd/clientv3" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&serverTestSuite{}) + +type serverTestSuite struct{} + +func (s *serverTestSuite) SetUpSuite(c *C) { + server.EnableZap = true +} + +type client interface { + GetLeaderAddr() string + ScheduleCheckLeader() + GetURLs() []string +} + +func (s *serverTestSuite) TestClientLeaderChange(c *C) { + cluster, err := tests.NewTestCluster(3) + c.Assert(err, IsNil) + defer cluster.Destroy() + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + + var endpoints []string + for _, s := range cluster.GetServers() { + endpoints = append(endpoints, s.GetConfig().AdvertiseClientUrls) + } + cli, err := pd.NewClient(endpoints, pd.SecurityOption{}) + c.Assert(err, IsNil) + + var p1, l1 int64 + testutil.WaitUntil(c, func(c *C) bool { + p1, l1, err = cli.GetTS(context.TODO()) + if err == nil { + return true + } + c.Log(err) + return false + }) + + leader := cluster.GetLeader() + s.waitLeader(c, cli.(client), cluster.GetServer(leader).GetConfig().ClientUrls) + + err = cluster.GetServer(leader).Stop() + c.Assert(err, IsNil) + leader = cluster.WaitLeader() + c.Assert(leader, Not(Equals), "") + s.waitLeader(c, cli.(client), cluster.GetServer(leader).GetConfig().ClientUrls) + + // Check TS won't fall back after leader changed. + testutil.WaitUntil(c, func(c *C) bool { + p2, l2, err := cli.GetTS(context.TODO()) + if err != nil { + c.Log(err) + return false + } + c.Assert(p1<<18+l1, Less, p2<<18+l2) + return true + }) + + // Check URL list. + cli.Close() + urls := cli.(client).GetURLs() + sort.Strings(urls) + sort.Strings(endpoints) + c.Assert(urls, DeepEquals, endpoints) +} + +func (s *serverTestSuite) TestLeaderTransfer(c *C) { + cluster, err := tests.NewTestCluster(2) + c.Assert(err, IsNil) + defer cluster.Destroy() + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + + var endpoints []string + for _, s := range cluster.GetServers() { + endpoints = append(endpoints, s.GetConfig().AdvertiseClientUrls) + } + cli, err := pd.NewClient(endpoints, pd.SecurityOption{}) + c.Assert(err, IsNil) + + var physical, logical int64 + testutil.WaitUntil(c, func(c *C) bool { + physical, logical, err = cli.GetTS(context.TODO()) + if err == nil { + return true + } + c.Log(err) + return false + }) + lastTS := s.makeTS(physical, logical) + // Start a goroutine the make sure TS won't fall back. + quit := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-quit: + return + default: + } + + physical, logical, err1 := cli.GetTS(context.TODO()) + if err1 == nil { + ts := s.makeTS(physical, logical) + c.Assert(lastTS, Less, ts) + lastTS = ts + } + time.Sleep(time.Millisecond) + } + }() + // Transfer leader. 
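+	// Deleting the leader key below forces a new election on every iteration; the
+	// goroutine above keeps requesting timestamps and asserts that they never go
+	// backwards across elections.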
+ etcdCli, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + DialTimeout: time.Second, + }) + c.Assert(err, IsNil) + leaderPath := filepath.Join("/pd", strconv.FormatUint(cli.GetClusterID(context.Background()), 10), "leader") + for i := 0; i < 10; i++ { + cluster.WaitLeader() + _, err = etcdCli.Delete(context.TODO(), leaderPath) + c.Assert(err, IsNil) + // Sleep to make sure all servers are notified and starts campaign. + time.Sleep(time.Second) + } + close(quit) + wg.Wait() +} + +func (s *serverTestSuite) waitLeader(c *C, cli client, leader string) { + testutil.WaitUntil(c, func(c *C) bool { + cli.ScheduleCheckLeader() + return cli.GetLeaderAddr() == leader + }) +} + +func (s *serverTestSuite) makeTS(physical, logical int64) uint64 { + return uint64(physical<<18 + logical) +} diff --git a/scheduler/tests/cluster.go b/scheduler/tests/cluster.go new file mode 100644 index 00000000..341580c6 --- /dev/null +++ b/scheduler/tests/cluster.go @@ -0,0 +1,459 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tests + +import ( + "context" + "os" + "sync" + "time" + + "github.com/pingcap-incubator/tinykv/proto/pkg/metapb" + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/server" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" + "github.com/pingcap-incubator/tinykv/scheduler/server/core" + "github.com/pingcap-incubator/tinykv/scheduler/server/id" + "github.com/pingcap/log" + "github.com/pkg/errors" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +// TestServer states. +const ( + Initial int32 = iota + Running + Stop + Destroy +) + +// TestServer is only for test. +type TestServer struct { + sync.RWMutex + server *server.Server + state int32 +} + +var initHTTPClientOnce sync.Once + +var zapLogOnce sync.Once + +// NewTestServer creates a new TestServer. +func NewTestServer(cfg *config.Config) (*TestServer, error) { + err := cfg.SetupLogger() + if err != nil { + return nil, err + } + zapLogOnce.Do(func() { + log.ReplaceGlobals(cfg.GetZapLogger(), cfg.GetZapLogProperties()) + }) + svr, err := server.CreateServer(cfg) + if err != nil { + return nil, err + } + initHTTPClientOnce.Do(func() { + err = server.InitHTTPClient(svr) + }) + if err != nil { + return nil, err + } + return &TestServer{ + server: svr, + state: Initial, + }, nil +} + +// Run starts to run a TestServer. +func (s *TestServer) Run(ctx context.Context) error { + s.Lock() + defer s.Unlock() + if s.state != Initial && s.state != Stop { + return errors.Errorf("server(state%d) cannot run", s.state) + } + if err := s.server.Run(ctx); err != nil { + return err + } + s.state = Running + return nil +} + +// Stop is used to stop a TestServer. +func (s *TestServer) Stop() error { + s.Lock() + defer s.Unlock() + if s.state != Running { + return errors.Errorf("server(state%d) cannot stop", s.state) + } + s.server.Close() + s.state = Stop + return nil +} + +// Destroy is used to destroy a TestServer. 
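+// It closes the server if it is still running and removes its data directory.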
+func (s *TestServer) Destroy() error {
+	s.Lock()
+	defer s.Unlock()
+	dir := s.server.GetConfig().DataDir
+	if s.state == Running {
+		s.server.Close()
+	}
+	if err := os.RemoveAll(dir); err != nil {
+		return err
+	}
+	s.state = Destroy
+	return nil
+}
+
+// ResignLeader resigns the leader of the server.
+func (s *TestServer) ResignLeader() error {
+	s.Lock()
+	defer s.Unlock()
+	return s.server.GetMember().ResignLeader(s.server.Context(), s.server.Name(), "")
+}
+
+// State returns the current TestServer's state.
+func (s *TestServer) State() int32 {
+	s.RLock()
+	defer s.RUnlock()
+	return s.state
+}
+
+// GetConfig returns the current TestServer's configuration.
+func (s *TestServer) GetConfig() *config.Config {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetConfig()
+}
+
+// GetAllocator returns the current TestServer's ID allocator.
+func (s *TestServer) GetAllocator() *id.AllocatorImpl {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetAllocator()
+}
+
+// GetAddr returns the address of the TestServer.
+func (s *TestServer) GetAddr() string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetAddr()
+}
+
+// GetServer returns the real server of TestServer.
+func (s *TestServer) GetServer() *server.Server {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server
+}
+
+// GetClusterID returns the cluster ID.
+func (s *TestServer) GetClusterID() uint64 {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.ClusterID()
+}
+
+// GetLeader returns the current leader of the PD cluster.
+func (s *TestServer) GetLeader() *schedulerpb.Member {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetLeader()
+}
+
+// GetCluster returns the PD cluster.
+func (s *TestServer) GetCluster() *metapb.Cluster {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetCluster()
+}
+
+// GetServerID returns the unique etcd ID for this server in the etcd cluster.
+func (s *TestServer) GetServerID() uint64 {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetMember().ID()
+}
+
+// IsLeader returns whether the server is the leader or not.
+func (s *TestServer) IsLeader() bool {
+	s.RLock()
+	defer s.RUnlock()
+	return !s.server.IsClosed() && s.server.GetMember().IsLeader()
+}
+
+// GetEtcdLeader returns the builtin etcd leader.
+func (s *TestServer) GetEtcdLeader() (string, error) {
+	s.RLock()
+	defer s.RUnlock()
+	req := &schedulerpb.GetMembersRequest{Header: &schedulerpb.RequestHeader{ClusterId: s.server.ClusterID()}}
+	members, err := s.server.GetMembers(context.TODO(), req)
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+	return members.GetEtcdLeader().GetName(), nil
+}
+
+// GetEtcdLeaderID returns the builtin etcd leader ID.
+func (s *TestServer) GetEtcdLeaderID() (uint64, error) {
+	s.RLock()
+	defer s.RUnlock()
+	req := &schedulerpb.GetMembersRequest{Header: &schedulerpb.RequestHeader{ClusterId: s.server.ClusterID()}}
+	members, err := s.server.GetMembers(context.TODO(), req)
+	if err != nil {
+		return 0, errors.WithStack(err)
+	}
+	return members.GetEtcdLeader().GetMemberId(), nil
+}
+
+// MoveEtcdLeader moves etcd leader from old to new.
+func (s *TestServer) MoveEtcdLeader(old, new uint64) error {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetMember().MoveEtcdLeader(context.Background(), old, new)
+}
+
+// GetEtcdClient returns the builtin etcd client.
+func (s *TestServer) GetEtcdClient() *clientv3.Client {
+	s.RLock()
+	defer s.RUnlock()
+	return s.server.GetClient()
+}
+
+// GetStores returns the stores of the cluster.
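+// The underlying Raft cluster is only available once the cluster has been
+// bootstrapped (see GetRaftCluster below).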
+func (s *TestServer) GetStores() []*metapb.Store { + s.RLock() + defer s.RUnlock() + return s.server.GetRaftCluster().GetMetaStores() +} + +// GetStore returns the store with a given store ID. +func (s *TestServer) GetStore(storeID uint64) *core.StoreInfo { + s.RLock() + defer s.RUnlock() + return s.server.GetRaftCluster().GetStore(storeID) +} + +// GetRaftCluster returns Raft cluster. +// If cluster has not been bootstrapped, return nil. +func (s *TestServer) GetRaftCluster() *server.RaftCluster { + s.RLock() + defer s.RUnlock() + return s.server.GetRaftCluster() +} + +// GetRegions returns all regions' information in detail. +func (s *TestServer) GetRegions() []*core.RegionInfo { + s.RLock() + defer s.RUnlock() + return s.server.GetRaftCluster().GetRegions() +} + +// GetRegionInfoByID returns regionInfo by regionID from cluster. +func (s *TestServer) GetRegionInfoByID(regionID uint64) *core.RegionInfo { + s.RLock() + defer s.RUnlock() + return s.server.GetRaftCluster().GetRegion(regionID) +} + +// GetStoreRegions returns all regions' information with a given storeID. +func (s *TestServer) GetStoreRegions(storeID uint64) []*core.RegionInfo { + s.RLock() + defer s.RUnlock() + return s.server.GetRaftCluster().GetStoreRegions(storeID) +} + +// CheckHealth checks if members are healthy. +func (s *TestServer) CheckHealth(members []*schedulerpb.Member) map[uint64]*schedulerpb.Member { + s.RLock() + defer s.RUnlock() + return s.server.CheckHealth(members) +} + +// BootstrapCluster is used to bootstrap the cluster. +func (s *TestServer) BootstrapCluster() error { + bootstrapReq := &schedulerpb.BootstrapRequest{ + Header: &schedulerpb.RequestHeader{ClusterId: s.GetClusterID()}, + Store: &metapb.Store{Id: 1, Address: "mock://1"}, + } + _, err := s.server.Bootstrap(context.Background(), bootstrapReq) + if err != nil { + return err + } + return nil +} + +// TestCluster is only for test. +type TestCluster struct { + config *clusterConfig + servers map[string]*TestServer +} + +// ConfigOption is used to define customize settings in test. +type ConfigOption func(conf *config.Config) + +// NewTestCluster creates a new TestCluster. +func NewTestCluster(initialServerCount int, opts ...ConfigOption) (*TestCluster, error) { + config := newClusterConfig(initialServerCount) + servers := make(map[string]*TestServer) + for _, conf := range config.InitialServers { + serverConf, err := conf.Generate(opts...) + if err != nil { + return nil, err + } + s, err := NewTestServer(serverConf) + if err != nil { + return nil, err + } + servers[conf.Name] = s + } + return &TestCluster{ + config: config, + servers: servers, + }, nil +} + +// RunServer starts to run TestServer. +func (c *TestCluster) RunServer(ctx context.Context, server *TestServer) <-chan error { + resC := make(chan error) + go func() { resC <- server.Run(ctx) }() + return resC +} + +// RunServers starts to run multiple TestServer. +func (c *TestCluster) RunServers(ctx context.Context, servers []*TestServer) error { + res := make([]<-chan error, len(servers)) + for i, s := range servers { + res[i] = c.RunServer(ctx, s) + } + for _, c := range res { + if err := <-c; err != nil { + return errors.WithStack(err) + } + } + return nil +} + +// RunInitialServers starts to run servers in InitialServers. 
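+// It waits for every server listed in the cluster configuration to finish
+// starting, returning the first error encountered.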
+func (c *TestCluster) RunInitialServers() error { + var servers []*TestServer + for _, conf := range c.config.InitialServers { + servers = append(servers, c.GetServer(conf.Name)) + } + return c.RunServers(context.Background(), servers) +} + +// StopAll is used to stop all servers. +func (c *TestCluster) StopAll() error { + for _, s := range c.servers { + if err := s.Stop(); err != nil { + return err + } + } + return nil +} + +// GetServer returns a server with a given name. +func (c *TestCluster) GetServer(name string) *TestServer { + return c.servers[name] +} + +// GetServers returns all servers. +func (c *TestCluster) GetServers() map[string]*TestServer { + return c.servers +} + +// GetLeader returns the leader of all servers +func (c *TestCluster) GetLeader() string { + for name, s := range c.servers { + if s.IsLeader() { + return name + } + } + return "" +} + +// WaitLeader is used to get leader. +// If it exceeds the maximum number of loops, it will return an empty string. +func (c *TestCluster) WaitLeader() string { + for i := 0; i < 100; i++ { + counter := make(map[string]int) + running := 0 + for _, s := range c.servers { + if s.state == Running { + running++ + } + n := s.GetLeader().GetName() + if n != "" { + counter[n]++ + } + } + for name, num := range counter { + if num == running && c.GetServer(name).IsLeader() { + return name + } + } + time.Sleep(500 * time.Millisecond) + } + return "" +} + +// ResignLeader resigns the leader of the cluster. +func (c *TestCluster) ResignLeader() error { + leader := c.GetLeader() + if len(leader) != 0 { + return c.servers[leader].ResignLeader() + } + return errors.New("no leader") +} + +// GetCluster returns PD cluster. +func (c *TestCluster) GetCluster() *metapb.Cluster { + leader := c.GetLeader() + return c.servers[leader].GetCluster() +} + +// GetEtcdClient returns the builtin etcd client. +func (c *TestCluster) GetEtcdClient() *clientv3.Client { + leader := c.GetLeader() + return c.servers[leader].GetEtcdClient() +} + +// GetConfig returns the current TestCluster's configuration. +func (c *TestCluster) GetConfig() *clusterConfig { + return c.config +} + +// CheckHealth checks if members are healthy. +func (c *TestCluster) CheckHealth(members []*schedulerpb.Member) map[uint64]*schedulerpb.Member { + leader := c.GetLeader() + return c.servers[leader].CheckHealth(members) +} + +// HandleRegionHeartbeat processes RegionInfo reports from the client. +func (c *TestCluster) HandleRegionHeartbeat(region *core.RegionInfo) error { + leader := c.GetLeader() + cluster := c.servers[leader].GetRaftCluster() + return cluster.HandleRegionHeartbeat(region) +} + +// Destroy is used to destroy a TestCluster. +func (c *TestCluster) Destroy() { + for _, s := range c.servers { + err := s.Destroy() + if err != nil { + log.Error("failed to destroy the cluster:", zap.Error(err)) + } + } +} diff --git a/scheduler/tests/config.go b/scheduler/tests/config.go new file mode 100644 index 00000000..25eb3e15 --- /dev/null +++ b/scheduler/tests/config.go @@ -0,0 +1,96 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tests + +import ( + "fmt" + "io/ioutil" + "strings" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tempurl" + "github.com/pingcap-incubator/tinykv/scheduler/server/config" +) + +type serverConfig struct { + Name string + DataDir string + ClientURLs string + AdvertiseClientURLs string + PeerURLs string + AdvertisePeerURLs string + ClusterConfig *clusterConfig +} + +func newServerConfig(name string, cc *clusterConfig) *serverConfig { + tempDir, _ := ioutil.TempDir("/tmp", "pd-tests") + return &serverConfig{ + Name: name, + DataDir: tempDir, + ClientURLs: tempurl.Alloc(), + PeerURLs: tempurl.Alloc(), + ClusterConfig: cc, + } +} + +func (c *serverConfig) Generate(opts ...ConfigOption) (*config.Config, error) { + arguments := []string{ + "--name=" + c.Name, + "--data-dir=" + c.DataDir, + "--client-urls=" + c.ClientURLs, + "--advertise-client-urls=" + c.AdvertiseClientURLs, + "--peer-urls=" + c.PeerURLs, + "--advertise-peer-urls=" + c.AdvertisePeerURLs, + } + + arguments = append(arguments, "--initial-cluster="+c.ClusterConfig.GetServerAddrs()) + + cfg := config.NewConfig() + err := cfg.Parse(arguments) + if err != nil { + return nil, err + } + for _, opt := range opts { + opt(cfg) + } + return cfg, nil +} + +type clusterConfig struct { + InitialServers []*serverConfig +} + +func newClusterConfig(n int) *clusterConfig { + var cc clusterConfig + for i := 0; i < n; i++ { + c := newServerConfig(cc.nextServerName(), &cc) + cc.InitialServers = append(cc.InitialServers, c) + } + return &cc +} + +func (c *clusterConfig) nextServerName() string { + return fmt.Sprintf("pd%d", len(c.InitialServers)+1) +} + +func (c *clusterConfig) GetServerAddrs() string { + var addrs []string + for _, s := range c.InitialServers { + addrs = append(addrs, fmt.Sprintf("%s=%s", s.Name, s.PeerURLs)) + } + return strings.Join(addrs, ",") +} + +func (c *clusterConfig) GetClientURLs() string { + return c.InitialServers[0].ClientURLs +} diff --git a/scheduler/tests/server/id/id_test.go b/scheduler/tests/server/id/id_test.go new file mode 100644 index 00000000..f2540ae7 --- /dev/null +++ b/scheduler/tests/server/id/id_test.go @@ -0,0 +1,109 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package id_test + +import ( + "context" + "sync" + "testing" + + "github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server" + "github.com/pingcap-incubator/tinykv/scheduler/tests" + . 
"github.com/pingcap/check" +) + +func Test(t *testing.T) { + TestingT(t) +} + +const allocStep = uint64(1000) + +var _ = Suite(&testAllocIDSuite{}) + +type testAllocIDSuite struct{} + +func (s *testAllocIDSuite) SetUpSuite(c *C) { + server.EnableZap = true +} + +func (s *testAllocIDSuite) TestID(c *C) { + var err error + cluster, err := tests.NewTestCluster(1) + defer cluster.Destroy() + c.Assert(err, IsNil) + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + + leaderServer := cluster.GetServer(cluster.GetLeader()) + var last uint64 + for i := uint64(0); i < allocStep; i++ { + id, err := leaderServer.GetAllocator().Alloc() + c.Assert(err, IsNil) + c.Assert(id, Greater, last) + last = id + } + + var wg sync.WaitGroup + + var m sync.Mutex + ids := make(map[uint64]struct{}) + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for i := 0; i < 200; i++ { + id, err := leaderServer.GetAllocator().Alloc() + c.Assert(err, IsNil) + m.Lock() + _, ok := ids[id] + ids[id] = struct{}{} + m.Unlock() + c.Assert(ok, IsFalse) + } + }() + } + + wg.Wait() +} + +func (s *testAllocIDSuite) TestCommand(c *C) { + var err error + cluster, err := tests.NewTestCluster(1) + defer cluster.Destroy() + c.Assert(err, IsNil) + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + + leaderServer := cluster.GetServer(cluster.GetLeader()) + req := &schedulerpb.AllocIDRequest{ + Header: testutil.NewRequestHeader(leaderServer.GetClusterID()), + } + + grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + var last uint64 + for i := uint64(0); i < 2*allocStep; i++ { + resp, err := grpcPDClient.AllocID(context.Background(), req) + c.Assert(err, IsNil) + c.Assert(resp.GetId(), Greater, last) + last = resp.GetId() + } +} diff --git a/scheduler/tests/server/server_test.go b/scheduler/tests/server/server_test.go new file mode 100644 index 00000000..64835535 --- /dev/null +++ b/scheduler/tests/server/server_test.go @@ -0,0 +1,121 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package server_test + +import ( + "testing" + + "github.com/pingcap-incubator/tinykv/scheduler/pkg/tempurl" + "github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil" + "github.com/pingcap-incubator/tinykv/scheduler/server" + "github.com/pingcap-incubator/tinykv/scheduler/tests" + . "github.com/pingcap/check" + + // Register schedulers. + _ "github.com/pingcap-incubator/tinykv/scheduler/server/schedulers" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&serverTestSuite{}) + +type serverTestSuite struct{} + +func (s *serverTestSuite) SetUpSuite(c *C) { + server.EnableZap = true +} + +func (s *serverTestSuite) TestUpdateAdvertiseUrls(c *C) { + cluster, err := tests.NewTestCluster(2) + defer cluster.Destroy() + c.Assert(err, IsNil) + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + + // AdvertisePeerUrls should equals to PeerUrls. 
+	for _, conf := range cluster.GetConfig().InitialServers {
+		serverConf := cluster.GetServer(conf.Name).GetConfig()
+		c.Assert(serverConf.AdvertisePeerUrls, Equals, conf.PeerURLs)
+		c.Assert(serverConf.AdvertiseClientUrls, Equals, conf.ClientURLs)
+	}
+
+	err = cluster.StopAll()
+	c.Assert(err, IsNil)
+
+	// Changing the config should not affect the peer URLs that are already in use.
+	// Recreate the servers with new advertise peer URLs to verify this.
+	for _, conf := range cluster.GetConfig().InitialServers {
+		conf.AdvertisePeerURLs = conf.PeerURLs + "," + tempurl.Alloc()
+	}
+	for _, conf := range cluster.GetConfig().InitialServers {
+		serverConf, e := conf.Generate()
+		c.Assert(e, IsNil)
+		s, e := tests.NewTestServer(serverConf)
+		c.Assert(e, IsNil)
+		cluster.GetServers()[conf.Name] = s
+	}
+	err = cluster.RunInitialServers()
+	c.Assert(err, IsNil)
+	for _, conf := range cluster.GetConfig().InitialServers {
+		serverConf := cluster.GetServer(conf.Name).GetConfig()
+		c.Assert(serverConf.AdvertisePeerUrls, Equals, conf.PeerURLs)
+	}
+}
+
+func (s *serverTestSuite) TestClusterID(c *C) {
+	cluster, err := tests.NewTestCluster(3)
+	defer cluster.Destroy()
+	c.Assert(err, IsNil)
+
+	err = cluster.RunInitialServers()
+	c.Assert(err, IsNil)
+
+	clusterID := cluster.GetServer("pd1").GetClusterID()
+	for _, s := range cluster.GetServers() {
+		c.Assert(s.GetClusterID(), Equals, clusterID)
+	}
+
+	// Restart all PDs.
+	err = cluster.StopAll()
+	c.Assert(err, IsNil)
+	err = cluster.RunInitialServers()
+	c.Assert(err, IsNil)
+
+	// All PDs should have the same cluster ID as before.
+	for _, s := range cluster.GetServers() {
+		c.Assert(s.GetClusterID(), Equals, clusterID)
+	}
+}
+
+func (s *serverTestSuite) TestLeader(c *C) {
+	cluster, err := tests.NewTestCluster(3)
+	defer cluster.Destroy()
+	c.Assert(err, IsNil)
+
+	err = cluster.RunInitialServers()
+	c.Assert(err, IsNil)
+
+	leader1 := cluster.WaitLeader()
+	c.Assert(leader1, Not(Equals), "")
+
+	err = cluster.GetServer(leader1).Stop()
+	c.Assert(err, IsNil)
+	testutil.WaitUntil(c, func(c *C) bool {
+		leader := cluster.GetLeader()
+		return leader != leader1
+	})
+}
diff --git a/scheduler/tests/server/tso/tso_test.go b/scheduler/tests/server/tso/tso_test.go
new file mode 100644
index 00000000..7c187483
--- /dev/null
+++ b/scheduler/tests/server/tso/tso_test.go
@@ -0,0 +1,123 @@
+// Copyright 2016 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tso_test
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/pingcap-incubator/tinykv/proto/pkg/schedulerpb"
+	"github.com/pingcap-incubator/tinykv/scheduler/pkg/testutil"
+	"github.com/pingcap-incubator/tinykv/scheduler/tests"
+	. 
"github.com/pingcap/check" +) + +func Test(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testTsoSuite{}) + +type testTsoSuite struct { +} + +func (s *testTsoSuite) SetUpSuite(c *C) { +} + +func (s *testTsoSuite) testGetTimestamp(c *C, n int) *schedulerpb.Timestamp { + var err error + cluster, err := tests.NewTestCluster(1) + defer cluster.Destroy() + c.Assert(err, IsNil) + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + + leaderServer := cluster.GetServer(cluster.GetLeader()) + grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + + clusterID := leaderServer.GetClusterID() + req := &schedulerpb.TsoRequest{ + Header: testutil.NewRequestHeader(clusterID), + Count: uint32(n), + } + + tsoClient, err := grpcPDClient.Tso(context.Background()) + c.Assert(err, IsNil) + defer tsoClient.CloseSend() + err = tsoClient.Send(req) + c.Assert(err, IsNil) + resp, err := tsoClient.Recv() + c.Assert(err, IsNil) + c.Assert(resp.GetCount(), Equals, uint32(n)) + + res := resp.GetTimestamp() + c.Assert(res.GetLogical(), Greater, int64(0)) + + return res +} + +func (s *testTsoSuite) TestTso3C(c *C) { + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + last := &schedulerpb.Timestamp{ + Physical: 0, + Logical: 0, + } + + for j := 0; j < 30; j++ { + ts := s.testGetTimestamp(c, 10) + c.Assert(ts.GetPhysical(), Not(Less), last.GetPhysical()) + if ts.GetPhysical() == last.GetPhysical() { + c.Assert(ts.GetLogical(), Greater, last.GetLogical()) + } + last = ts + time.Sleep(10 * time.Millisecond) + } + }() + } + + wg.Wait() +} + +func (s *testTsoSuite) TestTsoWithoutCount3C(c *C) { + var err error + cluster, err := tests.NewTestCluster(1) + defer cluster.Destroy() + c.Assert(err, IsNil) + + err = cluster.RunInitialServers() + c.Assert(err, IsNil) + cluster.WaitLeader() + + leaderServer := cluster.GetServer(cluster.GetLeader()) + grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + clusterID := leaderServer.GetClusterID() + + req := &schedulerpb.TsoRequest{Header: testutil.NewRequestHeader(clusterID)} + tsoClient, err := grpcPDClient.Tso(context.Background()) + c.Assert(err, IsNil) + defer tsoClient.CloseSend() + err = tsoClient.Send(req) + c.Assert(err, IsNil) + _, err = tsoClient.Recv() + c.Assert(err, NotNil) +} diff --git a/scheduler/tools.json b/scheduler/tools.json new file mode 100644 index 00000000..25cc8d48 --- /dev/null +++ b/scheduler/tools.json @@ -0,0 +1,29 @@ +{ + "Tools": [ + { + "Repository": "github.com/mgechev/revive", + "Commit": "7773f47324c2bf1c8f7a5500aff2b6c01d3ed73b" + }, + { + "Repository": "github.com/dnephin/govet", + "Commit": "4a96d43e39d340b63daa8bc5576985aa599885f6" + }, + { + "Repository": "github.com/pingcap/failpoint/failpoint-ctl", + "Commit": "30cc7431d99c6a7f2836387d4bb255a3bd6a5e0a" + }, + { + "Repository": "github.com/go-playground/overalls", + "Commit": "22ec1a223b7c9a2e56355bd500b539cba3784238" + }, + { + "Repository": "github.com/golangci/golangci-lint/cmd/golangci-lint", + "Commit": "4ba2155996359eabd8800d1fbf3e3a9777c80490" + }, + { + "Repository": "golang.org/x/tools/cmd/goimports", + "Commit": "04b5d21e00f1f47bd824a6ade581e7189bacde87" + } + ], + "RetoolVersion": "1.3.7" +}