Auto temp file delete (#340)

* auto delete tmp file
charstal 2021-12-22 17:32:57 +08:00 committed by GitHub
parent db489b317f
commit 7aa7a2099c
3 changed files with 23 additions and 13 deletions
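
For context on how the cleanup fits together: `Cluster.Start` creates its per-store directories with `ioutil.TempDir`, and the new `TEST_CLEAN` glob removes them afterwards. Below is a minimal sketch (not part of the change itself; it assumes the default Linux temp dir, i.e. `os.TempDir()` is `/tmp`) showing why the glob matches the generated names:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

func main() {
	// Same call as in Cluster.Start: TempDir appends a random suffix to the
	// prefix, producing e.g. /tmp/test-raftstore123456789.
	dir, err := ioutil.TempDir("", "test-raftstore")
	if err != nil {
		panic(err)
	}
	// The Makefile's `rm -rf /tmp/*test-raftstore*` relies on exactly this naming.
	matched, _ := filepath.Match("*test-raftstore*", filepath.Base(dir))
	fmt.Println(dir, matched) // e.g. /tmp/test-raftstore123456789 true
}
```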


@@ -10,6 +10,7 @@ endif
GO := GO111MODULE=on go
GOBUILD := $(GO) build $(BUILD_FLAG) -tags codes
GOTEST := $(GO) test -v --count=1 --parallel=1 -p=1
TEST_CLEAN := rm -rf /tmp/*test-raftstore*
TEST_LDFLAGS := ""
@@ -68,6 +69,7 @@ project2ac:
$(GOTEST) ./raft -run 2AC
project2b:
$(TEST_CLEAN)
$(GOTEST) ./kv/test_raftstore -run ^TestBasic2B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestConcurrent2B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestUnreliable2B$ || true
@@ -79,9 +81,12 @@ project2b:
$(GOTEST) ./kv/test_raftstore -run ^TestPersistConcurrentUnreliable2B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestPersistPartition2B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestPersistPartitionUnreliable2B$ || true
$(TEST_CLEAN)
project2c:
$(GOTEST) ./raft ./kv/test_raftstore -run 2C
$(TEST_CLEAN)
$(GOTEST) ./raft ./kv/test_raftstore -run 2C || true
$(TEST_CLEAN)
project3: project3a project3b project3c
@@ -89,6 +94,7 @@ project3a:
$(GOTEST) ./raft -run 3A
project3b:
$(TEST_CLEAN)
$(GOTEST) ./kv/test_raftstore -run ^TestTransferLeader3B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestBasicConfChange3B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestConfChangeRecover3B$ || true
@@ -104,6 +110,7 @@ project3b:
$(GOTEST) ./kv/test_raftstore -run ^TestSplitUnreliableRecover3B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestSplitConfChangeSnapshotUnreliableRecover3B$ || true
$(GOTEST) ./kv/test_raftstore -run ^TestSplitConfChangeSnapshotUnreliableRecoverConcurrentPartition3B$ || true
$(TEST_CLEAN)
project3c:
$(GOTEST) ./scheduler/server ./scheduler/server/schedulers -check.f="3C"
@@ -117,4 +124,4 @@ project4b:
$(GOTEST) ./kv/transaction/... -run 4B
project4c:
$(GOTEST) ./kv/transaction/... -run 4C
$(GOTEST) ./kv/transaction/... -run 4C


@@ -114,12 +114,12 @@ These states are stored in two badger instances: raftdb and kvdb:
The format is shown below. Some helper functions are provided in `kv/raftstore/meta`; use them to build the keys, and write the values to badger with `writebatch.SetMeta()`.
| Key | KeyFormat | Value | DB |
|:---- |:---- |:---- |:---|
|raft_log_key |0x01 0x02 region_id 0x01 log_idx|Entry |raft|
|raft_state_key |0x01 0x02 region_id 0x02 |RaftLocalState |raft|
|apply_state_key |0x01 0x02 region_id 0x03 |RaftApplyState |kv |
|region_state_key|0x01 0x03 region_id 0x01 |RegionLocalState|kv |
| Key | KeyFormat | Value | DB |
| :--------------- | :------------------------------- | :--------------- | :--- |
| raft_log_key | 0x01 0x02 region_id 0x01 log_idx | Entry | raft |
| raft_state_key | 0x01 0x02 region_id 0x02 | RaftLocalState | raft |
| apply_state_key | 0x01 0x02 region_id 0x03 | RaftApplyState | kv |
| region_state_key | 0x01 0x03 region_id 0x01 | RegionLocalState | kv |
> You may wonder why TinyKV needs two badger instances. Actually, it could use a single badger instance to store both the raft log and the state machine data. Separating them into two instances is just to stay consistent with TiKV's design.
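
As a rough illustration of how the helpers in `kv/raftstore/meta` and `writebatch.SetMeta()` fit the table above, here is a minimal sketch; the helper names, import paths, and proto types are assumptions inferred from the table, not something this doc prescribes:

```go
package raftstore

import (
	"github.com/pingcap-incubator/tinykv/kv/raftstore/meta"
	"github.com/pingcap-incubator/tinykv/kv/util/engine_util"
	"github.com/pingcap-incubator/tinykv/proto/pkg/rspb"
)

// persistRaftState writes RaftLocalState under raft_state_key
// (0x01 0x02 region_id 0x02) into the raft write batch.
func persistRaftState(regionID uint64, state *rspb.RaftLocalState, raftWB *engine_util.WriteBatch) error {
	// For log entries you would use meta.RaftLogKey(regionID, index) instead,
	// matching the raft_log_key row of the table.
	return raftWB.SetMeta(meta.RaftStateKey(regionID), state)
}
```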
@@ -201,6 +201,7 @@ In this stage, you may consider these errors, and others will be processed in pr
> - You can apply the committed Raft log entries in an asynchronous way, just like TiKV does. It's not necessary, though it is a big challenge if you want to improve performance.
> - Record the callback of the command when proposing, and return the callback after applying (a sketch follows after this list).
> - For the snap command response, you should set the badger Txn on the callback explicitly.
> - After 2A, you may need to run some tests multiple times to find bugs.
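
A minimal sketch of the callback hint above; the `proposal` bookkeeping, field names, and import paths are illustrative assumptions, not an API the project requires:

```go
package raftstore

import (
	"github.com/pingcap-incubator/tinykv/kv/raftstore/message"
	"github.com/pingcap-incubator/tinykv/proto/pkg/eraftpb"
	"github.com/pingcap-incubator/tinykv/proto/pkg/raft_cmdpb"
)

// proposal remembers where a proposed command is expected to land in the log.
type proposal struct {
	index uint64
	term  uint64
	cb    *message.Callback
}

type pendingProposals struct {
	props []*proposal
}

// record is called right after proposing, with the entry's index and term.
func (p *pendingProposals) record(index, term uint64, cb *message.Callback) {
	p.props = append(p.props, &proposal{index: index, term: term, cb: cb})
}

// answer is called after applying a committed entry: it resolves the matching
// callback and answers proposals that can no longer succeed with staleResp.
func (p *pendingProposals) answer(entry *eraftpb.Entry, resp, staleResp *raft_cmdpb.RaftCmdResponse) {
	for len(p.props) > 0 {
		prop := p.props[0]
		if prop.index > entry.Index {
			return // not applied yet; keep waiting
		}
		p.props = p.props[1:]
		if prop.index == entry.Index && prop.term == entry.Term {
			// For snap commands, also set cb.Txn here before calling Done.
			prop.cb.Done(resp)
			return
		}
		// The proposed entry was overwritten by another leader.
		prop.cb.Done(staleResp)
	}
}
```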
## Part C


@@ -38,6 +38,7 @@ type Cluster struct {
dirs []string
simulator Simulator
cfg *config.Config
baseDir string
}
func NewCluster(count int, schedulerClient *MockSchedulerClient, simulator Simulator, cfg *config.Config) *Cluster {
@@ -48,6 +49,7 @@ func NewCluster(count int, schedulerClient *MockSchedulerClient, simulator Simul
snapPaths: make(map[uint64]string),
simulator: simulator,
cfg: cfg,
baseDir: "test-raftstore",
}
}
@@ -56,7 +58,7 @@ func (c *Cluster) Start() {
clusterID := c.schedulerClient.GetClusterID(ctx)
for storeID := uint64(1); storeID <= uint64(c.count); storeID++ {
dbPath, err := ioutil.TempDir("", "test-raftstore")
dbPath, err := ioutil.TempDir("", c.baseDir)
if err != nil {
panic(err)
}
@@ -65,7 +67,7 @@
raftPath := filepath.Join(dbPath, "raft")
snapPath := filepath.Join(dbPath, "snap")
c.snapPaths[storeID] = snapPath
c.dirs = append(c.dirs, []string{kvPath, raftPath, snapPath}...)
c.dirs = append(c.dirs, dbPath)
err = os.MkdirAll(kvPath, os.ModePerm)
if err != nil {
@@ -181,7 +183,7 @@ func (c *Cluster) AllocPeer(storeID uint64) *metapb.Peer {
func (c *Cluster) Request(key []byte, reqs []*raft_cmdpb.Request, timeout time.Duration) (*raft_cmdpb.RaftCmdResponse, *badger.Txn) {
startTime := time.Now()
for i := 0; i < 10 || time.Now().Sub(startTime) < timeout; i++ {
for i := 0; i < 10 || time.Since(startTime) < timeout; i++ {
region := c.GetRegion(key)
regionID := region.GetId()
req := NewRequest(regionID, region.RegionEpoch, reqs)
@@ -210,7 +212,7 @@ func (c *Cluster) CallCommandOnLeader(request *raft_cmdpb.RaftCmdRequest, timeou
regionID := request.Header.RegionId
leader := c.LeaderOfRegion(regionID)
for {
if time.Now().Sub(startTime) > timeout {
if time.Since(startTime) > timeout {
return nil, nil
}
if leader == nil {
@@ -413,7 +415,7 @@ func (c *Cluster) MustTransferLeader(regionID uint64, leader *metapb.Peer) {
currentLeader.StoreId == leader.StoreId {
return
}
if time.Now().Sub(timer) > 5*time.Second {
if time.Since(timer) > 5*time.Second {
panic(fmt.Sprintf("failed to transfer leader to [%d] %s", regionID, leader.String()))
}
c.TransferLeader(regionID, leader)