write_batch.go
package grocksdb
// #include "rocksdb/c.h"
import "C"
import (
"errors"
"io"
)
// WriteBatch is a batching of Puts, Merges and Deletes.
type WriteBatch struct {
c *C.rocksdb_writebatch_t
}
// NewWriteBatch creates a new WriteBatch object.
func NewWriteBatch() *WriteBatch {
return newNativeWriteBatch(C.rocksdb_writebatch_create())
}
// newNativeWriteBatch wraps a native C write batch handle in a WriteBatch object.
func newNativeWriteBatch(c *C.rocksdb_writebatch_t) *WriteBatch {
return &WriteBatch{
c: c,
}
}
// WriteBatchFrom creates a write batch from a serialized WriteBatch.
func WriteBatchFrom(data []byte) *WriteBatch {
cWB := C.rocksdb_writebatch_create_from(byteToChar(data), C.size_t(len(data)))
return newNativeWriteBatch(cWB)
}
// Put queues a key-value pair.
func (wb *WriteBatch) Put(key, value []byte) {
cKey := byteToChar(key)
cValue := byteToChar(value)
C.rocksdb_writebatch_put(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}
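// Usage sketch (illustrative, from a caller's point of view): batching a few
// Puts and applying them atomically with DB.Write. Assumes an already opened
// *grocksdb.DB named db; error handling is abbreviated.
//
//	wb := grocksdb.NewWriteBatch()
//	defer wb.Destroy()
//	wb.Put([]byte("key1"), []byte("value1"))
//	wb.Put([]byte("key2"), []byte("value2"))
//	wo := grocksdb.NewDefaultWriteOptions()
//	defer wo.Destroy()
//	if err := db.Write(wo, wb); err != nil {
//		// handle the write error
//	}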
// PutCF queues a key-value pair in a column family.
func (wb *WriteBatch) PutCF(cf *ColumnFamilyHandle, key, value []byte) {
cKey := byteToChar(key)
cValue := byteToChar(value)
C.rocksdb_writebatch_put_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}
// PutCFWithTS queues a key-value pair with given timestamp in a column family.
func (wb *WriteBatch) PutCFWithTS(cf *ColumnFamilyHandle, key, ts, value []byte) {
cKey := byteToChar(key)
cValue := byteToChar(value)
cTs := byteToChar(ts)
C.rocksdb_writebatch_put_cf_with_ts(wb.c, cf.c, cKey, C.size_t(len(key)), cTs, C.size_t(len(ts)), cValue, C.size_t(len(value)))
}
// PutLogData appends a blob of arbitrary size to the records in this batch.
func (wb *WriteBatch) PutLogData(blob []byte) {
cBlob := byteToChar(blob)
C.rocksdb_writebatch_put_log_data(wb.c, cBlob, C.size_t(len(blob)))
}
// Merge queues a merge of "value" with the existing value of "key".
func (wb *WriteBatch) Merge(key, value []byte) {
cKey := byteToChar(key)
cValue := byteToChar(value)
C.rocksdb_writebatch_merge(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}
// MergeCF queues a merge of "value" with the existing value of "key" in a
// column family.
func (wb *WriteBatch) MergeCF(cf *ColumnFamilyHandle, key, value []byte) {
cKey := byteToChar(key)
cValue := byteToChar(value)
C.rocksdb_writebatch_merge_cf(wb.c, cf.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}
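// Usage sketch (illustrative): Merge records are only meaningful if the
// database (or the target column family) was opened with a merge operator
// configured on its Options; the counter-style operand below is an assumption
// about that operator, not something this package defines.
//
//	wb := grocksdb.NewWriteBatch()
//	defer wb.Destroy()
//	wb.Merge([]byte("page_hits"), []byte("1")) // combined with the stored value by the merge operator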
// Delete queues a deletion of the data at key.
func (wb *WriteBatch) Delete(key []byte) {
cKey := byteToChar(key)
C.rocksdb_writebatch_delete(wb.c, cKey, C.size_t(len(key)))
}
// SingleDelete removes the database entry for "key". Requires that the key exists
// and was not overwritten. It is not an error if "key" did not exist in the database.
//
// If a key is overwritten (by calling Put() multiple times), then the result
// of calling SingleDelete() on this key is undefined. SingleDelete() only
// behaves correctly if there has been only one Put() for this key since the
// previous call to SingleDelete() for this key.
//
// This feature is currently an experimental performance optimization
// for a very specific workload. It is up to the caller to ensure that
// SingleDelete is only used for a key that is not deleted using Delete() or
// written using Merge(). Mixing SingleDelete operations with Deletes and
// Merges can result in undefined behavior.
//
// Note: consider setting options.sync = true.
func (wb *WriteBatch) SingleDelete(key []byte) {
cKey := byteToChar(key)
C.rocksdb_writebatch_singledelete(wb.c, cKey, C.size_t(len(key)))
}
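// Usage sketch (illustrative): SingleDelete is only safe when the key has been
// Put exactly once since the previous SingleDelete, as described above.
//
//	wb := grocksdb.NewWriteBatch()
//	defer wb.Destroy()
//	// write the entry exactly once
//	wb.Put([]byte("session:42"), []byte("token"))
//	// later (shown in the same batch for brevity): remove it again
//	wb.SingleDelete([]byte("session:42"))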
// DeleteCF queues a deletion of the data at key in a column family.
func (wb *WriteBatch) DeleteCF(cf *ColumnFamilyHandle, key []byte) {
cKey := byteToChar(key)
C.rocksdb_writebatch_delete_cf(wb.c, cf.c, cKey, C.size_t(len(key)))
}
// DeleteCFWithTS queues a deletion of the data at key with the given timestamp in a column family.
func (wb *WriteBatch) DeleteCFWithTS(cf *ColumnFamilyHandle, key, ts []byte) {
cKey := byteToChar(key)
cTs := byteToChar(ts)
C.rocksdb_writebatch_delete_cf_with_ts(wb.c, cf.c, cKey, C.size_t(len(key)), cTs, C.size_t(len(ts)))
}
// SingleDeleteCF is the same as SingleDelete, but for a specific column family.
func (wb *WriteBatch) SingleDeleteCF(cf *ColumnFamilyHandle, key []byte) {
cKey := byteToChar(key)
C.rocksdb_writebatch_singledelete_cf(wb.c, cf.c, cKey, C.size_t(len(key)))
}
// SingleDeleteCFWithTS is the same as SingleDelete, but with a timestamp and for a specific column family.
func (wb *WriteBatch) SingleDeleteCFWithTS(cf *ColumnFamilyHandle, key, ts []byte) {
cKey := byteToChar(key)
cTs := byteToChar(ts)
C.rocksdb_writebatch_singledelete_cf_with_ts(wb.c, cf.c, cKey, C.size_t(len(key)), cTs, C.size_t(len(ts)))
}
// DeleteRange deletes the keys in the range [startKey, endKey).
func (wb *WriteBatch) DeleteRange(startKey []byte, endKey []byte) {
cStartKey := byteToChar(startKey)
cEndKey := byteToChar(endKey)
C.rocksdb_writebatch_delete_range(wb.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey)))
}
// DeleteRangeCF deletes the keys in the range [startKey, endKey) that
// belong to the given column family.
func (wb *WriteBatch) DeleteRangeCF(cf *ColumnFamilyHandle, startKey []byte, endKey []byte) {
cStartKey := byteToChar(startKey)
cEndKey := byteToChar(endKey)
C.rocksdb_writebatch_delete_range_cf(wb.c, cf.c, cStartKey, C.size_t(len(startKey)), cEndKey, C.size_t(len(endKey)))
}
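// Usage sketch (illustrative): dropping every key with a common prefix by
// deleting the half-open range [start, limit). The upper bound used here is an
// assumption about the caller's key encoding; this package does not compute it.
//
//	wb := grocksdb.NewWriteBatch()
//	defer wb.Destroy()
//	wb.DeleteRange([]byte("user:1000:"), []byte("user:1001:"))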
// Data returns the serialized version of this batch.
func (wb *WriteBatch) Data() []byte {
var cSize C.size_t
cValue := C.rocksdb_writebatch_data(wb.c, &cSize)
return charToByte(cValue, cSize)
}
// Count returns the number of updates in the batch.
func (wb *WriteBatch) Count() int {
return int(C.rocksdb_writebatch_count(wb.c))
}
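// Usage sketch (illustrative): Data and WriteBatchFrom allow a batch to be
// serialized, shipped elsewhere, and rebuilt; the transport is up to the caller.
// Data returns a view of the batch's internal buffer, so consume (or copy) it
// before the batch is destroyed.
//
//	wb := grocksdb.NewWriteBatch()
//	wb.Put([]byte("k"), []byte("v"))
//	payload := wb.Data()
//	wb2 := grocksdb.WriteBatchFrom(payload) // copies payload into a new batch
//	wb.Destroy()
//	defer wb2.Destroy()
//	_ = wb2.Count() // 1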
// NewIterator returns an iterator over the records in the batch.
func (wb *WriteBatch) NewIterator() *WriteBatchIterator {
	data := wb.Data()
	// A serialized batch starts with a 12-byte header (an 8-byte sequence
	// number followed by a 4-byte record count); skip it before iterating.
	if len(data) < 8+4 {
		return &WriteBatchIterator{}
	}
	return &WriteBatchIterator{data: data[12:]}
}
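// Usage sketch (illustrative): walking the records queued in a batch.
//
//	it := wb.NewIterator()
//	for it.Next() {
//		rec := it.Record()
//		if rec.Type == grocksdb.WriteBatchValueRecord {
//			// rec.Key and rec.Value hold a queued Put
//		}
//	}
//	if err := it.Error(); err != nil {
//		// the serialized batch could not be parsed
//	}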
// SetSavePoint records the state of the batch for future calls to RollbackToSavePoint().
// May be called multiple times to set multiple save points.
func (wb *WriteBatch) SetSavePoint() {
C.rocksdb_writebatch_set_save_point(wb.c)
}
// RollbackToSavePoint removes all entries in this batch (Put, Merge, Delete, PutLogData) since the
// most recent call to SetSavePoint() and removes the most recent save point.
func (wb *WriteBatch) RollbackToSavePoint() (err error) {
var cErr *C.char
C.rocksdb_writebatch_rollback_to_save_point(wb.c, &cErr)
err = fromCError(cErr)
return
}
// PopSavePoint pops the most recent save point.
// If there is no previous call to SetSavePoint(), Status::NotFound()
// will be returned.
func (wb *WriteBatch) PopSavePoint() (err error) {
var cErr *C.char
C.rocksdb_writebatch_pop_save_point(wb.c, &cErr)
err = fromCError(cErr)
return
}
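// Usage sketch (illustrative): speculatively queueing updates and discarding
// them again if a later check fails, all before the batch is written.
// somethingWentWrong is a hypothetical condition, not part of this package.
//
//	wb.SetSavePoint()
//	wb.Put([]byte("tentative"), []byte("value"))
//	if somethingWentWrong {
//		if err := wb.RollbackToSavePoint(); err != nil {
//			// no save point was set, or the rollback failed
//		}
//	}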
// Clear removes all the enqueued Puts and Deletes.
func (wb *WriteBatch) Clear() {
C.rocksdb_writebatch_clear(wb.c)
}
// Destroy deallocates the WriteBatch object.
func (wb *WriteBatch) Destroy() {
if wb.c != nil {
C.rocksdb_writebatch_destroy(wb.c)
wb.c = nil
}
}
// WriteBatchRecordType describes the type of a batch record.
type WriteBatchRecordType byte
// Types of batch records.
const (
WriteBatchDeletionRecord WriteBatchRecordType = 0x0
WriteBatchValueRecord WriteBatchRecordType = 0x1
WriteBatchMergeRecord WriteBatchRecordType = 0x2
WriteBatchLogDataRecord WriteBatchRecordType = 0x3
WriteBatchCFDeletionRecord WriteBatchRecordType = 0x4
WriteBatchCFValueRecord WriteBatchRecordType = 0x5
WriteBatchCFMergeRecord WriteBatchRecordType = 0x6
WriteBatchSingleDeletionRecord WriteBatchRecordType = 0x7
WriteBatchCFSingleDeletionRecord WriteBatchRecordType = 0x8
WriteBatchBeginPrepareXIDRecord WriteBatchRecordType = 0x9
WriteBatchEndPrepareXIDRecord WriteBatchRecordType = 0xA
WriteBatchCommitXIDRecord WriteBatchRecordType = 0xB
WriteBatchRollbackXIDRecord WriteBatchRecordType = 0xC
WriteBatchNoopRecord WriteBatchRecordType = 0xD
WriteBatchRangeDeletion WriteBatchRecordType = 0xF
WriteBatchCFRangeDeletion WriteBatchRecordType = 0xE
WriteBatchCFBlobIndex WriteBatchRecordType = 0x10
WriteBatchBlobIndex WriteBatchRecordType = 0x11
WriteBatchBeginPersistedPrepareXIDRecord WriteBatchRecordType = 0x12
WriteBatchNotUsedRecord WriteBatchRecordType = 0x7F
)
// WriteBatchRecord represents a record inside a WriteBatch.
type WriteBatchRecord struct {
CF int
Key []byte
Value []byte
Type WriteBatchRecordType
}
// WriteBatchIterator represents an iterator over the records in a WriteBatch.
type WriteBatchIterator struct {
data []byte
record WriteBatchRecord
err error
}
// Next returns the next record.
// Returns false if no further record exists.
func (iter *WriteBatchIterator) Next() bool {
if iter.err != nil || len(iter.data) == 0 {
return false
}
// reset the current record
iter.record.CF = 0
iter.record.Key = nil
iter.record.Value = nil
// parse the record type
iter.record.Type = iter.decodeRecType()
switch iter.record.Type {
case
WriteBatchDeletionRecord,
WriteBatchSingleDeletionRecord:
iter.record.Key = iter.decodeSlice()
case
WriteBatchCFDeletionRecord,
WriteBatchCFSingleDeletionRecord:
iter.record.CF = int(iter.decodeVarint())
if iter.err == nil {
iter.record.Key = iter.decodeSlice()
}
case
WriteBatchValueRecord,
WriteBatchMergeRecord,
WriteBatchRangeDeletion,
WriteBatchBlobIndex:
iter.record.Key = iter.decodeSlice()
if iter.err == nil {
iter.record.Value = iter.decodeSlice()
}
case
WriteBatchCFValueRecord,
WriteBatchCFRangeDeletion,
WriteBatchCFMergeRecord,
WriteBatchCFBlobIndex:
iter.record.CF = int(iter.decodeVarint())
if iter.err == nil {
iter.record.Key = iter.decodeSlice()
}
if iter.err == nil {
iter.record.Value = iter.decodeSlice()
}
case WriteBatchLogDataRecord:
iter.record.Value = iter.decodeSlice()
case
WriteBatchNoopRecord,
WriteBatchBeginPrepareXIDRecord,
WriteBatchBeginPersistedPrepareXIDRecord:
case
WriteBatchEndPrepareXIDRecord,
WriteBatchCommitXIDRecord,
WriteBatchRollbackXIDRecord:
iter.record.Value = iter.decodeSlice()
default:
iter.err = errors.New("unsupported wal record type")
}
return iter.err == nil
}
// Record returns the current record.
func (iter *WriteBatchIterator) Record() *WriteBatchRecord {
return &iter.record
}
// Error returns the error if the iteration failed.
func (iter *WriteBatchIterator) Error() error {
return iter.err
}
func (iter *WriteBatchIterator) decodeSlice() []byte {
l := int(iter.decodeVarint())
if l > len(iter.data) {
iter.err = io.ErrShortBuffer
}
if iter.err != nil {
return []byte{}
}
ret := iter.data[:l]
iter.data = iter.data[l:]
return ret
}
func (iter *WriteBatchIterator) decodeRecType() WriteBatchRecordType {
if len(iter.data) == 0 {
iter.err = io.ErrShortBuffer
return WriteBatchNotUsedRecord
}
t := iter.data[0]
iter.data = iter.data[1:]
return WriteBatchRecordType(t)
}
func (iter *WriteBatchIterator) decodeVarint() uint64 {
var n int
var x uint64
for shift := uint(0); shift < 64 && n < len(iter.data); shift += 7 {
b := uint64(iter.data[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
iter.data = iter.data[n:]
return x
}
}
if n == len(iter.data) {
iter.err = io.ErrShortBuffer
} else {
iter.err = errors.New("malformed varint")
}
return 0
}