libnetwork/bitmap: remove datastore concerns

There is a solid bit-vector datatype hidden inside bitseq.Handle, but
it's obscured by all the intrusive datastore and KVObject nonsense.
It can be used without a datastore, but the datastore baggage limits its
utility inside and outside libnetwork. Extract the datatype goodness
into its own package which depends only on the standard library so it
can be used in more situations.

Signed-off-by: Cory Snider <csnider@mirantis.com>
Cory Snider 2023-01-20 13:42:40 -05:00
parent ad03a09451
commit c4d7294b5c
3 changed files with 132 additions and 563 deletions
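
For orientation before the diff: a minimal sketch of the extracted package used on its own, with no datastore wired in. It exercises only the API visible in the diff below (New, SetAny, Set, IsSet, Unset, Unselected, Bits, MarshalBinary, UnmarshalBinary); the import path is assumed from the commit title and the repository's existing github.com/docker/docker module layout.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/libnetwork/bitmap" // import path assumed from the commit title
)

func main() {
	b := bitmap.New(1024) // 1024-bit vector, every bit initially unset

	// Reserve the first free bit; with serial=false the scan always
	// starts from ordinal 0.
	ord, err := b.SetAny(false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("allocated ordinal:", ord, "free bits left:", b.Unselected())

	// Reserve and release a specific ordinal.
	if err := b.Set(42); err != nil {
		log.Fatal(err)
	}
	fmt.Println("bit 42 set:", b.IsSet(42)) // true
	if err := b.Unset(42); err != nil {
		log.Fatal(err)
	}

	// Persistence is now the caller's concern: serialize with
	// MarshalBinary and restore into a fresh Bitmap.
	data, err := b.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	restored := bitmap.New(b.Bits())
	if err := restored.UnmarshalBinary(data); err != nil {
		log.Fatal(err)
	}
	fmt.Println("restored free bits:", restored.Unselected())
}
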


@@ -1,18 +1,11 @@
// Package bitseq provides a structure and utilities for representing long bitmask
// as sequence of run-length encoded blocks. It operates directly on the encoded
// representation, it does not decode/encode.
package bitseq
// Package bitmap provides a datatype for long vectors of bits.
package bitmap
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"sync"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
// block sequence constants
@@ -32,51 +25,48 @@ var (
ErrBitAllocated = errors.New("requested bit is already allocated")
)
// Handle contains the sequence representing the bitmask and its identifier
type Handle struct {
// https://github.com/golang/go/issues/8005#issuecomment-190753527
type noCopy struct{}
func (noCopy) Lock() {}
// Bitmap is a fixed-length bit vector. It is not safe for concurrent use.
//
// The data is stored as a list of run-length encoded blocks. It operates
// directly on the encoded representation, without decompressing.
type Bitmap struct {
bits uint64
unselected uint64
head *sequence
app string
id string
dbIndex uint64
dbExists bool
curr uint64
store datastore.DataStore
sync.Mutex
// Shallow copies would share the same head pointer but a copy of the
// unselected count. Mutating the sequence through one would change the
// bits for all copies but only update that one copy's unselected count,
// which would result in subtle bugs.
noCopy noCopy
}
// NewHandle returns a thread-safe instance of the bitmask handler
func NewHandle(app string, ds datastore.DataStore, id string, numElements uint64) (*Handle, error) {
h := &Handle{
app: app,
id: id,
store: ds,
bits: numElements,
unselected: numElements,
// New returns a new Bitmap n bits long.
func New(n uint64) *Bitmap {
return &Bitmap{
bits: n,
unselected: n,
head: &sequence{
block: 0x0,
count: getNumBlocks(numElements),
count: getNumBlocks(n),
},
}
}
if h.store == nil {
return h, nil
// Copy returns a deep copy of b.
func Copy(b *Bitmap) *Bitmap {
return &Bitmap{
bits: b.bits,
unselected: b.unselected,
head: b.head.getCopy(),
curr: b.curr,
}
// Get the initial status from the ds if present.
if err := h.store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
return nil, err
}
// If the handle is not in store, write it.
if !h.Exists() {
if err := h.writeToStore(); err != nil {
return nil, fmt.Errorf("failed to write bitsequence to store: %v", err)
}
}
return h, nil
}
// sequence represents a recurring sequence of 32 bits long bitmasks
@@ -186,24 +176,14 @@ func (s *sequence) fromByteArray(data []byte) error {
return nil
}
func (h *Handle) getCopy() *Handle {
return &Handle{
bits: h.bits,
unselected: h.unselected,
head: h.head.getCopy(),
app: h.app,
id: h.id,
dbIndex: h.dbIndex,
dbExists: h.dbExists,
store: h.store,
curr: h.curr,
}
}
// SetAnyInRange atomically sets the first unset bit in the specified range in the sequence and returns the corresponding ordinal
func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) {
// SetAnyInRange sets the first unset bit in the range [start, end) and returns
// the ordinal of the set bit.
//
// When serial=true, the bitmap is scanned starting from the ordinal following
// the bit most recently set by [Bitmap.SetAny] or [Bitmap.SetAnyInRange].
func (h *Bitmap) SetAnyInRange(start, end uint64, serial bool) (uint64, error) {
if end < start || end >= h.bits {
return invalidPos, fmt.Errorf("invalid bit range [%d, %d]", start, end)
return invalidPos, fmt.Errorf("invalid bit range [%d, %d)", start, end)
}
if h.Unselected() == 0 {
return invalidPos, ErrNoBitAvailable
@@ -211,8 +191,12 @@ func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) {
return h.set(0, start, end, true, false, serial)
}
// SetAny atomically sets the first unset bit in the sequence and returns the corresponding ordinal
func (h *Handle) SetAny(serial bool) (uint64, error) {
// SetAny sets the first unset bit in the sequence and returns the ordinal of
// the set bit.
//
// When serial=true, the bitmap is scanned starting from the ordinal following
// the bit most recently set by [Bitmap.SetAny] or [Bitmap.SetAnyInRange].
func (h *Bitmap) SetAny(serial bool) (uint64, error) {
if h.Unselected() == 0 {
return invalidPos, ErrNoBitAvailable
}
@@ -220,7 +204,7 @@ func (h *Handle) SetAny(serial bool) (uint64, error) {
}
// Set atomically sets the corresponding bit in the sequence
func (h *Handle) Set(ordinal uint64) error {
func (h *Bitmap) Set(ordinal uint64) error {
if err := h.validateOrdinal(ordinal); err != nil {
return err
}
@@ -229,7 +213,7 @@ func (h *Handle) Set(ordinal uint64) error {
}
// Unset atomically unsets the corresponding bit in the sequence
func (h *Handle) Unset(ordinal uint64) error {
func (h *Bitmap) Unset(ordinal uint64) error {
if err := h.validateOrdinal(ordinal); err != nil {
return err
}
@@ -239,17 +223,17 @@ func (h *Handle) Unset(ordinal uint64) error {
// IsSet atomically checks if the ordinal bit is set. In case ordinal
// is outside of the bit sequence limits, false is returned.
func (h *Handle) IsSet(ordinal uint64) bool {
func (h *Bitmap) IsSet(ordinal uint64) bool {
if err := h.validateOrdinal(ordinal); err != nil {
return false
}
h.Lock()
_, _, err := checkIfAvailable(h.head, ordinal)
h.Unlock()
return err != nil
}
func (h *Handle) runConsistencyCheck() bool {
// CheckConsistency checks if the bit sequence is in an inconsistent state and attempts to fix it.
// It looks for a corruption signature that may happen in docker 1.9.0 and 1.9.1.
func (h *Bitmap) CheckConsistency() bool {
corrupted := false
for p, c := h.head, h.head.next; c != nil; c = c.next {
if c.count == 0 {
@@ -262,47 +246,8 @@ func (h *Handle) runConsistencyCheck() bool {
return corrupted
}
// CheckConsistency checks if the bit sequence is in an inconsistent state and attempts to fix it.
// It looks for a corruption signature that may happen in docker 1.9.0 and 1.9.1.
func (h *Handle) CheckConsistency() error {
for {
h.Lock()
store := h.store
h.Unlock()
if store != nil {
if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
return err
}
}
h.Lock()
nh := h.getCopy()
h.Unlock()
if !nh.runConsistencyCheck() {
return nil
}
if err := nh.writeToStore(); err != nil {
if _, ok := err.(types.RetryError); !ok {
return fmt.Errorf("internal failure while fixing inconsistent bitsequence: %v", err)
}
continue
}
logrus.Infof("Fixed inconsistent bit sequence in datastore:\n%s\n%s", h, nh)
h.Lock()
h.head = nh.head
h.Unlock()
return nil
}
}
// set/reset the bit
func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) {
func (h *Bitmap) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) {
var (
bitPos uint64
bytePos uint64
@@ -310,122 +255,67 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
err error
)
for {
var store datastore.DataStore
curr := uint64(0)
h.Lock()
store = h.store
if store != nil {
h.Unlock() // The lock is acquired in the GetObject
if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
return ret, err
}
h.Lock() // Acquire the lock back
}
if serial {
curr = h.curr
}
// Get position if available
if release {
bytePos, bitPos = ordinalToPos(ordinal)
} else {
if any {
bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end)
ret = posToOrdinal(bytePos, bitPos)
if err == nil {
h.curr = ret + 1
}
} else {
bytePos, bitPos, err = checkIfAvailable(h.head, ordinal)
ret = ordinal
}
}
if err != nil {
h.Unlock()
return ret, err
}
// Create a private copy of h and work on it
nh := h.getCopy()
nh.head = pushReservation(bytePos, bitPos, nh.head, release)
if release {
nh.unselected++
} else {
nh.unselected--
}
if h.store != nil {
h.Unlock()
// Attempt to write private copy to store
if err := nh.writeToStore(); err != nil {
if _, ok := err.(types.RetryError); !ok {
return ret, fmt.Errorf("internal failure while setting the bit: %v", err)
}
// Retry
continue
}
h.Lock()
}
// Previous atomic push was successful. Save private copy to local copy
h.unselected = nh.unselected
h.head = nh.head
h.dbExists = nh.dbExists
h.dbIndex = nh.dbIndex
h.Unlock()
return ret, nil
curr := uint64(0)
if serial {
curr = h.curr
}
// Get position if available
if release {
bytePos, bitPos = ordinalToPos(ordinal)
} else {
if any {
bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end)
ret = posToOrdinal(bytePos, bitPos)
if err == nil {
h.curr = ret + 1
}
} else {
bytePos, bitPos, err = checkIfAvailable(h.head, ordinal)
ret = ordinal
}
}
if err != nil {
return ret, err
}
h.head = pushReservation(bytePos, bitPos, h.head, release)
if release {
h.unselected++
} else {
h.unselected--
}
return ret, nil
}
// check is needed to cover the case where the number of bits is not a multiple of blockLen
func (h *Handle) validateOrdinal(ordinal uint64) error {
h.Lock()
defer h.Unlock()
func (h *Bitmap) validateOrdinal(ordinal uint64) error {
if ordinal >= h.bits {
return errors.New("bit does not belong to the sequence")
}
return nil
}
// Destroy removes from the datastore the data belonging to this handle
func (h *Handle) Destroy() error {
for {
if err := h.deleteFromStore(); err != nil {
if _, ok := err.(types.RetryError); !ok {
return fmt.Errorf("internal failure while destroying the sequence: %v", err)
}
// Fetch latest
if err := h.store.GetObject(datastore.Key(h.Key()...), h); err != nil {
if err == datastore.ErrKeyNotFound { // already removed
return nil
}
return fmt.Errorf("failed to fetch from store when destroying the sequence: %v", err)
}
continue
}
return nil
}
}
// ToByteArray converts this handle's data into a byte array
func (h *Handle) ToByteArray() ([]byte, error) {
h.Lock()
defer h.Unlock()
// MarshalBinary encodes h into a binary representation.
func (h *Bitmap) MarshalBinary() ([]byte, error) {
ba := make([]byte, 16)
binary.BigEndian.PutUint64(ba[0:], h.bits)
binary.BigEndian.PutUint64(ba[8:], h.unselected)
bm, err := h.head.toByteArray()
if err != nil {
return nil, fmt.Errorf("failed to serialize head: %s", err.Error())
return nil, fmt.Errorf("failed to serialize head: %v", err)
}
ba = append(ba, bm...)
return ba, nil
}
// FromByteArray reads his handle's data from a byte array
func (h *Handle) FromByteArray(ba []byte) error {
// UnmarshalBinary decodes a binary representation of a Bitmap value which was
// generated using [Bitmap.MarshalBinary].
//
// The scan position for serial [Bitmap.SetAny] and [Bitmap.SetAnyInRange]
// operations is neither unmarshaled nor reset.
func (h *Bitmap) UnmarshalBinary(ba []byte) error {
if ba == nil {
return errors.New("nil byte array")
}
@@ -433,67 +323,46 @@ func (h *Handle) FromByteArray(ba []byte) error {
nh := &sequence{}
err := nh.fromByteArray(ba[16:])
if err != nil {
return fmt.Errorf("failed to deserialize head: %s", err.Error())
return fmt.Errorf("failed to deserialize head: %v", err)
}
h.Lock()
h.head = nh
h.bits = binary.BigEndian.Uint64(ba[0:8])
h.unselected = binary.BigEndian.Uint64(ba[8:16])
h.Unlock()
return nil
}
// Bits returns the length of the bit sequence
func (h *Handle) Bits() uint64 {
func (h *Bitmap) Bits() uint64 {
return h.bits
}
// Unselected returns the number of bits which are not selected
func (h *Handle) Unselected() uint64 {
h.Lock()
defer h.Unlock()
func (h *Bitmap) Unselected() uint64 {
return h.unselected
}
func (h *Handle) String() string {
h.Lock()
defer h.Unlock()
return fmt.Sprintf("App: %s, ID: %s, DBIndex: 0x%x, Bits: %d, Unselected: %d, Sequence: %s Curr:%d",
h.app, h.id, h.dbIndex, h.bits, h.unselected, h.head.toString(), h.curr)
func (h *Bitmap) String() string {
return fmt.Sprintf("Bits: %d, Unselected: %d, Sequence: %s Curr:%d",
h.bits, h.unselected, h.head.toString(), h.curr)
}
// MarshalJSON encodes Handle into json message
func (h *Handle) MarshalJSON() ([]byte, error) {
m := map[string]interface{}{
"id": h.id,
}
b, err := h.ToByteArray()
// MarshalJSON encodes h into a JSON message
func (h *Bitmap) MarshalJSON() ([]byte, error) {
b, err := h.MarshalBinary()
if err != nil {
return nil, err
}
m["sequence"] = b
return json.Marshal(m)
return json.Marshal(b)
}
// UnmarshalJSON decodes json message into Handle
func (h *Handle) UnmarshalJSON(data []byte) error {
var (
m map[string]interface{}
b []byte
err error
)
if err = json.Unmarshal(data, &m); err != nil {
// UnmarshalJSON decodes JSON message into h
func (h *Bitmap) UnmarshalJSON(data []byte) error {
var b []byte
if err := json.Unmarshal(data, &b); err != nil {
return err
}
h.id = m["id"].(string)
bi, _ := json.Marshal(m["sequence"])
if err := json.Unmarshal(bi, &b); err != nil {
return err
}
return h.FromByteArray(b)
return h.UnmarshalBinary(b)
}
// getFirstAvailable looks for the first unset bit in passed mask starting from start
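
The serial flag documented on the new SetAny and SetAnyInRange comments above is easiest to see in a small sketch. This assumes only the behaviour described in those doc comments and the same assumed import path as the earlier example:

package main

import (
	"fmt"

	"github.com/docker/docker/libnetwork/bitmap" // path assumed, as above
)

func main() {
	b := bitmap.New(8)

	first, _ := b.SetAny(true) // 0: nothing set yet, so the scan starts at ordinal 0
	_ = b.Unset(first)         // release it again

	// serial=true resumes scanning after the most recently set bit, so the
	// freshly released ordinal 0 is skipped here...
	serial, _ := b.SetAny(true)

	// ...while serial=false rescans from the start and reuses it.
	fromStart, _ := b.SetAny(false)

	fmt.Println(first, serial, fromStart) // expected: 0 1 0
}
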


@@ -1,46 +1,11 @@
package bitseq
package bitmap
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/boltdb"
)
var (
defaultPrefix = filepath.Join(os.TempDir(), "libnetwork", "test", "bitseq")
)
func init() {
boltdb.Register()
}
func randomLocalStore() (datastore.DataStore, error) {
tmp, err := os.CreateTemp("", "libnetwork-")
if err != nil {
return nil, fmt.Errorf("Error creating temp file: %v", err)
}
if err := tmp.Close(); err != nil {
return nil, fmt.Errorf("Error closing temp file: %v", err)
}
return datastore.NewDataStore(datastore.LocalScope, &datastore.ScopeCfg{
Client: datastore.ScopeClientCfg{
Provider: "boltdb",
Address: filepath.Join(defaultPrefix, filepath.Base(tmp.Name())),
Config: &store.Config{
Bucket: "libnetwork",
ConnectionTimeout: 3 * time.Second,
},
},
})
}
func TestSequenceGetAvailableBit(t *testing.T) {
input := []struct {
head *sequence
@@ -538,10 +503,7 @@ func getTestSequence() *sequence {
}
func TestSet(t *testing.T) {
hnd, err := NewHandle("", nil, "", 1024*32)
if err != nil {
t.Fatal(err)
}
hnd := New(1024 * 32)
hnd.head = getTestSequence()
firstAv := uint64(32*100 + 31)
@@ -597,10 +559,7 @@ func TestSet(t *testing.T) {
func TestSetUnset(t *testing.T) {
numBits := uint64(32 * blockLen)
hnd, err := NewHandle("", nil, "", numBits)
if err != nil {
t.Fatal(err)
}
hnd := New(numBits)
if err := hnd.Set(uint64(32 * blockLen)); err == nil {
t.Fatal("Expected failure, but succeeded")
@@ -635,11 +594,7 @@ func TestSetUnset(t *testing.T) {
func TestOffsetSetUnset(t *testing.T) {
numBits := uint64(32 * blockLen)
var o uint64
hnd, err := NewHandle("", nil, "", numBits)
if err != nil {
t.Fatal(err)
}
hnd := New(numBits)
// set and unset all one by one
for hnd.Unselected() > 0 {
@@ -661,7 +616,8 @@ func TestOffsetSetUnset(t *testing.T) {
}
//At this point sequence is (0xffffffff, 9)->(0x7fffffff, 1)->(0xffffffff, 22)->end
if o, err = hnd.SetAnyInRange(32, 500, false); err != nil {
o, err := hnd.SetAnyInRange(32, 500, false)
if err != nil {
t.Fatal(err)
}
@@ -672,10 +628,7 @@ func TestOffsetSetUnset(t *testing.T) {
func TestSetInRange(t *testing.T) {
numBits := uint64(1024 * blockLen)
hnd, err := NewHandle("", nil, "", numBits)
if err != nil {
t.Fatal(err)
}
hnd := New(numBits)
hnd.head = getTestSequence()
firstAv := uint64(100*blockLen + blockLen - 1)
@@ -739,10 +692,7 @@ func TestSetInRange(t *testing.T) {
}
// create a non multiple of 32 mask
hnd, err = NewHandle("", nil, "", 30)
if err != nil {
t.Fatal(err)
}
hnd = New(30)
// set all bit in the first range
for hnd.Unselected() > 22 {
@@ -798,10 +748,7 @@ func TestSetInRange(t *testing.T) {
// in the first or last sequence block.
func TestSetAnyInRange(t *testing.T) {
numBits := uint64(8 * blockLen)
hnd, err := NewHandle("", nil, "", numBits)
if err != nil {
t.Fatal(err)
}
hnd := New(numBits)
if err := hnd.Set(0); err != nil {
t.Fatal(err)
@@ -847,10 +794,7 @@ func TestSetAnyInRange(t *testing.T) {
func TestMethods(t *testing.T) {
numBits := uint64(256 * blockLen)
hnd, err := NewHandle("path/to/data", nil, "sequence1", numBits)
if err != nil {
t.Fatal(err)
}
hnd := New(numBits)
if hnd.Bits() != numBits {
t.Fatalf("Unexpected bit number: %d", hnd.Bits())
@@ -879,16 +823,8 @@ func TestMethods(t *testing.T) {
}
func TestRandomAllocateDeallocate(t *testing.T) {
ds, err := randomLocalStore()
if err != nil {
t.Fatal(err)
}
numBits := int(16 * blockLen)
hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
if err != nil {
t.Fatal(err)
}
hnd := New(uint64(numBits))
seed := time.Now().Unix()
rand.Seed(seed)
@@ -922,25 +858,12 @@ func TestRandomAllocateDeallocate(t *testing.T) {
if hnd.head.toString() != "(0x0, 16)->end" {
t.Fatalf("Unexpected db: %s", hnd.head.toString())
}
err = hnd.Destroy()
if err != nil {
t.Fatal(err)
}
}
func TestAllocateRandomDeallocate(t *testing.T) {
ds, err := randomLocalStore()
if err != nil {
t.Fatal(err)
}
numBlocks := uint32(8)
numBits := int(numBlocks * blockLen)
hnd, err := NewHandle(filepath.Join("bitseq", "test", "data"), ds, "test1", uint64(numBits))
if err != nil {
t.Fatal(err)
}
hnd := New(uint64(numBits))
expected := &sequence{block: 0xffffffff, count: uint64(numBlocks / 2), next: &sequence{block: 0x0, count: uint64(numBlocks / 2)}}
@@ -987,25 +910,13 @@ func TestAllocateRandomDeallocate(t *testing.T) {
if !hnd.head.equal(expected) {
t.Fatalf("Unexpected sequence. Got:\n%s", hnd)
}
err = hnd.Destroy()
if err != nil {
t.Fatal(err)
}
}
func TestAllocateRandomDeallocateSerialize(t *testing.T) {
ds, err := randomLocalStore()
if err != nil {
t.Fatal(err)
}
numBlocks := uint32(8)
numBits := int(numBlocks * blockLen)
hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
if err != nil {
t.Fatal(err)
}
hnd := New(uint64(numBits))
expected := &sequence{block: 0xffffffff, count: uint64(numBlocks / 2), next: &sequence{block: 0x0, count: uint64(numBlocks / 2)}}
@@ -1050,92 +961,33 @@ func TestAllocateRandomDeallocateSerialize(t *testing.T) {
if hnd.Unselected() != uint64(numBits/2) {
t.Fatalf("Expected half sequence. Instead found %d free bits.\nSeed: %d\n%s", hnd.unselected, seed, hnd)
}
err = hnd.Destroy()
if err != nil {
t.Fatal(err)
}
}
func TestRetrieveFromStore(t *testing.T) {
ds, err := randomLocalStore()
if err != nil {
t.Fatal(err)
}
numBits := int(8 * blockLen)
hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
if err != nil {
t.Fatal(err)
}
// Allocate first half of the bits
for i := 0; i < numBits/2; i++ {
_, err := hnd.SetAny(false)
if err != nil {
t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd)
}
}
hnd0 := hnd.String()
// Retrieve same handle
hnd, err = NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
if err != nil {
t.Fatal(err)
}
hnd1 := hnd.String()
if hnd1 != hnd0 {
t.Fatalf("%v\n%v", hnd0, hnd1)
}
err = hnd.Destroy()
if err != nil {
t.Fatal(err)
}
}
func TestIsCorrupted(t *testing.T) {
ds, err := randomLocalStore()
if err != nil {
t.Fatal(err)
}
// Negative test
hnd, err := NewHandle("bitseq-test/data/", ds, "test_corrupted", 1024)
if err != nil {
t.Fatal(err)
}
hnd := New(1024)
if hnd.runConsistencyCheck() {
if hnd.CheckConsistency() {
t.Fatalf("Unexpected corrupted for %s", hnd)
}
if err := hnd.CheckConsistency(); err != nil {
t.Fatal(err)
}
hnd.Set(0)
if hnd.runConsistencyCheck() {
if hnd.CheckConsistency() {
t.Fatalf("Unexpected corrupted for %s", hnd)
}
hnd.Set(1023)
if hnd.runConsistencyCheck() {
if hnd.CheckConsistency() {
t.Fatalf("Unexpected corrupted for %s", hnd)
}
if err := hnd.CheckConsistency(); err != nil {
t.Fatal(err)
}
// Try real corrupted ipam handles found in the local store files reported by three docker users,
// plus a generic ipam handle from docker 1.9.1. This last will fail as well, because of how the
// last node in the sequence is expressed (This is true for IPAM handle only, because of the broadcast
// address reservation: last bit). This will allow an application using bitseq that runs a consistency
// check to detect and replace the 1.9.0/1 old vulnerable handle with the new one.
input := []*Handle{
input := []*Bitmap{
{
id: "LocalDefault/172.17.0.0/16",
bits: 65536,
unselected: 65412,
head: &sequence{
@@ -1170,7 +1022,6 @@ func TestIsCorrupted(t *testing.T) {
},
},
{
id: "LocalDefault/172.17.0.0/16",
bits: 65536,
unselected: 65319,
head: &sequence{
@@ -1203,7 +1054,6 @@ func TestIsCorrupted(t *testing.T) {
},
},
{
id: "LocalDefault/172.17.0.0/16",
bits: 65536,
unselected: 65456,
head: &sequence{
@@ -1234,27 +1084,19 @@ func TestIsCorrupted(t *testing.T) {
}
for idx, hnd := range input {
if !hnd.runConsistencyCheck() {
if !hnd.CheckConsistency() {
t.Fatalf("Expected corrupted for (%d): %s", idx, hnd)
}
if hnd.runConsistencyCheck() {
if hnd.CheckConsistency() {
t.Fatalf("Sequence still marked corrupted (%d): %s", idx, hnd)
}
}
}
func testSetRollover(t *testing.T, serial bool) {
ds, err := randomLocalStore()
if err != nil {
t.Fatal(err)
}
numBlocks := uint32(8)
numBits := int(numBlocks * blockLen)
hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
if err != nil {
t.Fatal(err)
}
hnd := New(uint64(numBits))
// Allocate first half of the bits
for i := 0; i < numBits/2; i++ {
@@ -1309,11 +1151,6 @@ func testSetRollover(t *testing.T, serial bool) {
if hnd.Unselected() != 0 {
t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), 0)
}
err = hnd.Destroy()
if err != nil {
t.Fatal(err)
}
}
func TestSetRollover(t *testing.T) {
@@ -1361,12 +1198,8 @@ func TestGetFirstAvailableFromCurrent(t *testing.T) {
}
func TestMarshalJSON(t *testing.T) {
const expectedID = "my-bitseq"
expected := []byte("hello libnetwork")
hnd, err := NewHandle("", nil, expectedID, uint64(len(expected)*8))
if err != nil {
t.Fatal(err)
}
hnd := New(uint64(len(expected) * 8))
for i, c := range expected {
for j := 0; j < 8; j++ {
@@ -1391,7 +1224,7 @@ func TestMarshalJSON(t *testing.T) {
// found in the wild. We need to support unmarshaling old versions to
// maintain backwards compatibility with sequences persisted on disk.
const (
goldenV0 = `{"id":"my-bitseq","sequence":"AAAAAAAAAIAAAAAAAAAAPRamNjYAAAAAAAAAAfYENpYAAAAAAAAAAUZ2pi4AAAAAAAAAAe72TtYAAAAAAAAAAQ=="}`
goldenV0 = `"AAAAAAAAAIAAAAAAAAAAPRamNjYAAAAAAAAAAfYENpYAAAAAAAAAAUZ2pi4AAAAAAAAAAe72TtYAAAAAAAAAAQ=="`
)
if string(marshaled) != goldenV0 {
@@ -1407,10 +1240,7 @@ func TestMarshalJSON(t *testing.T) {
} {
tt := tt
t.Run("UnmarshalJSON="+tt.name, func(t *testing.T) {
hnd2, err := NewHandle("", nil, "", 0)
if err != nil {
t.Fatal(err)
}
hnd2 := New(0)
if err := hnd2.UnmarshalJSON(tt.data); err != nil {
t.Errorf("UnmarshalJSON() err = %v", err)
}
@@ -1418,7 +1248,7 @@ func TestMarshalJSON(t *testing.T) {
h2str := hnd2.String()
t.Log(h2str)
if hstr != h2str {
t.Errorf("Unmarshaled a different bitseq: want %q, got %q", hstr, h2str)
t.Errorf("Unmarshaled a different bitmap: want %q, got %q", hstr, h2str)
}
})
}
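
A note on the MarshalJSON change exercised by the golden constant above: with the datastore identity dropped, the JSON encoding is now just the base64 form of MarshalBinary rather than an object with "id" and "sequence" fields. A hedged round-trip sketch, using the same assumed import path as before:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/libnetwork/bitmap" // path assumed, as above
)

func main() {
	b := bitmap.New(16)
	_ = b.Set(3)

	j, err := json.Marshal(b) // a single base64-encoded JSON string
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j))

	restored := bitmap.New(0) // size is overwritten by the encoded header
	if err := json.Unmarshal(j, restored); err != nil {
		panic(err)
	}
	fmt.Println(restored.IsSet(3), restored.Bits()) // true 16
}
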


@@ -1,130 +0,0 @@
package bitseq
import (
"encoding/json"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/types"
)
// Key provides the Key to be used in KV Store
func (h *Handle) Key() []string {
h.Lock()
defer h.Unlock()
return []string{h.app, h.id}
}
// KeyPrefix returns the immediate parent key that can be used for tree walk
func (h *Handle) KeyPrefix() []string {
h.Lock()
defer h.Unlock()
return []string{h.app}
}
// Value marshals the data to be stored in the KV store
func (h *Handle) Value() []byte {
b, err := json.Marshal(h)
if err != nil {
return nil
}
return b
}
// SetValue unmarshals the data from the KV store
func (h *Handle) SetValue(value []byte) error {
return json.Unmarshal(value, h)
}
// Index returns the latest DB Index as seen by this object
func (h *Handle) Index() uint64 {
h.Lock()
defer h.Unlock()
return h.dbIndex
}
// SetIndex method allows the datastore to store the latest DB Index into this object
func (h *Handle) SetIndex(index uint64) {
h.Lock()
h.dbIndex = index
h.dbExists = true
h.Unlock()
}
// Exists method is true if this object has been stored in the DB.
func (h *Handle) Exists() bool {
h.Lock()
defer h.Unlock()
return h.dbExists
}
// New method returns a handle based on the receiver handle
func (h *Handle) New() datastore.KVObject {
h.Lock()
defer h.Unlock()
return &Handle{
app: h.app,
store: h.store,
}
}
// CopyTo deep copies the handle into the passed destination object
func (h *Handle) CopyTo(o datastore.KVObject) error {
h.Lock()
defer h.Unlock()
dstH := o.(*Handle)
if h == dstH {
return nil
}
dstH.Lock()
dstH.bits = h.bits
dstH.unselected = h.unselected
dstH.head = h.head.getCopy()
dstH.app = h.app
dstH.id = h.id
dstH.dbIndex = h.dbIndex
dstH.dbExists = h.dbExists
dstH.store = h.store
dstH.curr = h.curr
dstH.Unlock()
return nil
}
// Skip provides a way for a KV Object to avoid persisting it in the KV Store
func (h *Handle) Skip() bool {
return false
}
// DataScope method returns the storage scope of the datastore
func (h *Handle) DataScope() string {
h.Lock()
defer h.Unlock()
return h.store.Scope()
}
func (h *Handle) writeToStore() error {
h.Lock()
store := h.store
h.Unlock()
if store == nil {
return nil
}
err := store.PutObjectAtomic(h)
if err == datastore.ErrKeyModified {
return types.RetryErrorf("failed to perform atomic write (%v). Retry might fix the error", err)
}
return err
}
func (h *Handle) deleteFromStore() error {
h.Lock()
store := h.store
h.Unlock()
if store == nil {
return nil
}
return store.DeleteObjectAtomic(h)
}