libnetwork: delete package bitseq

Package idm was the last remaining user of bitseq.

Signed-off-by: Cory Snider <csnider@mirantis.com>
This commit is contained in:
Cory Snider 2023-07-07 14:53:44 -04:00
parent 8b167535db
commit f9a83daebb
3 changed files with 0 additions and 666 deletions

View file

@@ -1,213 +0,0 @@
// Package bitseq provides a structure and utilities for representing a long
// bitmask which is persisted in a datastore. It is backed by [bitmap.Bitmap]
// which operates directly on the encoded representation, without uncompressing.
package bitseq
import (
"encoding/json"
"fmt"
"sync"
"github.com/docker/docker/libnetwork/bitmap"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/types"
)
var (
	// ErrNoBitAvailable is returned when no more bits are available to set.
	// It aliases the bitmap package's sentinel so callers of bitseq do not
	// need to import bitmap to compare errors.
	ErrNoBitAvailable = bitmap.ErrNoBitAvailable
	// ErrBitAllocated is returned when the specific bit requested is already set.
	// Alias of the corresponding bitmap sentinel, for the same reason as above.
	ErrBitAllocated = bitmap.ErrBitAllocated
)
// Handle contains the sequence representing the bitmask and its identifier.
// All exported methods are safe for concurrent use.
type Handle struct {
	app      string              // application namespace; first component of the KV key (see Key)
	id       string              // identifier of this sequence; second component of the KV key
	dbIndex  uint64              // latest datastore index seen for this object (see Index/SetIndex)
	dbExists bool                // true once the object has been stored in the DB (see Exists)
	store    datastore.DataStore // backing store; nil for purely in-memory handles
	bm       *bitmap.Bitmap      // the actual bit sequence
	mu       sync.Mutex          // guards all fields above
}
// NewHandle returns a thread-safe instance of the bitmask handler.
// When ds is non-nil, any state already persisted under the handle's key
// is loaded; otherwise the freshly created handle is written to the store.
func NewHandle(app string, ds datastore.DataStore, id string, numElements uint64) (*Handle, error) {
	h := &Handle{
		app:   app,
		id:    id,
		store: ds,
		bm:    bitmap.New(numElements),
	}
	// Purely in-memory handle: nothing to synchronize with.
	if ds == nil {
		return h, nil
	}
	// Seed the handle from the datastore; a missing key is not an error.
	err := h.store.GetObject(datastore.Key(h.Key()...), h)
	if err != nil && err != datastore.ErrKeyNotFound {
		return nil, err
	}
	// First use of this key: persist the freshly created handle.
	if !h.Exists() {
		if err := h.writeToStore(); err != nil {
			return nil, fmt.Errorf("failed to write bitsequence to store: %v", err)
		}
	}
	return h, nil
}
// getCopy returns a deep copy of h (the bitmap is cloned; only the store
// reference is shared). Callers are expected to hold h.mu or otherwise
// have exclusive access to h.
func (h *Handle) getCopy() *Handle {
	clone := Handle{
		app:      h.app,
		id:       h.id,
		dbIndex:  h.dbIndex,
		dbExists: h.dbExists,
		store:    h.store,
		bm:       bitmap.Copy(h.bm),
	}
	return &clone
}
// SetAnyInRange atomically sets the first unset bit in the specified range
// of the sequence and returns the corresponding ordinal.
func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) {
	op := func(b *bitmap.Bitmap) (uint64, error) {
		return b.SetAnyInRange(start, end, serial)
	}
	return h.apply(op)
}
// SetAny atomically sets the first unset bit in the sequence and returns
// the corresponding ordinal. The serial flag is forwarded to the
// underlying bitmap.
func (h *Handle) SetAny(serial bool) (uint64, error) {
	op := func(b *bitmap.Bitmap) (uint64, error) {
		return b.SetAny(serial)
	}
	return h.apply(op)
}
// Set atomically sets the corresponding bit in the sequence.
func (h *Handle) Set(ordinal uint64) error {
	op := func(b *bitmap.Bitmap) (uint64, error) {
		// Set returns no ordinal; the zero value is discarded by the caller.
		return 0, b.Set(ordinal)
	}
	_, err := h.apply(op)
	return err
}
// Unset atomically unsets the corresponding bit in the sequence.
func (h *Handle) Unset(ordinal uint64) error {
	op := func(b *bitmap.Bitmap) (uint64, error) {
		// Unset returns no ordinal; the zero value is discarded by the caller.
		return 0, b.Unset(ordinal)
	}
	_, err := h.apply(op)
	return err
}
// IsSet atomically checks if the ordinal bit is set. In case ordinal
// is outside of the bit sequence limits, false is returned.
func (h *Handle) IsSet(ordinal uint64) bool {
	h.mu.Lock()
	set := h.bm.IsSet(ordinal)
	h.mu.Unlock()
	return set
}
// apply runs op against a private copy of the bitmap and, when a
// datastore is configured, attempts to persist the result atomically,
// restarting from fresh store state on a write conflict. On success the
// private copy becomes the handle's current state and op's ordinal
// result is returned.
func (h *Handle) apply(op func(*bitmap.Bitmap) (uint64, error)) (uint64, error) {
	for {
		var store datastore.DataStore
		h.mu.Lock()
		store = h.store
		if store != nil {
			h.mu.Unlock() // The lock is acquired in the GetObject
			// Refresh local state so op operates on the latest persisted bitmap.
			if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
				return 0, err
			}
			h.mu.Lock() // Acquire the lock back
		}
		// Create a private copy of h and work on it
		nh := h.getCopy()
		ret, err := op(nh.bm)
		if err != nil {
			h.mu.Unlock()
			return ret, err
		}
		if h.store != nil {
			h.mu.Unlock()
			// Attempt to write private copy to store
			if err := nh.writeToStore(); err != nil {
				// Only conflicts (RetryError) are retried; anything else is fatal.
				if _, ok := err.(types.RetryError); !ok {
					return ret, fmt.Errorf("internal failure while setting the bit: %v", err)
				}
				// Retry
				continue
			}
			h.mu.Lock()
		}
		// Previous atomic push was successful. Save private copy to local copy
		h.bm = nh.bm
		h.dbExists = nh.dbExists
		h.dbIndex = nh.dbIndex
		h.mu.Unlock()
		return ret, nil
	}
}
// Destroy removes from the datastore the data belonging to this handle.
// On a write conflict it re-fetches the latest version and retries; a
// missing key means the handle was already removed and is not an error.
func (h *Handle) Destroy() error {
	for {
		if err := h.deleteFromStore(); err != nil {
			// Non-retryable failure: give up.
			if _, ok := err.(types.RetryError); !ok {
				return fmt.Errorf("internal failure while destroying the sequence: %v", err)
			}
			// Fetch latest
			// NOTE(review): h.store is read here without holding h.mu,
			// unlike writeToStore/deleteFromStore — confirm the store
			// reference is never mutated concurrently.
			if err := h.store.GetObject(datastore.Key(h.Key()...), h); err != nil {
				if err == datastore.ErrKeyNotFound { // already removed
					return nil
				}
				return fmt.Errorf("failed to fetch from store when destroying the sequence: %v", err)
			}
			continue
		}
		return nil
	}
}
// Bits returns the length of the bit sequence.
func (h *Handle) Bits() uint64 {
	h.mu.Lock()
	n := h.bm.Bits()
	h.mu.Unlock()
	return n
}
// Unselected returns the number of bits which are not selected.
func (h *Handle) Unselected() uint64 {
	h.mu.Lock()
	free := h.bm.Unselected()
	h.mu.Unlock()
	return free
}
// String returns a human-readable summary of the handle (app, id, DB
// index, and the bitmap's own string form), used for logging and for
// state comparison in tests.
func (h *Handle) String() string {
	h.mu.Lock()
	defer h.mu.Unlock()
	return fmt.Sprintf("App: %s, ID: %s, DBIndex: 0x%x, %s",
		h.app, h.id, h.dbIndex, h.bm)
}
// jsonMessage is the wire format used to (un)marshal a Handle. Only the
// identifier and the bitmap are serialized; app, store, and index
// metadata are tracked outside the JSON payload.
type jsonMessage struct {
	ID       string         `json:"id"`
	Sequence *bitmap.Bitmap `json:"sequence"`
}
// MarshalJSON encodes h into a JSON message.
func (h *Handle) MarshalJSON() ([]byte, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	return json.Marshal(jsonMessage{ID: h.id, Sequence: h.bm})
}
// UnmarshalJSON decodes a JSON message into h.
func (h *Handle) UnmarshalJSON(data []byte) error {
	var msg jsonMessage
	if err := json.Unmarshal(data, &msg); err != nil {
		return err
	}
	h.mu.Lock()
	h.id = msg.ID
	h.bm = msg.Sequence
	h.mu.Unlock()
	return nil
}

View file

@@ -1,325 +0,0 @@
package bitseq
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/docker/libnetwork/datastore"
store "github.com/docker/docker/libnetwork/internal/kvstore"
"github.com/docker/docker/libnetwork/internal/kvstore/boltdb"
)
// defaultPrefix is the on-disk directory under which the test boltdb
// databases are created.
var defaultPrefix = filepath.Join(os.TempDir(), "libnetwork", "test", "bitseq")

func init() {
	// Make the "boltdb" provider available to datastore.NewDataStore.
	boltdb.Register()
}
// randomLocalStore creates a boltdb-backed datastore at a unique path
// under defaultPrefix for use in tests.
func randomLocalStore() (datastore.DataStore, error) {
	// The temp file exists only to reserve a unique base name; the boltdb
	// database itself is created under defaultPrefix using that name.
	tmp, err := os.CreateTemp("", "libnetwork-")
	if err != nil {
		return nil, fmt.Errorf("Error creating temp file: %v", err)
	}
	if err := tmp.Close(); err != nil {
		return nil, fmt.Errorf("Error closing temp file: %v", err)
	}
	// Remove the zero-byte placeholder so repeated test runs do not
	// accumulate stray files in the system temp directory.
	if err := os.Remove(tmp.Name()); err != nil {
		return nil, fmt.Errorf("Error removing temp file: %v", err)
	}
	return datastore.NewDataStore(datastore.ScopeCfg{
		Client: datastore.ScopeClientCfg{
			Provider: "boltdb",
			Address:  filepath.Join(defaultPrefix, filepath.Base(tmp.Name())),
			Config: &store.Config{
				Bucket:            "libnetwork",
				ConnectionTimeout: 3 * time.Second,
			},
		},
	})
}
// blockLen is the unit (in bits) used to size the test sequences;
// presumably it matches the underlying bitmap's internal block size —
// TODO confirm against the bitmap package.
const blockLen = 32
// This one tests an allocation pattern which unveiled an issue in pushReservation
// Specifically a failure in detecting when we are in the (B) case (the bit to set
// belongs to the last block of the current sequence). Because of a bug, code
// was assuming the bit belonged to a block in the middle of the current sequence.
// Which in turn caused an incorrect allocation when requesting a bit which is not
// in the first or last sequence block.
func TestSetAnyInRange(t *testing.T) {
	numBits := uint64(8 * blockLen)
	hnd, err := NewHandle("", nil, "", numBits)
	if err != nil {
		t.Fatal(err)
	}
	// Pin the first and last ordinals so the ranged allocations below
	// have to work around existing reservations.
	for _, bit := range []uint64{0, 255} {
		if err := hnd.Set(bit); err != nil {
			t.Fatal(err)
		}
	}
	// Each step allocates from a range and must yield the expected ordinal.
	steps := []struct {
		start, end, want uint64
	}{
		{start: 128, end: 255, want: 128},
		{start: 128, end: 255, want: 129},
		{start: 246, end: 255, want: 246},
		{start: 246, end: 255, want: 247},
	}
	for _, step := range steps {
		got, err := hnd.SetAnyInRange(step.start, step.end, false)
		if err != nil {
			t.Fatal(err)
		}
		if got != step.want {
			t.Fatalf("Unexpected ordinal: %d", got)
		}
	}
}
// TestRandomAllocateDeallocate sets every bit and then unsets every bit,
// each in a random order, verifying the sequence is respectively full and
// then empty. The RNG seed is logged on failure for reproducibility.
func TestRandomAllocateDeallocate(t *testing.T) {
	ds, err := randomLocalStore()
	if err != nil {
		t.Fatal(err)
	}
	numBits := int(16 * blockLen)
	hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := hnd.Destroy(); err != nil {
			t.Fatal(err)
		}
	}()
	seed := time.Now().Unix()
	rng := rand.New(rand.NewSource(seed))
	// Allocate all bits using a random pattern
	pattern := rng.Perm(numBits)
	for _, bit := range pattern {
		err := hnd.Set(uint64(bit))
		if err != nil {
			t.Errorf("Unexpected failure on allocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd)
		}
	}
	if unselected := hnd.Unselected(); unselected != 0 {
		t.Errorf("Expected full sequence. Instead found %d free bits. Seed: %d.\n%s", unselected, seed, hnd)
	}
	// Deallocate all bits using a random pattern
	pattern = rng.Perm(numBits)
	for _, bit := range pattern {
		err := hnd.Unset(uint64(bit))
		if err != nil {
			t.Errorf("Unexpected failure on deallocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd)
		}
	}
	// After deallocating everything, all numBits bits must be free again.
	if unselected := hnd.Unselected(); unselected != uint64(numBits) {
		t.Errorf("Expected full sequence. Instead found %d free bits. Seed: %d.\n%s", unselected, seed, hnd)
	}
}
// TestRetrieveFromStore verifies that a handle recreated with the same
// app/id against the same datastore picks up the previously persisted
// allocation state, by comparing String() output before and after.
func TestRetrieveFromStore(t *testing.T) {
	ds, err := randomLocalStore()
	if err != nil {
		t.Fatal(err)
	}
	numBits := int(8 * blockLen)
	hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
	if err != nil {
		t.Fatal(err)
	}
	// Allocate first half of the bits
	for i := 0; i < numBits/2; i++ {
		_, err := hnd.SetAny(false)
		if err != nil {
			t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd)
		}
	}
	hnd0 := hnd.String()
	// Retrieve same handle
	hnd, err = NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
	if err != nil {
		t.Fatal(err)
	}
	hnd1 := hnd.String()
	if hnd1 != hnd0 {
		t.Fatalf("%v\n%v", hnd0, hnd1)
	}
	err = hnd.Destroy()
	if err != nil {
		t.Fatal(err)
	}
}
// testSetRollover allocates half the sequence, randomly frees a quarter,
// then keeps allocating to verify the allocator rolls over and finds the
// randomly freed holes, ending with a completely full sequence.
func testSetRollover(t *testing.T, serial bool) {
	ds, err := randomLocalStore()
	if err != nil {
		t.Fatal(err)
	}
	numBlocks := uint32(8)
	numBits := int(numBlocks * blockLen)
	hnd, err := NewHandle("bitseq-test/data/", ds, "test1", uint64(numBits))
	if err != nil {
		t.Fatal(err)
	}
	// Allocate first half of the bits
	for i := 0; i < numBits/2; i++ {
		_, err := hnd.SetAny(serial)
		if err != nil {
			t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd)
		}
	}
	if unselected := hnd.Unselected(); unselected != uint64(numBits/2) {
		t.Fatalf("Expected full sequence. Instead found %d free bits. %s", unselected, hnd)
	}
	seed := time.Now().Unix()
	rng := rand.New(rand.NewSource(seed))
	// Deallocate half of the allocated bits following a random pattern
	pattern := rng.Perm(numBits / 2)
	for i := 0; i < numBits/4; i++ {
		bit := pattern[i]
		err := hnd.Unset(uint64(bit))
		if err != nil {
			t.Fatalf("Unexpected failure on deallocation of %d: %v.\nSeed: %d.\n%s", bit, err, seed, hnd)
		}
	}
	if unselected := hnd.Unselected(); unselected != uint64(3*numBits/4) {
		t.Fatalf("Unexpected free bits: found %d free bits.\nSeed: %d.\n%s", unselected, seed, hnd)
	}
	// request to allocate for remaining half of the bits
	for i := 0; i < numBits/2; i++ {
		_, err := hnd.SetAny(serial)
		if err != nil {
			t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd)
		}
	}
	// At this point all the bits must be allocated except the randomly unallocated bits
	// which were unallocated in the first half of the bit sequence
	if unselected := hnd.Unselected(); unselected != uint64(numBits/4) {
		t.Fatalf("Unexpected number of unselected bits %d, Expected %d", unselected, numBits/4)
	}
	for i := 0; i < numBits/4; i++ {
		_, err := hnd.SetAny(serial)
		if err != nil {
			t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd)
		}
	}
	// Now requesting to allocate the unallocated random bits (quarter of the number of bits) should
	// leave no more bits that can be allocated.
	if hnd.Unselected() != 0 {
		t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), 0)
	}
	err = hnd.Destroy()
	if err != nil {
		t.Fatal(err)
	}
}
// TestSetRollover exercises the rollover pattern with first-available
// (non-serial) ordinal selection.
func TestSetRollover(t *testing.T) {
	testSetRollover(t, false)
}
// TestSetRolloverSerial exercises the rollover pattern with the serial
// flag enabled.
func TestSetRolloverSerial(t *testing.T) {
	testSetRollover(t, true)
}
// TestMarshalJSON round-trips a handle through MarshalJSON/UnmarshalJSON
// and also checks the output against a golden serialization to guard the
// persisted on-disk format against accidental changes.
func TestMarshalJSON(t *testing.T) {
	const expectedID = "my-bitseq"
	expected := []byte("hello libnetwork")
	hnd, err := NewHandle("", nil, expectedID, uint64(len(expected)*8))
	if err != nil {
		t.Fatal(err)
	}
	// Set the bits corresponding to the 1-bits of each byte of expected,
	// least-significant bit first, so the bitmap encodes the string.
	for i, c := range expected {
		for j := 0; j < 8; j++ {
			if c&(1<<j) == 0 {
				continue
			}
			if err := hnd.Set(uint64(i*8 + j)); err != nil {
				t.Fatal(err)
			}
		}
	}
	hstr := hnd.String()
	t.Log(hstr)
	marshaled, err := hnd.MarshalJSON()
	if err != nil {
		t.Fatalf("MarshalJSON() err = %v", err)
	}
	t.Logf("%s", marshaled)
	// Serializations of hnd as would be marshaled by versions of the code
	// found in the wild. We need to support unmarshaling old versions to
	// maintain backwards compatibility with sequences persisted on disk.
	const (
		goldenV0 = `{"id":"my-bitseq","sequence":"AAAAAAAAAIAAAAAAAAAAPRamNjYAAAAAAAAAAfYENpYAAAAAAAAAAUZ2pi4AAAAAAAAAAe72TtYAAAAAAAAAAQ=="}`
	)
	if string(marshaled) != goldenV0 {
		t.Errorf("MarshalJSON() output differs from golden. Please add a new golden case to this test.")
	}
	// Unmarshal both the live output and the golden payload into fresh
	// handles and check each reproduces the original state.
	for _, tt := range []struct {
		name string
		data []byte
	}{
		{name: "Live", data: marshaled},
		{name: "Golden-v0", data: []byte(goldenV0)},
	} {
		tt := tt
		t.Run("UnmarshalJSON="+tt.name, func(t *testing.T) {
			hnd2, err := NewHandle("", nil, "", 0)
			if err != nil {
				t.Fatal(err)
			}
			if err := hnd2.UnmarshalJSON(tt.data); err != nil {
				t.Errorf("UnmarshalJSON() err = %v", err)
			}
			h2str := hnd2.String()
			t.Log(h2str)
			if hstr != h2str {
				t.Errorf("Unmarshaled a different bitseq: want %q, got %q", hstr, h2str)
			}
		})
	}
}

View file

@@ -1,128 +0,0 @@
package bitseq
import (
"encoding/json"
"github.com/docker/docker/libnetwork/bitmap"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/types"
)
// Key provides the Key to be used in KV Store.
func (h *Handle) Key() []string {
	h.mu.Lock()
	key := []string{h.app, h.id}
	h.mu.Unlock()
	return key
}
// KeyPrefix returns the immediate parent key that can be used for tree walk.
func (h *Handle) KeyPrefix() []string {
	h.mu.Lock()
	prefix := []string{h.app}
	h.mu.Unlock()
	return prefix
}
// Value marshals the data to be stored in the KV store.
func (h *Handle) Value() []byte {
	b, err := json.Marshal(h)
	if err != nil {
		// The datastore.KVObject interface offers no way to surface this
		// error; a nil value is the only failure signal available.
		return nil
	}
	return b
}
// SetValue unmarshals the data from the KV store. Handle implements
// json.Unmarshaler, so decoding is delegated to (*Handle).UnmarshalJSON,
// which takes the lock itself.
func (h *Handle) SetValue(value []byte) error {
	if err := json.Unmarshal(value, h); err != nil {
		return err
	}
	return nil
}
// Index returns the latest DB Index as seen by this object.
func (h *Handle) Index() uint64 {
	h.mu.Lock()
	idx := h.dbIndex
	h.mu.Unlock()
	return idx
}
// SetIndex method allows the datastore to store the latest DB Index into
// this object. It also marks the object as present in the DB.
func (h *Handle) SetIndex(index uint64) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.dbIndex = index
	h.dbExists = true
}
// Exists method is true if this object has been stored in the DB.
func (h *Handle) Exists() bool {
	h.mu.Lock()
	exists := h.dbExists
	h.mu.Unlock()
	return exists
}
// New method returns a handle based on the receiver handle.
func (h *Handle) New() datastore.KVObject {
	h.mu.Lock()
	defer h.mu.Unlock()
	// Only app and store carry over; the bitmap, id, and index are
	// presumably populated later via SetValue/SetIndex — verify against
	// the datastore's usage of KVObject.New.
	return &Handle{
		app:   h.app,
		store: h.store,
	}
}
// CopyTo deep copies the handle into the passed destination object.
func (h *Handle) CopyTo(o datastore.KVObject) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	dstH := o.(*Handle)
	if h == dstH {
		// Self-copy: nothing to do, and locking dstH below would deadlock.
		return nil
	}
	// NOTE(review): locks are acquired in h-then-dstH order; two
	// concurrent CopyTo calls in opposite directions could deadlock —
	// confirm callers never do that.
	dstH.mu.Lock()
	defer dstH.mu.Unlock()
	// The bitmap is cloned so the destination shares no mutable state.
	dstH.bm = bitmap.Copy(h.bm)
	dstH.app = h.app
	dstH.id = h.id
	dstH.dbIndex = h.dbIndex
	dstH.dbExists = h.dbExists
	dstH.store = h.store
	return nil
}
// Skip provides a way for a KV Object to avoid persisting it in the KV
// Store. Bit sequences are always persisted, so this is unconditionally
// false.
func (h *Handle) Skip() bool {
	return false
}
// DataScope method returns the storage scope of the datastore.
func (h *Handle) DataScope() string {
	h.mu.Lock()
	scope := h.store.Scope()
	h.mu.Unlock()
	return scope
}
// writeToStore atomically persists the handle. A key-modified conflict is
// converted into a types.RetryError so callers know the write can be
// retried after refreshing state; a nil store is a no-op.
func (h *Handle) writeToStore() error {
	h.mu.Lock()
	store := h.store
	h.mu.Unlock()
	if store == nil {
		return nil
	}
	if err := store.PutObjectAtomic(h); err != nil {
		if err == datastore.ErrKeyModified {
			return types.RetryErrorf("failed to perform atomic write (%v). Retry might fix the error", err)
		}
		return err
	}
	return nil
}
// deleteFromStore atomically removes the handle from the datastore.
// A nil store (in-memory handle) is a no-op.
func (h *Handle) deleteFromStore() error {
	var store datastore.DataStore
	h.mu.Lock()
	store = h.store
	h.mu.Unlock()
	if store == nil {
		return nil
	}
	return store.DeleteObjectAtomic(h)
}