package networkdb

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/sirupsen/logrus"
)
const (
	reapPeriod            = 5 * time.Second
	rejoinClusterDuration = 10 * time.Second
	rejoinInterval        = 60 * time.Second
	retryInterval         = 1 * time.Second
	nodeReapInterval      = 24 * time.Hour
	nodeReapPeriod        = 2 * time.Hour
	// considering a cluster with > 20 nodes and a drain speed of 100 msg/s
	// the following is roughly 1 minute
	maxQueueLenBroadcastOnSync = 500
)
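
// One plausible reading of the arithmetic behind maxQueueLenBroadcastOnSync
// (a sketch, assuming memberlist's usual retransmit model of roughly
// RetransmitMult*ceil(log10(N+1)) sends per broadcast, with the default
// RetransmitMult of 4):
//
//	sendsPerMsg := 4 * 2            // N > 20 nodes => ceil(log10(N+1)) == 2
//	totalSends := 500 * sendsPerMsg // 4000 sends for a full queue
//	drainSecs := totalSends / 100   // ~40s at 100 msg/s, i.e. roughly 1 minute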
// logWriter redirects memberlist's standard-library log output to logrus,
// mapping the level prefixes memberlist emits to the matching logrus levels.
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}
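
// For example (illustrative), a memberlist line such as
//
//	[INFO] memberlist: Suspect 2f3a1b4c has failed, no acks received
//
// is re-emitted as logrus.Info("memberlist: Suspect 2f3a1b4c has failed, no
// acks received"); note that [ERR] lines are deliberately downgraded to
// logrus.Warn above.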
// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %.5s", hex.EncodeToString(key))
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}
// SetPrimaryKey sets the given key as the primary key. The key must
// already have been added to the ring via SetKey.
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %.5s", hex.EncodeToString(key))
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}
// RemoveKey removes a key from the key ring. The key being removed
// must not be the primary key.
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %.5s", hex.EncodeToString(key))
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}
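
// A typical rotation sequence for the three key-ring methods above (an
// illustrative sketch; newKey and oldKey are hypothetical placeholders):
//
//	nDB.SetKey(newKey)        // 1. make the new key known to the ring first
//	nDB.SetPrimaryKey(newKey) // 2. start encrypting outbound gossip with it
//	nDB.RemoveKey(oldKey)     // 3. retire the old key, which is no longer primary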
func (nDB *NetworkDB) clusterInit() error {
	nDB.lastStatsTimestamp = time.Now()
	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp

	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeID
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr
	config.UDPBufferSize = nDB.config.PacketBufferSize

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %.5s", i+1, hex.EncodeToString(key))
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.ctx, nDB.cancelCtx = context.WithCancel(context.Background())
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
		{rejoinInterval, nDB.rejoinClusterBootStrap},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
func (nDB *NetworkDB) retryJoin(ctx context.Context, members []string) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-ctx.Done():
			return
		}
	}
}
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, there is no need to explicitly call retryJoin
		// here: rejoinClusterBootStrap, which runs every minute, will retry
		// the join for up to 10 seconds.
		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	// cancel the context
	nDB.cancelCtx()

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, f func()) {
	// Use a random stagger to avoid synchronizing
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-nDB.ctx.Done():
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-nDB.ctx.Done():
			return
		}
	}
}
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for _, nodeMap := range []map[string]*node{
		nDB.failedNodes,
		nDB.leftNodes,
	} {
		for id, n := range nodeMap {
			if n.reapTime > nodeReapPeriod {
				n.reapTime -= nodeReapPeriod
				continue
			}
			logrus.Debugf("Garbage collect node %v", n.Name)
			delete(nodeMap, id)
		}
	}
}
// rejoinClusterBootStrap is called periodically to check whether all the
// bootstrap nodes are active in the cluster. If they are not, it calls the
// cluster join to merge the two separate clusters that form when all the
// managers are stopped and started at the same time.
func (nDB *NetworkDB) rejoinClusterBootStrap() {
	nDB.RLock()
	if len(nDB.bootStrapIP) == 0 {
		nDB.RUnlock()
		return
	}

	myself, ok := nDB.nodes[nDB.config.NodeID]
	if !ok {
		nDB.RUnlock()
		logrus.Warnf("rejoinClusterBootstrap unable to find local node info using ID:%v", nDB.config.NodeID)
		return
	}
	bootStrapIPs := make([]string, 0, len(nDB.bootStrapIP))
	for _, bootIP := range nDB.bootStrapIP {
		// bootstrap IPs are usually IP:port pairs coming from the Join
		var bootstrapIP net.IP
		ipStr, _, err := net.SplitHostPort(bootIP)
		if err != nil {
			// not a host:port pair, try to parse it as a plain IP.
			// Note this seems to be the case for swarm nodes that do not
			// specify any port.
			ipStr = bootIP
		}
		bootstrapIP = net.ParseIP(ipStr)
		if bootstrapIP != nil {
			for _, node := range nDB.nodes {
				if node.Addr.Equal(bootstrapIP) && !node.Addr.Equal(myself.Addr) {
					// One of the bootstrap nodes (and not myself) is part of the cluster, return
					nDB.RUnlock()
					return
				}
			}
			bootStrapIPs = append(bootStrapIPs, bootIP)
		}
	}
	nDB.RUnlock()
	if len(bootStrapIPs) == 0 {
		// this also avoids calling Join with an empty list, which would
		// erase the current bootstrap IP list
		logrus.Debug("rejoinClusterBootStrap did not find any valid IP")
		return
	}
	// None of the bootStrap nodes are in the cluster, call memberlist join
	logrus.Debugf("rejoinClusterBootStrap, calling cluster join with bootStrap %v", bootStrapIPs)
	ctx, cancel := context.WithTimeout(nDB.ctx, rejoinClusterDuration)
	defer cancel()
	nDB.retryJoin(ctx, bootStrapIPs)
}
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}
// For timing the entry deletion in the reaper, APIs that use a monotonic
// clock source (time.Now, Sub, etc.) should be avoided. Hence we keep a
// reapTime in every entry, set initially to reapInterval and decremented by
// reapPeriod every time the reaper runs. NOTE nDB.reapTableEntries updates
// the reapTime with a read lock. This is safe as long as no other concurrent
// path touches the reapTime field.
func (nDB *NetworkDB) reapState() {
	// reapTableEntries leverages the presence of the network, so garbage
	// collect the entries first
	nDB.reapTableEntries()
	nDB.reapNetworks()
}
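
// Illustrative arithmetic for the countdown scheme described above: an entry
// marked for deletion starts at reapTime = reapInterval (defined elsewhere in
// the package) and loses reapPeriod per run, so it survives about
//
//	reapInterval / reapPeriod // e.g. 30*time.Minute / (5*time.Second) = 360 runs
//
// reaper runs before removal (the 30-minute figure is an assumption for the
// example, not taken from this file).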
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for _, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}
func (nDB *NetworkDB) reapTableEntries() {
	var nodeNetworks []string
	// This is best effort; any change to the list of networks will be picked
	// up in the next cycle
	nDB.RLock()
	for nid := range nDB.networks[nDB.config.NodeID] {
		nodeNetworks = append(nodeNetworks, nid)
	}
	nDB.RUnlock()

	cycleStart := time.Now()
	// In order to avoid blocking the database for a long time, apply the
	// garbage collection logic by network. The lock is taken at the beginning
	// of the cycle and the deletion is inline
	for _, nid := range nodeNetworks {
		nDB.Lock()
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			// timeCompensation compensates for the time the lock may have
			// taken to be released
			timeCompensation := time.Since(cycleStart)
			entry, ok := v.(*entry)
			if !ok || !entry.deleting {
				return false
			}

			// In this check we add an extra second to guarantee that when the
			// value is truncated to an int32 to fit the packet for the
			// tableEvent, the number is always strictly > 1 and never 0
			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
				entry.reapTime -= reapPeriod + timeCompensation
				return false
			}

			params := strings.Split(path[1:], "/")
			nid := params[0]
			tname := params[1]
			key := params[2]

			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
			if !okTable {
				logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid)
			}
			if !okNetwork {
				logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname)
			}

			return false
		})
		nDB.Unlock()
	}
}
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeID]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
	nDB.RUnlock()

	if printHealth {
		healthScore := nDB.memberlist.GetHealthScore()
		if healthScore != 0 {
			logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore)
		}
		nDB.lastHealthTimestamp = time.Now()
	}

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed
			// between the time we collect the network
			// attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts

		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		// Collect stats and print the queue info; note this code is here also
		// to have a view of the queues when they are empty
		network.qMessagesSent += len(msgs)
		if printStats {
			logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
				nDB.config.Hostname, nDB.config.NodeID,
				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
				network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
			network.qMessagesSent = 0
		}

		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
	// Reset the stats
	if printStats {
		nDB.lastStatsTimestamp = time.Now()
	}
}
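
// Rough per-round budget for the gossip loop above (a sketch; the 1400-byte
// figure is an assumed default for PacketBufferSize, which is configurable):
//
//	bytesAvail := 1400 - compoundHeaderOverhead
//
// Each round packs at most that many bytes of queued table events into one
// compound UDP message and sends it best-effort to up to 3 random peers per
// network.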
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeID] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have
		// successfully completed bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	var success bool
	for _, node := range nodes {
		if node == nDB.config.NodeID {
			continue
		}
		logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		} else {
			// bulk sync succeeded
			success = true
			// if it is a periodic bulk sync, stop after the first successful sync
			if !all {
				break
			}
		}
	}

	if success {
		// if at least one node sync succeeded
		return networks, nil
	}
	return nil, err
}
// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s",
		nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
				// The residual reap time is in seconds; Seconds() returns a
				// float64 that is truncated to an int32 here for the wire format
				ResidualReapTime: int32(entry.reapTime.Seconds()),
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeID,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendReliable(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()
		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime))
		}
		t.Stop()
	}

	return nil
}
// randomOffset returns a random offset in the range [0, n).
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}
// mRandomNodes is used to select up to m random nodes. It is possible
// that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times; with large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeID {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}
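
// Usage note (illustrative): gossip above calls nDB.mRandomNodes(3, nodes) to
// pick up to three distinct peers per network; the local node is always
// skipped, so a network with only two remote peers yields a two-element slice.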