// Package splunk provides the log driver for forwarding server logs to
// Splunk HTTP Event Collector endpoint.
package splunk // import "github.com/docker/docker/daemon/logger/splunk"

import (
	"bytes"
	"compress/gzip"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/containerd/log"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/loggerutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/google/uuid"
)

const (
	driverName = "splunk"
	splunkURLKey = "splunk-url"
	splunkTokenKey = "splunk-token"
	splunkSourceKey = "splunk-source"
	splunkSourceTypeKey = "splunk-sourcetype"
	splunkIndexKey = "splunk-index"
	splunkCAPathKey = "splunk-capath"
	splunkCANameKey = "splunk-caname"
	splunkInsecureSkipVerifyKey = "splunk-insecureskipverify"
	splunkFormatKey = "splunk-format"
	splunkVerifyConnectionKey = "splunk-verify-connection" // #nosec G101 -- ignoring: Potential hardcoded credentials (gosec)
	splunkGzipCompressionKey = "splunk-gzip"
	splunkGzipCompressionLevelKey = "splunk-gzip-level"
	splunkIndexAcknowledgment = "splunk-index-acknowledgment"
	envKey = "env"
	envRegexKey = "env-regex"
	labelsKey = "labels"
	labelsRegexKey = "labels-regex"
	tagKey = "tag"
)

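// For illustration only (hypothetical values): a container using this driver
// is typically started with log options along these lines:
//
//	docker run --log-driver=splunk \
//	    --log-opt splunk-url=https://splunkhost:8088 \
//	    --log-opt splunk-token=<token> \
//	    --log-opt splunk-format=inline \
//	    <image>
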
const (
	// How often we send messages if we have not reached the batch size
	defaultPostMessagesFrequency = 5 * time.Second
	// Maximum number of messages in one batch
	defaultPostMessagesBatchSize = 1000
	// Maximum number of messages we can store in the buffer
	defaultBufferMaximum = 10 * defaultPostMessagesBatchSize
	// Number of messages allowed to be queued in the channel
	defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize
	// maxResponseSize is the maximum number of bytes that will be read from an HTTP response
	maxResponseSize = 1024
)

const (
	envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY"
	envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE"
	envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX"
	envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
)

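// These variables are read from the daemon process environment by
// getAdvancedOptionDuration and getAdvancedOptionInt below. For illustration
// only, with hypothetical values, an operator might set before starting the daemon:
//
//	export SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY=10s
//	export SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE=500
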
var batchSendTimeout = 30 * time.Second

type splunkLoggerInterface interface {
	logger.Logger
	worker()
}

type splunkLogger struct {
	client *http.Client
	transport *http.Transport
	url string
	auth string
	nullMessage *splunkMessage

	// http compression
	gzipCompression bool
	gzipCompressionLevel int

	// Advanced options
	postMessagesFrequency time.Duration
	postMessagesBatchSize int
	bufferMaximum int
	indexAck bool

	// For synchronization between the background worker and the logger.
	// We use a channel to send messages to the worker goroutine.
	// All other fields are used to block the Close call until all messages are flushed to HEC.
	stream chan *splunkMessage
	lock sync.RWMutex
	closed bool
	closedCond *sync.Cond
}

type splunkLoggerInline struct {
	*splunkLogger
	nullEvent *splunkMessageEvent
}

type splunkLoggerJSON struct {
	*splunkLoggerInline
}

type splunkLoggerRaw struct {
	*splunkLogger
	prefix []byte
}

type splunkMessage struct {
	Event interface{} `json:"event"`
	Time string `json:"time"`
	Host string `json:"host"`
	Source string `json:"source,omitempty"`
	SourceType string `json:"sourcetype,omitempty"`
	Index string `json:"index,omitempty"`
}

type splunkMessageEvent struct {
	Line interface{} `json:"line"`
	Source string `json:"source"`
	Tag string `json:"tag,omitempty"`
	Attrs map[string]string `json:"attrs,omitempty"`
}

const (
	splunkFormatRaw = "raw"
	splunkFormatJSON = "json"
	splunkFormatInline = "inline"
)

func init() {
	if err := logger.RegisterLogDriver(driverName, New); err != nil {
		panic(err)
	}
	if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil {
		panic(err)
	}
}

// New creates a splunk logger driver using the configuration passed in the context
func New(info logger.Info) (logger.Logger, error) {
	hostname, err := info.Hostname()
	if err != nil {
		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
	}

	// Parse and validate Splunk URL
	splunkURL, err := parseURL(info)
	if err != nil {
		return nil, err
	}

	// Splunk Token is a required parameter
	splunkToken, ok := info.Config[splunkTokenKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
	}

	// FIXME set minimum TLS version for splunk (see https://github.com/moby/moby/issues/42443)
	tlsConfig := &tls.Config{} //nolint: gosec // G402: TLS MinVersion too low.

	// Splunk uses autogenerated certificates by default;
	// allow users to trust them by skipping verification
	if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok {
		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
		if err != nil {
			return nil, err
		}
		tlsConfig.InsecureSkipVerify = insecureSkipVerify
	}

	// If a path to the root certificate is provided, load it
	if caPath, ok := info.Config[splunkCAPathKey]; ok {
		caCert, err := os.ReadFile(caPath)
		if err != nil {
			return nil, err
		}
		caPool := x509.NewCertPool()
		caPool.AppendCertsFromPEM(caCert)
		tlsConfig.RootCAs = caPool
	}

	if caName, ok := info.Config[splunkCANameKey]; ok {
		tlsConfig.ServerName = caName
	}

	gzipCompression := false
	if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok {
		gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
		if err != nil {
			return nil, err
		}
	}

	gzipCompressionLevel := gzip.DefaultCompression
	if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok {
		var err error
		gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
		if err != nil {
			return nil, err
		}
		gzipCompressionLevel = int(gzipCompressionLevel64)
		if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
			err := fmt.Errorf("not supported level '%s' for %s (supported values between %d and %d)",
				gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
			return nil, err
		}
	}

	indexAck := false
	if indexAckStr, ok := info.Config[splunkIndexAcknowledgment]; ok {
		indexAck, err = strconv.ParseBool(indexAckStr)
		if err != nil {
			return nil, err
		}
	}

	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
		Proxy: http.ProxyFromEnvironment,
	}
	client := &http.Client{
		Transport: transport,
	}

	source := info.Config[splunkSourceKey]
	sourceType := info.Config[splunkSourceTypeKey]
	index := info.Config[splunkIndexKey]

	nullMessage := &splunkMessage{
		Host: hostname,
		Source: source,
		SourceType: sourceType,
		Index: index,
	}

	// Allow the user to remove the tag from the messages by setting the tag to an empty string
	tag := ""
	if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
		tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
		if err != nil {
			return nil, err
		}
	}

	attrs, err := info.ExtraAttributes(nil)
	if err != nil {
		return nil, err
	}

	var (
		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
		bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
		streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
	)

	logger := &splunkLogger{
		client: client,
		transport: transport,
		url: splunkURL.String(),
		auth: "Splunk " + splunkToken,
		nullMessage: nullMessage,
		gzipCompression: gzipCompression,
		gzipCompressionLevel: gzipCompressionLevel,
		stream: make(chan *splunkMessage, streamChannelSize),
		postMessagesFrequency: postMessagesFrequency,
		postMessagesBatchSize: postMessagesBatchSize,
		bufferMaximum: bufferMaximum,
		indexAck: indexAck,
	}

	// By default we verify the connection, but we allow the user to skip that
	verifyConnection := true
	if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
		var err error
		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
		if err != nil {
			return nil, err
		}
	}
	if verifyConnection {
		err = verifySplunkConnection(logger)
		if err != nil {
			return nil, err
		}
	}

	var splunkFormat string
	if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok {
		switch splunkFormatParsed {
		case splunkFormatInline:
		case splunkFormatJSON:
		case splunkFormatRaw:
		default:
			return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormatParsed)
		}
		splunkFormat = splunkFormatParsed
	} else {
		splunkFormat = splunkFormatInline
	}

	var loggerWrapper splunkLoggerInterface

	switch splunkFormat {
	case splunkFormatInline:
		nullEvent := &splunkMessageEvent{
			Tag: tag,
			Attrs: attrs,
		}
		loggerWrapper = &splunkLoggerInline{logger, nullEvent}
	case splunkFormatJSON:
		nullEvent := &splunkMessageEvent{
			Tag: tag,
			Attrs: attrs,
		}
		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
	case splunkFormatRaw:
		var prefix bytes.Buffer
		if tag != "" {
			prefix.WriteString(tag)
			prefix.WriteString(" ")
		}
		for key, value := range attrs {
			prefix.WriteString(key)
			prefix.WriteString("=")
			prefix.WriteString(value)
			prefix.WriteString(" ")
		}
		loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
	default:
		return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
	}

	go loggerWrapper.worker()

	return loggerWrapper, nil
}

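// Log queues a log message for delivery to Splunk HEC using the inline format,
// embedding the log line as a plain string in the event payload.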
func (l *splunkLoggerInline) Log(msg *logger.Message) error {
	message := l.createSplunkMessage(msg)

	event := *l.nullEvent
	event.Line = string(msg.Line)
	event.Source = msg.Source

	message.Event = &event
	logger.PutMessage(msg)
	return l.queueMessageAsync(message)
}

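// Log queues a log message for delivery to Splunk HEC using the json format;
// if the log line is valid JSON it is embedded as a raw JSON value, otherwise
// it falls back to a plain string.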
func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
	message := l.createSplunkMessage(msg)
	event := *l.nullEvent

	var rawJSONMessage json.RawMessage
	if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil {
		event.Line = &rawJSONMessage
	} else {
		event.Line = string(msg.Line)
	}

	event.Source = msg.Source
	message.Event = &event
	logger.PutMessage(msg)
	return l.queueMessageAsync(message)
}

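// Log queues a log message for delivery to Splunk HEC using the raw format,
// prepending the precomputed tag and attribute prefix to the log line.
// Empty or whitespace-only lines are dropped because HEC rejects them.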
func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
	// empty or whitespace-only messages are not accepted by HEC
	if strings.TrimSpace(string(msg.Line)) == "" {
		return nil
	}

	message := l.createSplunkMessage(msg)
	message.Event = string(append(l.prefix, msg.Line...))
	logger.PutMessage(msg)
	return l.queueMessageAsync(message)
}

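// queueMessageAsync hands a message to the background worker via the stream
// channel, returning an error if the driver has already been closed.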
func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error {
	l.lock.RLock()
	defer l.lock.RUnlock()
	if l.closedCond != nil {
		return fmt.Errorf("%s: driver is closed", driverName)
	}
	l.stream <- message
	return nil
}

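// worker runs in a background goroutine. It batches messages from the stream
// channel and posts them to HEC when the batch size is reached or when the
// post frequency ticker fires; once the stream is closed it flushes the
// remaining messages and signals Close that the driver has shut down.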
func (l *splunkLogger) worker() {
	timer := time.NewTicker(l.postMessagesFrequency)
	var messages []*splunkMessage
	for {
		select {
		case message, open := <-l.stream:
			if !open {
				l.postMessages(messages, true)
				l.lock.Lock()
				defer l.lock.Unlock()
				l.transport.CloseIdleConnections()
				l.closed = true
				l.closedCond.Signal()
				return
			}
			messages = append(messages, message)
			// Only send when we reach exactly the batch size.
			// This also avoids firing postMessages on every new message
			// when a previous try has failed.
			if len(messages)%l.postMessagesBatchSize == 0 {
				messages = l.postMessages(messages, false)
			}
		case <-timer.C:
			messages = l.postMessages(messages, false)
		}
	}
}

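// postMessages sends the buffered messages to HEC in batches and returns the
// messages that could not be sent. When lastChance is set, or when the backlog
// reaches bufferMaximum, undeliverable messages are written to the daemon log
// instead of being kept for retry.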
func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
	messagesLen := len(messages)

	ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout)
	defer cancel()

	for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
		upperBound := i + l.postMessagesBatchSize
		if upperBound > messagesLen {
			upperBound = messagesLen
		}

		if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil {
			log.G(ctx).WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs")
			if messagesLen-i >= l.bufferMaximum || lastChance {
				// If this is the last chance, print them all to the daemon log
				if lastChance {
					upperBound = messagesLen
				}
				// Not all messages were sent, but the buffer has reached its maximum; log all
				// messages we could not send and return the buffer minus one batch size
				for j := i; j < upperBound; j++ {
					if jsonEvent, err := json.Marshal(messages[j]); err != nil {
						log.G(ctx).Error(err)
					} else {
						log.G(ctx).Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent)))
					}
				}
				return messages[upperBound:messagesLen]
			}
			// Not all messages were sent; return the part of the buffer we have not sent yet
			return messages[i:messagesLen]
		}
	}
	// All messages were sent; return an empty buffer
	return messages[:0]
}

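// tryPostMessages marshals a single batch of messages into one HTTP POST body
// (gzip-compressed if enabled) and sends it to the HEC endpoint, returning an
// error for any non-OK response.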
func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error {
	if len(messages) == 0 {
		return nil
	}
	var buffer bytes.Buffer
	var writer io.Writer
	var gzipWriter *gzip.Writer
	var err error
	// If gzip compression is enabled, create a gzip writer with the specified
	// compression level. If gzip compression is disabled, use the plain buffer as the writer.
	if l.gzipCompression {
		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
		if err != nil {
			return err
		}
		writer = gzipWriter
	} else {
		writer = &buffer
	}
	for _, message := range messages {
		jsonEvent, err := json.Marshal(message)
		if err != nil {
			return err
		}
		if _, err := writer.Write(jsonEvent); err != nil {
			return err
		}
	}
	// If gzip compression is enabled, close the writer to flush the compressed data
	if l.gzipCompression {
		err = gzipWriter.Close()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest(http.MethodPost, l.url, bytes.NewBuffer(buffer.Bytes()))
	if err != nil {
		return err
	}
	req = req.WithContext(ctx)
	req.Header.Set("Authorization", l.auth)
	// Indicate that we are sending a gzip-compressed body
	if l.gzipCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}
	// Set the request channel header if index acknowledgment is enabled
	if l.indexAck {
		requestChannel, err := uuid.NewRandom()
		if err != nil {
			return err
		}
		req.Header.Set("X-Splunk-Request-Channel", requestChannel.String())
	}
	resp, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		pools.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		rdr := io.LimitReader(resp.Body, maxResponseSize)
		body, err := io.ReadAll(rdr)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, resp.Status, string(body))
	}
	return nil
}

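// Close closes the message stream and blocks until the background worker has
// flushed all pending messages to HEC.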
func (l *splunkLogger) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.closedCond == nil {
		l.closedCond = sync.NewCond(&l.lock)
		close(l.stream)
		for !l.closed {
			l.closedCond.Wait()
		}
	}
	return nil
}

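// Name returns the name of the log driver.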
func (l *splunkLogger) Name() string {
	return driverName
}

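// createSplunkMessage copies the template message and stamps it with the log
// message timestamp, expressed as fractional seconds since the Unix epoch.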
func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage {
	message := *l.nullMessage
	message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second))
	return &message
}

// ValidateLogOpt looks for all options supported by the splunk driver
func ValidateLogOpt(cfg map[string]string) error {
	for key := range cfg {
		switch key {
		case splunkURLKey:
		case splunkTokenKey:
		case splunkSourceKey:
		case splunkSourceTypeKey:
		case splunkIndexKey:
		case splunkCAPathKey:
		case splunkCANameKey:
		case splunkInsecureSkipVerifyKey:
		case splunkFormatKey:
		case splunkVerifyConnectionKey:
		case splunkGzipCompressionKey:
		case splunkGzipCompressionLevelKey:
		case splunkIndexAcknowledgment:
		case envKey:
		case envRegexKey:
		case labelsKey:
		case labelsRegexKey:
		case tagKey:
		default:
			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName)
		}
	}
	return nil
}

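// parseURL validates the configured splunk-url (scheme://host:port with no
// path, query, or fragment) and appends the HEC event collector endpoint path.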
func parseURL(info logger.Info) (*url.URL, error) {
	splunkURLStr, ok := info.Config[splunkURLKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
	}

	splunkURL, err := url.Parse(splunkURLStr)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey)
	}

	if !splunkURL.IsAbs() ||
		(splunkURL.Scheme != "http" && splunkURL.Scheme != "https") ||
		(splunkURL.Path != "" && splunkURL.Path != "/") ||
		splunkURL.RawQuery != "" ||
		splunkURL.Fragment != "" {
		return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
	}

	splunkURL.Path = "/services/collector/event/1.0"

	return splunkURL, nil
}

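// verifySplunkConnection sends an OPTIONS request to the HEC endpoint to check
// that it is reachable before the driver starts sending logs.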
func verifySplunkConnection(l *splunkLogger) error {
	req, err := http.NewRequest(http.MethodOptions, l.url, nil)
	if err != nil {
		return err
	}
	resp, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer func() {
		pools.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		rdr := io.LimitReader(resp.Body, maxResponseSize)
		body, err := io.ReadAll(rdr)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, resp.Status, string(body))
	}
	return nil
}

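// getAdvancedOptionDuration reads a duration override from the named
// environment variable, falling back to the default when it is unset or unparsable.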
func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
	valueStr := os.Getenv(envName)
	if valueStr == "" {
		return defaultValue
	}
	parsedValue, err := time.ParseDuration(valueStr)
	if err != nil {
		log.G(context.TODO()).Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err))
		return defaultValue
	}
	return parsedValue
}

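// getAdvancedOptionInt reads an integer override from the named environment
// variable, falling back to the default when it is unset or unparsable.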
func getAdvancedOptionInt(envName string, defaultValue int) int {
	valueStr := os.Getenv(envName)
	if valueStr == "" {
		return defaultValue
	}
	parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
	if err != nil {
		log.G(context.TODO()).Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err))
		return defaultValue
	}
	return int(parsedValue)
}