core.go 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972
  1. package btf
  2. import (
  3. "encoding/binary"
  4. "errors"
  5. "fmt"
  6. "math"
  7. "reflect"
  8. "strconv"
  9. "strings"
  10. "github.com/cilium/ebpf/asm"
  11. )
  12. // Code in this file is derived from libbpf, which is available under a BSD
  13. // 2-Clause license.
// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
	// kind is the type of relocation, taken from the BPF source code.
	kind coreKind
	// local is the value computed against the local (compile-time) BTF.
	local uint32
	// target is the value computed against the target (e.g. kernel) BTF.
	target uint32
	// True if there is no valid fixup. The instruction is replaced with an
	// invalid dummy.
	poison bool
	// True if the validation of the local value should be skipped. Used by
	// some kinds of bitfield relocations.
	skipLocalValidation bool
}
  26. func (f *COREFixup) equal(other COREFixup) bool {
  27. return f.local == other.local && f.target == other.target
  28. }
  29. func (f *COREFixup) String() string {
  30. if f.poison {
  31. return fmt.Sprintf("%s=poison", f.kind)
  32. }
  33. return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
  34. }
// Apply patches ins in place according to the fixup.
//
// Returns an error if the fixup can't be applied to this kind of instruction,
// or if the instruction's current value doesn't match the expected local
// value (unless skipLocalValidation is set).
func (f *COREFixup) Apply(ins *asm.Instruction) error {
	if f.poison {
		// Replace the instruction with a call to an invalid builtin, so the
		// verifier rejects the program if this code path is ever reachable.
		const badRelo = 0xbad2310

		*ins = asm.BuiltinFunc(badRelo).Call()
		return nil
	}

	switch class := ins.OpCode.Class(); class {
	case asm.LdXClass, asm.StClass, asm.StXClass:
		// Memory access: the fixup rewrites the 16 bit offset field.
		if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
		}

		if f.target > math.MaxInt16 {
			return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
		}

		ins.Offset = int16(f.target)

	case asm.LdClass:
		// Only dword-sized immediate loads carry a relocatable constant.
		if !ins.IsConstantLoad(asm.DWord) {
			return fmt.Errorf("not a dword-sized immediate load")
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
		}

		ins.Constant = int64(f.target)

	case asm.ALUClass:
		if ins.OpCode.ALUOp() == asm.Swap {
			return fmt.Errorf("relocation against swap")
		}

		// Shares the immediate-rewriting logic with ALU64 below.
		fallthrough

	case asm.ALU64Class:
		if src := ins.OpCode.Source(); src != asm.ImmSource {
			return fmt.Errorf("invalid source %s", src)
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
		}

		if f.target > math.MaxInt32 {
			return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
		}

		ins.Constant = int64(f.target)

	default:
		return fmt.Errorf("invalid class %s", class)
	}

	return nil
}
  79. func (f COREFixup) isNonExistant() bool {
  80. return f.kind.checksForExistence() && f.target == 0
  81. }
// coreKind is the type of CO-RE relocation as specified in BPF source code.
//
// NOTE(review): the values and order appear to mirror libbpf's
// enum bpf_core_relo_kind — confirm before reordering.
type coreKind uint32

const (
	reloFieldByteOffset coreKind = iota /* field byte offset */
	reloFieldByteSize                   /* field size in bytes */
	reloFieldExists                     /* field existence in target kernel */
	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
	reloTypeIDLocal                     /* type ID in local BPF object */
	reloTypeIDTarget                    /* type ID in target kernel */
	reloTypeExists                      /* type existence in target kernel */
	reloTypeSize                        /* type size in bytes */
	reloEnumvalExists                   /* enum value existence in target kernel */
	reloEnumvalValue                    /* enum value integer value */
)
  98. func (k coreKind) checksForExistence() bool {
  99. return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
  100. }
  101. func (k coreKind) String() string {
  102. switch k {
  103. case reloFieldByteOffset:
  104. return "byte_off"
  105. case reloFieldByteSize:
  106. return "byte_sz"
  107. case reloFieldExists:
  108. return "field_exists"
  109. case reloFieldSigned:
  110. return "signed"
  111. case reloFieldLShiftU64:
  112. return "lshift_u64"
  113. case reloFieldRShiftU64:
  114. return "rshift_u64"
  115. case reloTypeIDLocal:
  116. return "local_type_id"
  117. case reloTypeIDTarget:
  118. return "target_type_id"
  119. case reloTypeExists:
  120. return "type_exists"
  121. case reloTypeSize:
  122. return "type_size"
  123. case reloEnumvalExists:
  124. return "enumval_exists"
  125. case reloEnumvalValue:
  126. return "enumval_value"
  127. default:
  128. return "unknown"
  129. }
  130. }
// CORERelocate calculates the difference in types between local and target.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
	// Relocating across byte orders is not meaningful: offsets and shifts
	// would be computed against the wrong layout.
	if local.byteOrder != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
	}

	type reloGroup struct {
		relos []*CORERelocation
		// Position of each relocation in relos.
		indices []int
	}

	// Split relocations into per Type lists, remembering each relocation's
	// original index so results can be written back in input order.
	relosByType := make(map[Type]*reloGroup)
	result := make([]COREFixup, len(relos))
	for i, relo := range relos {
		if relo.kind == reloTypeIDLocal {
			// Filtering out reloTypeIDLocal here makes our lives a lot easier
			// down the line, since it doesn't have a target at all.
			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
			}

			id, err := local.TypeID(relo.typ)
			if err != nil {
				return nil, fmt.Errorf("%s: %w", relo.kind, err)
			}

			// local == target by construction: the answer only depends on
			// the local spec.
			result[i] = COREFixup{
				kind:   relo.kind,
				local:  uint32(id),
				target: uint32(id),
			}
			continue
		}

		group, ok := relosByType[relo.typ]
		if !ok {
			group = &reloGroup{}
			relosByType[relo.typ] = group
		}
		group.relos = append(group.relos, relo)
		group.indices = append(group.indices, i)
	}

	for localType, group := range relosByType {
		localTypeName := localType.TypeName()
		if localTypeName == "" {
			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
		}

		// Candidate targets share the local type's essential name.
		targets := target.namedTypes[newEssentialName(localTypeName)]
		fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", localType, err)
		}

		// Scatter the per-group fixups back to their original positions.
		for j, index := range group.indices {
			result[index] = fixups[j]
		}
	}

	return result, nil
}
  192. var errAmbiguousRelocation = errors.New("ambiguous relocation")
  193. var errImpossibleRelocation = errors.New("impossible relocation")
// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
	localID, err := localSpec.TypeID(local)
	if err != nil {
		return nil, fmt.Errorf("local type ID: %w", err)
	}
	local = Copy(local, UnderlyingType)

	// bestScore starts at len(relos), so only a target that poisons strictly
	// fewer than all relocations is ever recorded; fully-poisoned candidates
	// fall through to the fallback at the bottom.
	bestScore := len(relos)
	var bestFixups []COREFixup
	for i := range targets {
		targetID, err := targetSpec.TypeID(targets[i])
		if err != nil {
			return nil, fmt.Errorf("target type ID: %w", err)
		}
		target := Copy(targets[i], UnderlyingType)

		score := 0 // lower is better
		fixups := make([]COREFixup, 0, len(relos))
		for _, relo := range relos {
			fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
			if err != nil {
				return nil, fmt.Errorf("target %s: %w", target, err)
			}
			// Both outright poisoning and a failed existence check count
			// against the candidate.
			if fixup.poison || fixup.isNonExistant() {
				score++
			}
			fixups = append(fixups, fixup)
		}

		if score > bestScore {
			// We have a better target already, ignore this one.
			continue
		}

		if score < bestScore {
			// This is the best target yet, use it.
			bestScore = score
			bestFixups = fixups
			continue
		}

		// Some other target has the same score as the current one. Make sure
		// the fixups agree with each other.
		// (If bestFixups is still nil this loop is a no-op by design.)
		for i, fixup := range bestFixups {
			if !fixup.equal(fixups[i]) {
				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
			}
		}
	}

	if bestFixups == nil {
		// Nothing at all matched, probably because there are no suitable
		// targets at all.
		//
		// Poison everything except checksForExistence.
		bestFixups = make([]COREFixup, len(relos))
		for i, relo := range relos {
			if relo.kind.checksForExistence() {
				// Existence checks resolve to "does not exist" (target 0)
				// instead of poisoning the instruction.
				bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
			} else {
				bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
			}
		}
	}

	return bestFixups, nil
}
// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
	// Small constructors to keep the switch below readable.
	fixup := func(local, target uint32) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target}, nil
	}
	fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
	}
	poison := func() (COREFixup, error) {
		// Existence checks aren't poisoned; they simply answer "no".
		if relo.kind.checksForExistence() {
			return fixup(1, 0)
		}
		return COREFixup{kind: relo.kind, poison: true}, nil
	}
	zero := COREFixup{}

	switch relo.kind {
	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
		// Type-based relocations must use the fixed accessor "0".
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
		}

		err := coreAreTypesCompatible(local, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
		}

		switch relo.kind {
		case reloTypeExists:
			return fixup(1, 1)

		case reloTypeIDTarget:
			return fixup(uint32(localID), uint32(targetID))

		case reloTypeSize:
			localSize, err := Sizeof(local)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(target)
			if err != nil {
				return zero, err
			}

			return fixup(uint32(localSize), uint32(targetSize))
		}

	case reloEnumvalValue, reloEnumvalExists:
		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
		}

		switch relo.kind {
		case reloEnumvalExists:
			return fixup(1, 1)

		case reloEnumvalValue:
			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
		}

	case reloFieldSigned:
		switch local.(type) {
		case *Enum:
			// Enums are unconditionally treated as signed here.
			return fixup(1, 1)
		case *Int:
			// Compare the Signed encoding bit of both ints.
			return fixup(
				uint32(local.(*Int).Encoding&Signed),
				uint32(target.(*Int).Encoding&Signed),
			)
		default:
			return fixupWithoutValidation(0, 0)
		}

	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
		if _, ok := target.(*Fwd); ok {
			// We can't relocate fields using a forward declaration, so
			// skip it. If a non-forward declaration is present in the BTF
			// we'll find it in one of the other iterations.
			return poison()
		}

		localField, targetField, err := coreFindField(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("target %s: %w", target, err)
		}

		// Bitfield layout may legitimately differ between local and target,
		// so local validation is skipped for bitfield accesses.
		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
			f.skipLocalValidation = localField.bitfieldSize > 0
			return f, err
		}

		switch relo.kind {
		case reloFieldExists:
			return fixup(1, 1)

		case reloFieldByteOffset:
			return maybeSkipValidation(fixup(localField.offset, targetField.offset))

		case reloFieldByteSize:
			localSize, err := Sizeof(localField.Type)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(targetField.Type)
			if err != nil {
				return zero, err
			}
			return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))

		case reloFieldLShiftU64:
			// The shift needed to move the (bit)field's most significant
			// bit to bit 63 depends on the byte order of the target.
			var target uint32
			if byteOrder == binary.LittleEndian {
				targetSize, err := targetField.sizeBits()
				if err != nil {
					return zero, err
				}

				target = uint32(64 - targetField.bitfieldOffset - targetSize)
			} else {
				loadWidth, err := Sizeof(targetField.Type)
				if err != nil {
					return zero, err
				}

				target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
			}
			return fixupWithoutValidation(0, target)

		case reloFieldRShiftU64:
			targetSize, err := targetField.sizeBits()
			if err != nil {
				return zero, err
			}

			return fixupWithoutValidation(0, uint32(64-targetSize))
		}
	}

	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
}
  388. /* coreAccessor contains a path through a struct. It contains at least one index.
  389. *
  390. * The interpretation depends on the kind of the relocation. The following is
  391. * taken from struct bpf_core_relo in libbpf_internal.h:
  392. *
  393. * - for field-based relocations, string encodes an accessed field using
  394. * a sequence of field and array indices, separated by colon (:). It's
  395. * conceptually very close to LLVM's getelementptr ([0]) instruction's
  396. * arguments for identifying offset to a field.
  397. * - for type-based relocations, strings is expected to be just "0";
  398. * - for enum value-based relocations, string contains an index of enum
  399. * value within its enum type;
  400. *
  401. * Example to provide a better feel.
  402. *
  403. * struct sample {
  404. * int a;
  405. * struct {
  406. * int b[10];
  407. * };
  408. * };
  409. *
  410. * struct sample s = ...;
  411. * int x = &s->a; // encoded as "0:0" (a is field #0)
  412. * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
  413. * // b is field #0 inside anon struct, accessing elem #5)
  414. * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
  415. */
  416. type coreAccessor []int
  417. func parseCOREAccessor(accessor string) (coreAccessor, error) {
  418. if accessor == "" {
  419. return nil, fmt.Errorf("empty accessor")
  420. }
  421. parts := strings.Split(accessor, ":")
  422. result := make(coreAccessor, 0, len(parts))
  423. for _, part := range parts {
  424. // 31 bits to avoid overflowing int on 32 bit platforms.
  425. index, err := strconv.ParseUint(part, 10, 31)
  426. if err != nil {
  427. return nil, fmt.Errorf("accessor index %q: %s", part, err)
  428. }
  429. result = append(result, int(index))
  430. }
  431. return result, nil
  432. }
  433. func (ca coreAccessor) String() string {
  434. strs := make([]string, 0, len(ca))
  435. for _, i := range ca {
  436. strs = append(strs, strconv.Itoa(i))
  437. }
  438. return strings.Join(strs, ":")
  439. }
  440. func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
  441. e, ok := t.(*Enum)
  442. if !ok {
  443. return nil, fmt.Errorf("not an enum: %s", t)
  444. }
  445. if len(ca) > 1 {
  446. return nil, fmt.Errorf("invalid accessor %s for enum", ca)
  447. }
  448. i := ca[0]
  449. if i >= len(e.Values) {
  450. return nil, fmt.Errorf("invalid index %d for %s", i, e)
  451. }
  452. return &e.Values[i], nil
  453. }
// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
//	/- start of composite
//	| offset * 8 | bitfieldOffset | bitfieldSize | ... |
//	             \- start of field       end of field -/
type coreField struct {
	Type Type

	// The position of the field from the start of the composite type in bytes.
	offset uint32

	// The offset of the bitfield in bits from the start of the field.
	bitfieldOffset Bits

	// The size of the bitfield in bits.
	//
	// Zero if the field is not a bitfield.
	bitfieldSize Bits
}
  471. func (cf *coreField) adjustOffsetToNthElement(n int) error {
  472. size, err := Sizeof(cf.Type)
  473. if err != nil {
  474. return err
  475. }
  476. cf.offset += uint32(n) * uint32(size)
  477. return nil
  478. }
  479. func (cf *coreField) adjustOffsetBits(offset Bits) error {
  480. align, err := alignof(cf.Type)
  481. if err != nil {
  482. return err
  483. }
  484. // We can compute the load offset by:
  485. // 1) converting the bit offset to bytes with a flooring division.
  486. // 2) dividing and multiplying that offset by the alignment, yielding the
  487. // load size aligned offset.
  488. offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
  489. // The number of bits remaining is the bit offset less the number of bits
  490. // we can "skip" with the aligned offset.
  491. cf.bitfieldOffset = offset - Bits(offsetBytes*8)
  492. // We know that cf.offset is aligned at to at least align since we get it
  493. // from the compiler via BTF. Adding an aligned offsetBytes preserves the
  494. // alignment.
  495. cf.offset += offsetBytes
  496. return nil
  497. }
  498. func (cf *coreField) sizeBits() (Bits, error) {
  499. if cf.bitfieldSize > 0 {
  500. return cf.bitfieldSize, nil
  501. }
  502. // Someone is trying to access a non-bitfield via a bit shift relocation.
  503. // This happens when a field changes from a bitfield to a regular field
  504. // between kernel versions. Synthesise the size to make the shifts work.
  505. size, err := Sizeof(cf.Type)
  506. if err != nil {
  507. return 0, nil
  508. }
  509. return Bits(size * 8), nil
  510. }
// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
	local := coreField{Type: localT}
	target := coreField{Type: targetT}

	// The first index is used to offset a pointer of the base type like
	// when accessing an array. Note it is applied to BOTH sides.
	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
	}

	// Track whether the last-seen member could legally be a flexible array
	// (i.e. it was the final member of its composite).
	var localMaybeFlex, targetMaybeFlex bool
	for i, acc := range localAcc[1:] {
		switch localType := local.Type.(type) {
		case composite:
			// For composite types acc is used to find the field in the local type,
			// and then we try to find a field in target with the same name.
			localMembers := localType.members()
			if acc >= len(localMembers) {
				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
			}

			localMember := localMembers[acc]
			if localMember.Name == "" {
				_, ok := localMember.Type.(composite)
				if !ok {
					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
				}

				// This is an anonymous struct or union, ignore it.
				// Only the local side descends; the target side is resolved
				// later by coreFindMember, which searches anon members.
				local = coreField{
					Type:   localMember.Type,
					offset: local.offset + localMember.Offset.Bytes(),
				}
				localMaybeFlex = false
				continue
			}

			targetType, ok := target.Type.(composite)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
			}

			targetMember, last, err := coreFindMember(targetType, localMember.Name)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			local = coreField{
				Type:         localMember.Type,
				offset:       local.offset,
				bitfieldSize: localMember.BitfieldSize,
			}
			localMaybeFlex = acc == len(localMembers)-1

			target = coreField{
				Type:         targetMember.Type,
				offset:       target.offset,
				bitfieldSize: targetMember.BitfieldSize,
			}
			targetMaybeFlex = last

			if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
				local.offset += localMember.Offset.Bytes()
				target.offset += targetMember.Offset.Bytes()
				// NB: this break leaves the switch, not the loop; the
				// compatibility check at the bottom still runs.
				break
			}

			// Either of the members is a bitfield. Make sure we're at the
			// end of the accessor.
			if next := i + 1; next < len(localAcc[1:]) {
				return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
			}

			if err := local.adjustOffsetBits(localMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

			if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

		case *Array:
			// For arrays, acc is the index in the target.
			targetType, ok := target.Type.(*Array)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
			}

			// Zero-element arrays are only valid as flexible array members.
			if localType.Nelems == 0 && !localMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
			}
			if targetType.Nelems == 0 && !targetMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
			}

			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
			}
			// An out of bounds access on the target is impossible, not an
			// error: the candidate may simply be poisoned.
			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
			}

			local = coreField{
				Type:   localType.Type,
				offset: local.offset,
			}
			localMaybeFlex = false

			if err := local.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

			target = coreField{
				Type:   targetType.Type,
				offset: target.offset,
			}
			targetMaybeFlex = false

			if err := target.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

		default:
			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
		}

		if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
			return coreField{}, coreField{}, err
		}
	}

	return local, target, nil
}
// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
//
// Performs a breadth-first search over the composite and any anonymous
// composite members. The second return value is true if the member is the
// last member of the composite it was found in (used for flexible array
// detection).
func coreFindMember(typ composite, name string) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	type offsetTarget struct {
		composite
		offset Bits
	}

	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		// NOTE(review): the key is the whole offsetTarget (composite plus
		// offset), so the same composite reached at two different offsets is
		// visited twice — confirm this is intended.
		if visited[target] {
			continue
		}

		if len(visited) >= maxTypeDepth {
			// This check is different than libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}

		visited[target] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := member.Type.(composite)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			// Queue the anonymous member for a later iteration, carrying the
			// accumulated offset.
			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}
  678. // coreFindEnumValue follows localAcc to find the equivalent enum value in target.
  679. func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
  680. localValue, err := localAcc.enumValue(local)
  681. if err != nil {
  682. return nil, nil, err
  683. }
  684. targetEnum, ok := target.(*Enum)
  685. if !ok {
  686. return nil, nil, errImpossibleRelocation
  687. }
  688. localName := newEssentialName(localValue.Name)
  689. for i, targetValue := range targetEnum.Values {
  690. if newEssentialName(targetValue.Name) != localName {
  691. continue
  692. }
  693. return localValue, &targetEnum.Values[i], nil
  694. }
  695. return nil, nil, errImpossibleRelocation
  696. }
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follow slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs, the size is ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 *   - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
 *   - FUNC_PROTOs are compatible if they have compatible signature: same
 *     number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if types are not compatible.
 */
func coreAreTypesCompatible(localType Type, targetType Type) error {
	// Instead of recursing, children of pointer/array/func-proto types are
	// pushed onto two deques that are drained in lockstep.
	var (
		localTs, targetTs typeDeque
		l, t              = &localType, &targetType
		depth             = 0
	)

	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
		if depth >= maxTypeDepth {
			return errors.New("types are nested too deep")
		}

		localType = *l
		targetType = *t

		// Kinds must match exactly (e.g. Struct vs Union is a mismatch).
		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
			// Nothing to do here

		case *Pointer, *Array:
			depth++
			// Enqueue the pointed-to / element types for the next iteration.
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		case *FuncProto:
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
			}

			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		default:
			return fmt.Errorf("unsupported type %T", localType)
		}
	}

	// One deque draining before the other means differing arity somewhere.
	if l != nil {
		return fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return fmt.Errorf("dangling target type %T", *t)
	}

	return nil
}
  762. /* coreAreMembersCompatible checks two types for field-based relocation compatibility.
  763. *
  764. * The comment below is from bpf_core_fields_are_compat in libbpf.c:
  765. *
  766. * Check two types for compatibility for the purpose of field access
  767. * relocation. const/volatile/restrict and typedefs are skipped to ensure we
  768. * are relocating semantically compatible entities:
  769. * - any two STRUCTs/UNIONs are compatible and can be mixed;
  770. * - any two FWDs are compatible, if their names match (modulo flavor suffix);
  771. * - any two PTRs are always compatible;
  772. * - for ENUMs, names should be the same (ignoring flavor suffix) or at
  773. * least one of enums should be anonymous;
  774. * - for ENUMs, check sizes, names are ignored;
  775. * - for INT, size and signedness are ignored;
  776. * - any two FLOATs are always compatible;
  777. * - for ARRAY, dimensionality is ignored, element types are checked for
  778. * compatibility recursively;
  779. * [ NB: coreAreMembersCompatible doesn't recurse, this check is done
  780. * by coreFindField. ]
  781. * - everything else shouldn't be ever a target of relocation.
  782. * These rules are not set in stone and probably will be adjusted as we get
  783. * more experience with using BPF CO-RE relocations.
  784. *
  785. * Returns errImpossibleRelocation if the members are not compatible.
  786. */
  787. func coreAreMembersCompatible(localType Type, targetType Type) error {
  788. doNamesMatch := func(a, b string) error {
  789. if a == "" || b == "" {
  790. // allow anonymous and named type to match
  791. return nil
  792. }
  793. if newEssentialName(a) == newEssentialName(b) {
  794. return nil
  795. }
  796. return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
  797. }
  798. _, lok := localType.(composite)
  799. _, tok := targetType.(composite)
  800. if lok && tok {
  801. return nil
  802. }
  803. if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
  804. return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
  805. }
  806. switch lv := localType.(type) {
  807. case *Array, *Pointer, *Float, *Int:
  808. return nil
  809. case *Enum:
  810. tv := targetType.(*Enum)
  811. return doNamesMatch(lv.Name, tv.Name)
  812. case *Fwd:
  813. tv := targetType.(*Fwd)
  814. return doNamesMatch(lv.Name, tv.Name)
  815. default:
  816. return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
  817. }
  818. }