@@ -3,43 +3,160 @@ package btf

 import (
 	"errors"
 	"fmt"
+	"math"
 	"reflect"
+	"sort"
 	"strconv"
 	"strings"
+
+	"github.com/cilium/ebpf/asm"
 )

 // Code in this file is derived from libbpf, which is available under a BSD
 // 2-Clause license.

-// Relocation describes a CO-RE relocation.
-type Relocation struct {
-	Current uint32
-	New     uint32
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+	Kind   COREKind
+	Local  uint32
+	Target uint32
+	Poison bool
+}
+
+func (f COREFixup) equal(other COREFixup) bool {
+	return f.Local == other.Local && f.Target == other.Target
+}
+
+func (f COREFixup) String() string {
+	if f.Poison {
+		return fmt.Sprintf("%s=poison", f.Kind)
+	}
+	return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target)
+}
+
+func (f COREFixup) apply(ins *asm.Instruction) error {
+	if f.Poison {
+		return errors.New("can't poison individual instruction")
+	}
+
+	switch class := ins.OpCode.Class(); class {
+	case asm.LdXClass, asm.StClass, asm.StXClass:
+		if want := int16(f.Local); want != ins.Offset {
+			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
+		}
+
+		if f.Target > math.MaxInt16 {
+			return fmt.Errorf("offset %d exceeds MaxInt16", f.Target)
+		}
+
+		ins.Offset = int16(f.Target)
+
+	case asm.LdClass:
+		if !ins.IsConstantLoad(asm.DWord) {
+			return fmt.Errorf("not a dword-sized immediate load")
+		}
+
+		if want := int64(f.Local); want != ins.Constant {
+			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
+		}
+
+		ins.Constant = int64(f.Target)
+
+	case asm.ALUClass:
+		if ins.OpCode.ALUOp() == asm.Swap {
+			return fmt.Errorf("relocation against swap")
+		}
+
+		fallthrough
+
+	case asm.ALU64Class:
+		if src := ins.OpCode.Source(); src != asm.ImmSource {
+			return fmt.Errorf("invalid source %s", src)
+		}
+
+		if want := int64(f.Local); want != ins.Constant {
+			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
+		}
+
+		if f.Target > math.MaxInt32 {
+			return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target)
+		}
+
+		ins.Constant = int64(f.Target)
+
+	default:
+		return fmt.Errorf("invalid class %s", class)
+	}
+
+	return nil
 }

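The LdX/St/StX branch above is the common case: the compiler records the local field offset in the instruction, and the fixup swaps it for the target's offset. A minimal standalone sketch of that rule against the public asm package; patchLoadOffset is a hypothetical helper, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

// patchLoadOffset mirrors the LdXClass branch of COREFixup.apply: the
// instruction must still carry the local offset, which is then replaced
// with the offset found in the target BTF.
func patchLoadOffset(ins *asm.Instruction, local, target int16) error {
	if ins.Offset != local {
		return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, local)
	}
	ins.Offset = target
	return nil
}

func main() {
	// dst = *(u32 *)(src + 8), an LdX-class instruction.
	ins := asm.LoadMem(asm.R0, asm.R1, 8, asm.Word)
	if err := patchLoadOffset(&ins, 8, 16); err != nil {
		panic(err)
	}
	fmt.Println(ins.Offset) // 16
}
```
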
-func (r Relocation) equal(other Relocation) bool {
-	return r.Current == other.Current && r.New == other.New
+func (f COREFixup) isNonExistent() bool {
+	return f.Kind.checksForExistence() && f.Target == 0
 }

-// coreReloKind is the type of CO-RE relocation
-type coreReloKind uint32
+type COREFixups map[uint64]COREFixup
+
+// Apply returns a copy of insns with the CO-RE relocations applied.
+func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
+	if len(fs) == 0 {
+		cpy := make(asm.Instructions, len(insns))
+		copy(cpy, insns)
+		return cpy, nil
+	}
+
+	cpy := make(asm.Instructions, 0, len(insns))
+	iter := insns.Iterate()
+	for iter.Next() {
+		fixup, ok := fs[iter.Offset.Bytes()]
+		if !ok {
+			cpy = append(cpy, *iter.Ins)
+			continue
+		}
+
+		ins := *iter.Ins
+		if fixup.Poison {
+			const badRelo = asm.BuiltinFunc(0xbad2310)
+
+			cpy = append(cpy, badRelo.Call())
+			if ins.OpCode.IsDWordLoad() {
+				// 64 bit constant loads occupy two raw bpf instructions, so
+				// we need to add another instruction as padding.
+				cpy = append(cpy, badRelo.Call())
+			}
+
+			continue
+		}
+
+		if err := fixup.apply(&ins); err != nil {
+			return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
+		}
+
+		cpy = append(cpy, ins)
+	}
+
+	return cpy, nil
+}
+
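A hedged usage sketch, not part of this diff: once coreRelocate has produced a COREFixups map for a program, Apply rewrites its instruction stream before loading. applyFixupsSketch and its parameters are hypothetical:

```go
// A sketch, assuming fixups came from coreRelocate and insns is the
// program's instruction stream.
func applyFixupsSketch(fixups COREFixups, insns asm.Instructions) (asm.Instructions, error) {
	fixed, err := fixups.Apply(insns)
	if err != nil {
		return nil, fmt.Errorf("apply CO-RE fixups: %w", err)
	}
	// fixed is a copy; insns is left untouched.
	return fixed, nil
}
```
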
+// COREKind is the type of CO-RE relocation
+type COREKind uint32

 const (
-	reloFieldByteOffset coreReloKind = iota /* field byte offset */
-	reloFieldByteSize                       /* field size in bytes */
-	reloFieldExists                         /* field existence in target kernel */
-	reloFieldSigned                         /* field signedness (0 - unsigned, 1 - signed) */
-	reloFieldLShiftU64                      /* bitfield-specific left bitshift */
-	reloFieldRShiftU64                      /* bitfield-specific right bitshift */
-	reloTypeIDLocal                         /* type ID in local BPF object */
-	reloTypeIDTarget                        /* type ID in target kernel */
-	reloTypeExists                          /* type existence in target kernel */
-	reloTypeSize                            /* type size in bytes */
-	reloEnumvalExists                       /* enum value existence in target kernel */
-	reloEnumvalValue                        /* enum value integer value */
+	reloFieldByteOffset COREKind = iota /* field byte offset */
+	reloFieldByteSize                   /* field size in bytes */
+	reloFieldExists                     /* field existence in target kernel */
+	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
+	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
+	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
+	reloTypeIDLocal                     /* type ID in local BPF object */
+	reloTypeIDTarget                    /* type ID in target kernel */
+	reloTypeExists                      /* type existence in target kernel */
+	reloTypeSize                        /* type size in bytes */
+	reloEnumvalExists                   /* enum value existence in target kernel */
+	reloEnumvalValue                    /* enum value integer value */
 )

-func (k coreReloKind) String() string {
+func (k COREKind) String() string {
 	switch k {
 	case reloFieldByteOffset:
 		return "byte_off"
@@ -70,103 +187,249 @@ func (k coreReloKind) String() string {
 	}
 }

-func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
-	if target == nil {
-		var err error
-		target, err = loadKernelSpec()
-		if err != nil {
-			return nil, err
-		}
-	}
+func (k COREKind) checksForExistence() bool {
+	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}

+func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) {
 	if local.byteOrder != target.byteOrder {
 		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
 	}

-	relocations := make(map[uint64]Relocation, len(coreRelos))
-	for _, relo := range coreRelos {
-		accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
-		if err != nil {
-			return nil, err
+	var ids []TypeID
+	relosByID := make(map[TypeID]coreRelos)
+	result := make(COREFixups, len(relos))
+	for _, relo := range relos {
+		if relo.kind == reloTypeIDLocal {
+			// Filtering out reloTypeIDLocal here makes our lives a lot easier
+			// down the line, since it doesn't have a target at all.
+			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+			}
+
+			result[uint64(relo.insnOff)] = COREFixup{
+				relo.kind,
+				uint32(relo.typeID),
+				uint32(relo.typeID),
+				false,
+			}
+			continue
 		}

-		accessor, err := parseCoreAccessor(accessorStr)
+		relos, ok := relosByID[relo.typeID]
+		if !ok {
+			ids = append(ids, relo.typeID)
+		}
+		relosByID[relo.typeID] = append(relos, relo)
+	}
+
+	// Ensure we work on relocations in a deterministic order.
+	sort.Slice(ids, func(i, j int) bool {
+		return ids[i] < ids[j]
+	})
+
+	for _, id := range ids {
+		if int(id) >= len(local.types) {
+			return nil, fmt.Errorf("invalid type id %d", id)
+		}
+
+		localType := local.types[id]
+		named, ok := localType.(namedType)
+		if !ok || named.name() == "" {
+			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
+		}
+
+		relos := relosByID[id]
+		targets := target.namedTypes[named.essentialName()]
+		fixups, err := coreCalculateFixups(localType, targets, relos)
 		if err != nil {
-			return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+			return nil, fmt.Errorf("relocate %s: %w", localType, err)
 		}

-		if int(relo.TypeID) >= len(local.types) {
-			return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
+		for i, relo := range relos {
+			result[uint64(relo.insnOff)] = fixups[i]
 		}
+	}

-		typ := local.types[relo.TypeID]
+	return result, nil
+}
+
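coreRelocate looks up candidate targets via essential names, which is what makes libbpf-style type flavors work: a local type named task_struct___v2 is matched against kernel types named task_struct. A hedged standalone sketch of the convention; essentialNameSketch is hypothetical, the package has its own essentialName helper:

```go
// The part after "___" is a flavor suffix and is ignored when looking up
// relocation targets, following the libbpf naming convention.
func essentialNameSketch(name string) string {
	if i := strings.LastIndex(name, "___"); i > 0 {
		return name[:i]
	}
	return name
}

// essentialNameSketch("task_struct___v2") == "task_struct"
// essentialNameSketch("task_struct")      == "task_struct"
```
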
+var errAmbiguousRelocation = errors.New("ambiguous relocation")
+var errImpossibleRelocation = errors.New("impossible relocation")
+
+// coreCalculateFixups calculates the fixups for the given relocations using
+// the "best" target.
+//
+// The best target is determined by scoring: the less poisoning we have to do
+// the better the target is.
+func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) {
+	localID := local.ID()
+	local, err := copyType(local, skipQualifierAndTypedef)
+	if err != nil {
+		return nil, err
+	}
+
+	bestScore := len(relos)
+	var bestFixups []COREFixup
+	for i := range targets {
+		targetID := targets[i].ID()
+		target, err := copyType(targets[i], skipQualifierAndTypedef)
+		if err != nil {
+			return nil, err
+		}

-		if relo.ReloKind == reloTypeIDLocal {
-			relocations[uint64(relo.InsnOff)] = Relocation{
-				uint32(typ.ID()),
-				uint32(typ.ID()),
+		score := 0 // lower is better
+		fixups := make([]COREFixup, 0, len(relos))
+		for _, relo := range relos {
+			fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
+			if err != nil {
+				return nil, fmt.Errorf("target %s: %w", target, err)
+			}
+			if fixup.Poison || fixup.isNonExistent() {
+				score++
 			}
+			fixups = append(fixups, fixup)
+		}
+
+		if score > bestScore {
+			// We have a better target already, ignore this one.
 			continue
 		}

-		named, ok := typ.(namedType)
-		if !ok || named.name() == "" {
-			return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
+		if score < bestScore {
+			// This is the best target yet, use it.
+			bestScore = score
+			bestFixups = fixups
+			continue
 		}

-		name := essentialName(named.name())
-		res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
-		if err != nil {
-			return nil, fmt.Errorf("relocate %s: %w", name, err)
+		// Some other target has the same score as the current one. Make sure
+		// the fixups agree with each other.
+		for i, fixup := range bestFixups {
+			if !fixup.equal(fixups[i]) {
+				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation)
+			}
 		}
+	}

-		relocations[uint64(relo.InsnOff)] = res
+	if bestFixups == nil {
+		// Nothing at all matched, probably because there are no suitable
+		// targets at all. Poison everything!
+		bestFixups = make([]COREFixup, len(relos))
+		for i, relo := range relos {
+			bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
+		}
 	}

-	return relocations, nil
+	return bestFixups, nil
 }

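The selection loop above reduces to a small rule. A condensed sketch, assuming each candidate target has already produced one fixup list; pickBest is hypothetical:

```go
// pickBest chooses the fixup list with the fewest poisoned or non-existent
// entries. Candidates tying with the best score must agree on every fixup,
// otherwise the relocation is ambiguous.
func pickBest(candidates [][]COREFixup) ([]COREFixup, error) {
	var best []COREFixup
	bestScore := -1
	for _, fixups := range candidates {
		score := 0
		for _, f := range fixups {
			if f.Poison || f.isNonExistent() {
				score++
			}
		}
		switch {
		case bestScore == -1 || score < bestScore:
			best, bestScore = fixups, score
		case score == bestScore:
			for i, f := range best {
				if !f.equal(fixups[i]) {
					return nil, errAmbiguousRelocation
				}
			}
		}
	}
	return best, nil
}
```
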
-var errAmbiguousRelocation = errors.New("ambiguous relocation")
+// coreCalculateFixup calculates the fixup for a single local type, target type
+// and relocation.
+func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) {
+	fixup := func(local, target uint32) (COREFixup, error) {
+		return COREFixup{relo.kind, local, target, false}, nil
+	}
+	poison := func() (COREFixup, error) {
+		if relo.kind.checksForExistence() {
+			return fixup(1, 0)
+		}
+		return COREFixup{relo.kind, 0, 0, true}, nil
+	}
+	zero := COREFixup{}
+
+	switch relo.kind {
+	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
+		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+		}
+
+		err := coreAreTypesCompatible(local, target)
+		if errors.Is(err, errImpossibleRelocation) {
+			return poison()
+		}
+		if err != nil {
+			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+		}
+
+		switch relo.kind {
+		case reloTypeExists:
+			return fixup(1, 1)

-func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
-	var relos []Relocation
-	var matches []Type
-	for _, target := range targets {
-		switch kind {
 		case reloTypeIDTarget:
-			if localAccessor[0] != 0 {
-				return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
+			return fixup(uint32(localID), uint32(targetID))
+
+		case reloTypeSize:
+			localSize, err := Sizeof(local)
+			if err != nil {
+				return zero, err
 			}

-			if compat, err := coreAreTypesCompatible(local, target); err != nil {
-				return Relocation{}, fmt.Errorf("%s: %s", kind, err)
-			} else if !compat {
-				continue
+			targetSize, err := Sizeof(target)
+			if err != nil {
+				return zero, err
 			}

-			relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})
+			return fixup(uint32(localSize), uint32(targetSize))
+		}

-		default:
-			return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
+	case reloEnumvalValue, reloEnumvalExists:
+		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
+		if errors.Is(err, errImpossibleRelocation) {
+			return poison()
+		}
+		if err != nil {
+			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
 		}
-		matches = append(matches, target)
-	}

-	if len(relos) == 0 {
-		// TODO: Add switch for existence checks like reloEnumvalExists here.
+		switch relo.kind {
+		case reloEnumvalExists:
+			return fixup(1, 1)

-		// TODO: This might have to be poisoned.
-		return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
-	}
+		case reloEnumvalValue:
+			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
+		}
+
+	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
+		if _, ok := target.(*Fwd); ok {
+			// We can't relocate fields using a forward declaration, so
+			// skip it. If a non-forward declaration is present in the BTF
+			// we'll find it in one of the other iterations.
+			return poison()
+		}
+
+		localField, targetField, err := coreFindField(local, relo.accessor, target)
+		if errors.Is(err, errImpossibleRelocation) {
+			return poison()
+		}
+		if err != nil {
+			return zero, fmt.Errorf("target %s: %w", target, err)
+		}
+
+		switch relo.kind {
+		case reloFieldExists:
+			return fixup(1, 1)
+
+		case reloFieldByteOffset:
+			return fixup(localField.offset/8, targetField.offset/8)
+
+		case reloFieldByteSize:
+			localSize, err := Sizeof(localField.Type)
+			if err != nil {
+				return zero, err
+			}
+
+			targetSize, err := Sizeof(targetField.Type)
+			if err != nil {
+				return zero, err
+			}
+
+			return fixup(uint32(localSize), uint32(targetSize))

-	relo := relos[0]
-	for _, altRelo := range relos[1:] {
-		if !altRelo.equal(relo) {
-			return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
 		}
 	}

-	return relo, nil
+	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
 }

 /* coreAccessor contains a path through a struct. It contains at least one index.
@@ -219,6 +482,240 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) {
 	return result, nil
 }

+func (ca coreAccessor) String() string {
+	strs := make([]string, 0, len(ca))
+	for _, i := range ca {
+		strs = append(strs, strconv.Itoa(i))
+	}
+	return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+	e, ok := t.(*Enum)
+	if !ok {
+		return nil, fmt.Errorf("not an enum: %s", t)
+	}
+
+	if len(ca) > 1 {
+		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+	}
+
+	i := ca[0]
+	if i >= len(e.Values) {
+		return nil, fmt.Errorf("invalid index %d for %s", i, e)
+	}
+
+	return &e.Values[i], nil
+}
+
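A hedged round-trip sketch of the accessor notation handled here: parseCoreAccessor (unchanged, above this hunk) turns the colon-separated string emitted by clang into indices, and the new String method renders it back. accessorRoundTripSketch is hypothetical:

```go
func accessorRoundTripSketch() error {
	// "0:2:1" walks from the root object to member #2, then into its member #1.
	acc, err := parseCoreAccessor("0:2:1")
	if err != nil {
		return err
	}
	fmt.Println(acc.String()) // "0:2:1"
	return nil
}
```
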
+type coreField struct {
+	Type   Type
+	offset uint32
+}
+
+func adjustOffset(base uint32, t Type, n int) (uint32, error) {
+	size, err := Sizeof(t)
+	if err != nil {
+		return 0, err
+	}
+
+	return base + (uint32(n) * uint32(size) * 8), nil
+}
+
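adjustOffset works in bits: stepping over n elements of type t adds n * sizeof(t) * 8. A worked sketch; building an Int literal by hand is purely illustrative:

```go
func adjustOffsetSketch() error {
	// Stepping to index 2 of an array of 4-byte ints: 0 + 2*4*8 = 64 bits.
	off, err := adjustOffset(0, &Int{Size: 4}, 2)
	if err != nil {
		return err
	}
	fmt.Println(off) // 64
	return nil
}
```
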
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the local and target fields, each with its offset in bits from
+// the start of the enclosing type.
+func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
+	// The first index is used to offset a pointer of the base type like
+	// when accessing an array.
+	localOffset, err := adjustOffset(0, local, localAcc[0])
+	if err != nil {
+		return coreField{}, coreField{}, err
+	}
+
+	targetOffset, err := adjustOffset(0, target, localAcc[0])
+	if err != nil {
+		return coreField{}, coreField{}, err
+	}
+
+	if err := coreAreMembersCompatible(local, target); err != nil {
+		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+	}
+
+	var localMaybeFlex, targetMaybeFlex bool
+	for _, acc := range localAcc[1:] {
+		switch localType := local.(type) {
+		case composite:
+			// For composite types acc is used to find the field in the local type,
+			// and then we try to find a field in target with the same name.
+			localMembers := localType.members()
+			if acc >= len(localMembers) {
+				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
+			}
+
+			localMember := localMembers[acc]
+			if localMember.Name == "" {
+				_, ok := localMember.Type.(composite)
+				if !ok {
+					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+				}
+
+				// This is an anonymous struct or union, ignore it.
+				local = localMember.Type
+				localOffset += localMember.Offset
+				localMaybeFlex = false
+				continue
+			}
+
+			targetType, ok := target.(composite)
+			if !ok {
+				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
+			}
+
+			targetMember, last, err := coreFindMember(targetType, localMember.Name)
+			if err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+			if targetMember.BitfieldSize > 0 {
+				return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
+			}
+
+			local = localMember.Type
+			localMaybeFlex = acc == len(localMembers)-1
+			localOffset += localMember.Offset
+			target = targetMember.Type
+			targetMaybeFlex = last
+			targetOffset += targetMember.Offset
+
+		case *Array:
+			// For arrays, acc is the index in the target.
+			targetType, ok := target.(*Array)
+			if !ok {
+				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
+			}
+
+			if localType.Nelems == 0 && !localMaybeFlex {
+				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
+			}
+			if targetType.Nelems == 0 && !targetMaybeFlex {
+				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
+			}
+
+			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
+				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
+			}
+			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
+				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
+			}
+
+			local = localType.Type
+			localMaybeFlex = false
+			localOffset, err = adjustOffset(localOffset, local, acc)
+			if err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+			target = targetType.Type
+			targetMaybeFlex = false
+			targetOffset, err = adjustOffset(targetOffset, target, acc)
+			if err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+		default:
+			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
+		}
+
+		if err := coreAreMembersCompatible(local, target); err != nil {
+			return coreField{}, coreField{}, err
+		}
+	}
+
+	return coreField{local, localOffset}, coreField{target, targetOffset}, nil
+}
+
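A hedged worked example of the descent, with types constructed inline purely for illustration: for a local struct { int a; int b[4]; }, the accessor 0:1:2 selects b[2], and relocating it against itself yields matching bit offsets. findFieldSketch is hypothetical:

```go
func findFieldSketch() error {
	// struct s { int a; int b[4]; }, member offsets given in bits.
	s := &Struct{Members: []Member{
		{Name: "a", Type: &Int{Size: 4}},
		{Name: "b", Type: &Array{Type: &Int{Size: 4}, Nelems: 4}, Offset: 32},
	}}

	// 0 (base) -> member #1 at bit offset 32 -> index 2 adds 2*4*8 bits.
	localField, targetField, err := coreFindField(s, coreAccessor{0, 1, 2}, s)
	if err != nil {
		return err
	}
	fmt.Println(localField.offset, targetField.offset) // 96 96
	return nil
}
```
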
+// coreFindMember finds a member in a composite type while handling anonymous
+// structs and unions.
+func coreFindMember(typ composite, name Name) (Member, bool, error) {
+	if name == "" {
+		return Member{}, false, errors.New("can't search for anonymous member")
+	}
+
+	type offsetTarget struct {
+		composite
+		offset uint32
+	}
+
+	targets := []offsetTarget{{typ, 0}}
+	visited := make(map[composite]bool)
+
+	for i := 0; i < len(targets); i++ {
+		target := targets[i]
+
+		// Only visit targets once to prevent infinite recursion.
+		if visited[target] {
+			continue
+		}
+		if len(visited) >= maxTypeDepth {
+			// This check is different from libbpf, which restricts the entire
+			// path to BPF_CORE_SPEC_MAX_LEN items.
+			return Member{}, false, fmt.Errorf("type is nested too deep")
+		}
+		visited[target] = true
+
+		members := target.members()
+		for j, member := range members {
+			if member.Name == name {
+				// NB: This is safe because member is a copy.
+				member.Offset += target.offset
+				return member, j == len(members)-1, nil
+			}
+
+			// The names don't match, but this member could be an anonymous struct
+			// or union.
+			if member.Name != "" {
+				continue
+			}
+
+			comp, ok := member.Type.(composite)
+			if !ok {
+				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
+			}
+
+			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
+		}
+	}
+
+	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
+}
+
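A hedged sketch of the breadth-first search above: a member of an anonymous union is still found by name, and its offset is accumulated through the containing member. The type literals and findMemberSketch are illustrative only:

```go
func findMemberSketch() error {
	// struct s { int a; union { int b; }; }, the union being anonymous.
	s := &Struct{Members: []Member{
		{Name: "a", Type: &Int{Size: 4}},
		{Offset: 32, Type: &Union{Members: []Member{
			{Name: "b", Type: &Int{Size: 4}},
		}}},
	}}

	m, _, err := coreFindMember(s, "b")
	if err != nil {
		return err
	}
	fmt.Println(m.Offset) // 32: the anonymous union's offset is added in
	return nil
}
```
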
+// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+	localValue, err := localAcc.enumValue(local)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	targetEnum, ok := target.(*Enum)
+	if !ok {
+		return nil, nil, errImpossibleRelocation
+	}
+
+	localName := localValue.Name.essentialName()
+	for i, targetValue := range targetEnum.Values {
+		if targetValue.Name.essentialName() != localName {
+			continue
+		}
+
+		return localValue, &targetEnum.Values[i], nil
+	}
+
+	return nil, nil, errImpossibleRelocation
+}
+
 /* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
@@ -239,8 +736,10 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) {
 * number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if types are not compatible.
 */
-func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
+func coreAreTypesCompatible(localType Type, targetType Type) error {
 	var (
 		localTs, targetTs typeDeque
 		l, t              = &localType, &targetType
@@ -249,14 +748,14 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {

 	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
 		if depth >= maxTypeDepth {
-			return false, errors.New("types are nested too deep")
+			return errors.New("types are nested too deep")
 		}

-		localType = skipQualifierAndTypedef(*l)
-		targetType = skipQualifierAndTypedef(*t)
+		localType = *l
+		targetType = *t

 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return false, nil
+			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
 		}

 		switch lv := (localType).(type) {
@@ -266,7 +765,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
 		case *Int:
 			tv := targetType.(*Int)
 			if lv.isBitfield() || tv.isBitfield() {
-				return false, nil
+				return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
 			}

 		case *Pointer, *Array:
@@ -277,7 +776,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
 		case *FuncProto:
 			tv := targetType.(*FuncProto)
 			if len(lv.Params) != len(tv.Params) {
-				return false, nil
+				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
 			}

 			depth++
@@ -285,22 +784,24 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
 			targetType.walk(&targetTs)

 		default:
-			return false, fmt.Errorf("unsupported type %T", localType)
+			return fmt.Errorf("unsupported type %T", localType)
 		}
 	}

 	if l != nil {
-		return false, fmt.Errorf("dangling local type %T", *l)
+		return fmt.Errorf("dangling local type %T", *l)
 	}

 	if t != nil {
-		return false, fmt.Errorf("dangling target type %T", *t)
+		return fmt.Errorf("dangling target type %T", *t)
 	}

-	return true, nil
+	return nil
 }

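A hedged sketch of what the rewritten contract means for callers: success is now a nil error, and impossibility wraps errImpossibleRelocation so candidates can be skipped with errors.Is. The type literals and compatibilitySketch are illustrative only:

```go
func compatibilitySketch() {
	// Same kind: Int sizes (and names) are ignored here, so this is compatible.
	fmt.Println(coreAreTypesCompatible(&Int{Size: 4}, &Int{Size: 8})) // <nil>

	// Different kinds wrap errImpossibleRelocation, so the caller can
	// poison the candidate instead of failing hard.
	err := coreAreTypesCompatible(&Int{Size: 4}, &Pointer{Target: &Void{}})
	fmt.Println(errors.Is(err, errImpossibleRelocation)) // true
}
```
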
-/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
+/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
+ *
+ * The comment below is from bpf_core_fields_are_compat in libbpf.c:
 *
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
@@ -314,65 +815,63 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
+ *   [ NB: coreAreMembersCompatible doesn't recurse, this check is done
+ *     by coreFindField. ]
 * - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if the members are not compatible.
 */
-func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
-	doNamesMatch := func(a, b string) bool {
+func coreAreMembersCompatible(localType Type, targetType Type) error {
+	doNamesMatch := func(a, b string) error {
 		if a == "" || b == "" {
 			// allow anonymous and named type to match
-			return true
-		}
-
-		return essentialName(a) == essentialName(b)
-	}
-
-	for depth := 0; depth <= maxTypeDepth; depth++ {
-		localType = skipQualifierAndTypedef(localType)
-		targetType = skipQualifierAndTypedef(targetType)
-
-		_, lok := localType.(composite)
-		_, tok := targetType.(composite)
-		if lok && tok {
-			return true, nil
+			return nil
 		}

-		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return false, nil
+		if essentialName(a) == essentialName(b) {
+			return nil
 		}

-		switch lv := localType.(type) {
-		case *Pointer:
-			return true, nil
+		return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
+	}

-		case *Enum:
-			tv := targetType.(*Enum)
-			return doNamesMatch(lv.name(), tv.name()), nil
+	_, lok := localType.(composite)
+	_, tok := targetType.(composite)
+	if lok && tok {
+		return nil
+	}

-		case *Fwd:
-			tv := targetType.(*Fwd)
-			return doNamesMatch(lv.name(), tv.name()), nil
+	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+	}

-		case *Int:
-			tv := targetType.(*Int)
-			return !lv.isBitfield() && !tv.isBitfield(), nil
+	switch lv := localType.(type) {
+	case *Array, *Pointer:
+		return nil

-		case *Array:
-			tv := targetType.(*Array)
+	case *Enum:
+		tv := targetType.(*Enum)
+		return doNamesMatch(lv.name(), tv.name())

-			localType = lv.Type
-			targetType = tv.Type
+	case *Fwd:
+		tv := targetType.(*Fwd)
+		return doNamesMatch(lv.name(), tv.name())

-		default:
-			return false, fmt.Errorf("unsupported type %T", localType)
+	case *Int:
+		tv := targetType.(*Int)
+		if lv.isBitfield() || tv.isBitfield() {
+			return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
 		}
-	}
+		return nil

-	return false, errors.New("types are nested too deep")
+	default:
+		return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
+	}
 }

-func skipQualifierAndTypedef(typ Type) Type {
+func skipQualifierAndTypedef(typ Type) (Type, error) {
 	result := typ
 	for depth := 0; depth <= maxTypeDepth; depth++ {
 		switch v := (result).(type) {
@@ -381,8 +880,8 @@ func skipQualifierAndTypedef(typ Type) Type {
 		case *Typedef:
 			result = v.Type
 		default:
-			return result
+			return result, nil
 		}
 	}
-	return typ
+	return nil, errors.New("exceeded type depth")
 }
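
skipQualifierAndTypedef now reports a cycle instead of silently falling back to the input. A hedged sketch of the unwrap chain, with hand-built types and a hypothetical skipQualifiersSketch wrapper for illustration:

```go
func skipQualifiersSketch() error {
	// volatile const __u32, where __u32 is a typedef of a 4-byte Int.
	u32 := &Int{Name: "u32", Size: 4}
	typ := &Volatile{Type: &Const{Type: &Typedef{Name: "__u32", Type: u32}}}

	got, err := skipQualifierAndTypedef(typ)
	if err != nil {
		return err
	}
	fmt.Println(got == Type(u32)) // true: qualifiers and typedefs are peeled off
	return nil
}
```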