vendor: github.com/cilium/ebpf v0.6.2

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda 2021-07-19 15:57:52 +09:00
parent 1256aa0241
commit 9f9a0b872c
39 changed files with 1951 additions and 565 deletions


@ -136,7 +136,7 @@ github.com/containerd/go-runc 16b287bc67d069a60fa48db15f33
github.com/containerd/typeurl 5e43fb8b75ed2f2305fc04e6918c8d10636771bc # v1.0.2
github.com/containerd/ttrpc bfba540dc45464586c106b1f31c8547933c1eb41 # v1.0.2
github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
github.com/cilium/ebpf ef54c303d1fff1e80a9bf20f00a378fde5419d61 # v0.5.0
github.com/cilium/ebpf ca492085341e0e917f48ec30704d5054c5d42ca8 # v0.6.2
github.com/klauspost/compress a3b7545c88eea469c2246bee0e6c130525d56190 # v1.11.13
github.com/pelletier/go-toml 65ca8064882c8c308e5c804c5d5443d409e0738c # v1.8.1


@ -132,6 +132,58 @@ const (
FnSkStorageDelete
FnSendSignal
FnTcpGenSyncookie
FnSkbOutput
FnProbeReadUser
FnProbeReadKernel
FnProbeReadUserStr
FnProbeReadKernelStr
FnTcpSendAck
FnSendSignalThread
FnJiffies64
FnReadBranchRecords
FnGetNsCurrentPidTgid
FnXdpOutput
FnGetNetnsCookie
FnGetCurrentAncestorCgroupId
FnSkAssign
FnKtimeGetBootNs
FnSeqPrintf
FnSeqWrite
FnSkCgroupId
FnSkAncestorCgroupId
FnRingbufOutput
FnRingbufReserve
FnRingbufSubmit
FnRingbufDiscard
FnRingbufQuery
FnCsumLevel
FnSkcToTcp6Sock
FnSkcToTcpSock
FnSkcToTcpTimewaitSock
FnSkcToTcpRequestSock
FnSkcToUdp6Sock
FnGetTaskStack
FnLoadHdrOpt
FnStoreHdrOpt
FnReserveHdrOpt
FnInodeStorageGet
FnInodeStorageDelete
FnDPath
FnCopyFromUser
FnSnprintfBtf
FnSeqPrintfBtf
FnSkbCgroupClassid
FnRedirectNeigh
FnPerCpuPtr
FnThisCpuPtr
FnRedirectPeer
FnTaskStorageGet
FnTaskStorageDelete
FnGetCurrentTaskBtf
FnBprmOptsSet
FnKtimeGetCoarseNs
FnImaInodeHash
FnSockFromFile
)
// Call emits a function call.
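The constants added above track the kernel's helper IDs through this release (`FnSockFromFile` is helper 162, per the stringer file that follows). A minimal sketch of emitting a call to one of the new helpers via `BuiltinFunc.Call`, the same method this commit itself uses for poisoning (`badRelo.Call()`); it assumes the vendored `asm` package is importable, and the printed output shape is illustrative:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// FnRingbufOutput is one of the constants added above; Call wraps it
	// in an eBPF call instruction.
	ins := asm.FnRingbufOutput.Call()
	fmt.Println(ins) // rendered via Instruction.Format
}
```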


@ -119,11 +119,63 @@ func _() {
_ = x[FnSkStorageDelete-108]
_ = x[FnSendSignal-109]
_ = x[FnTcpGenSyncookie-110]
_ = x[FnSkbOutput-111]
_ = x[FnProbeReadUser-112]
_ = x[FnProbeReadKernel-113]
_ = x[FnProbeReadUserStr-114]
_ = x[FnProbeReadKernelStr-115]
_ = x[FnTcpSendAck-116]
_ = x[FnSendSignalThread-117]
_ = x[FnJiffies64-118]
_ = x[FnReadBranchRecords-119]
_ = x[FnGetNsCurrentPidTgid-120]
_ = x[FnXdpOutput-121]
_ = x[FnGetNetnsCookie-122]
_ = x[FnGetCurrentAncestorCgroupId-123]
_ = x[FnSkAssign-124]
_ = x[FnKtimeGetBootNs-125]
_ = x[FnSeqPrintf-126]
_ = x[FnSeqWrite-127]
_ = x[FnSkCgroupId-128]
_ = x[FnSkAncestorCgroupId-129]
_ = x[FnRingbufOutput-130]
_ = x[FnRingbufReserve-131]
_ = x[FnRingbufSubmit-132]
_ = x[FnRingbufDiscard-133]
_ = x[FnRingbufQuery-134]
_ = x[FnCsumLevel-135]
_ = x[FnSkcToTcp6Sock-136]
_ = x[FnSkcToTcpSock-137]
_ = x[FnSkcToTcpTimewaitSock-138]
_ = x[FnSkcToTcpRequestSock-139]
_ = x[FnSkcToUdp6Sock-140]
_ = x[FnGetTaskStack-141]
_ = x[FnLoadHdrOpt-142]
_ = x[FnStoreHdrOpt-143]
_ = x[FnReserveHdrOpt-144]
_ = x[FnInodeStorageGet-145]
_ = x[FnInodeStorageDelete-146]
_ = x[FnDPath-147]
_ = x[FnCopyFromUser-148]
_ = x[FnSnprintfBtf-149]
_ = x[FnSeqPrintfBtf-150]
_ = x[FnSkbCgroupClassid-151]
_ = x[FnRedirectNeigh-152]
_ = x[FnPerCpuPtr-153]
_ = x[FnThisCpuPtr-154]
_ = x[FnRedirectPeer-155]
_ = x[FnTaskStorageGet-156]
_ = x[FnTaskStorageDelete-157]
_ = x[FnGetCurrentTaskBtf-158]
_ = x[FnBprmOptsSet-159]
_ = x[FnKtimeGetCoarseNs-160]
_ = x[FnImaInodeHash-161]
_ = x[FnSockFromFile-162]
}
const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookie"
const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFile"
var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632}
var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424}
func (i BuiltinFunc) String() string {
if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
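The regenerated stringer uses Go's standard layout: one concatenated name string plus an offset table, so `String()` is two array lookups and a slice. A self-contained sketch of the same pattern with hypothetical names (`Fn`, `fnName`, `fnIndex`):

```go
package main

import (
	"fmt"
	"strconv"
)

type Fn int

const (
	FnA Fn = iota
	FnB
	FnC
)

// One flat string plus an offset table, the same layout as
// _BuiltinFunc_name and _BuiltinFunc_index above.
const fnName = "FnAFnBFnC"

var fnIndex = [...]uint8{0, 3, 6, 9}

func (i Fn) String() string {
	if i < 0 || i >= Fn(len(fnIndex)-1) {
		return "Fn(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return fnName[fnIndex[i]:fnIndex[i+1]]
}

func main() {
	fmt.Println(FnB) // FnB
}
```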


@ -57,7 +57,7 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
return 0, fmt.Errorf("can't unmarshal registers: %s", err)
}
if !bi.OpCode.isDWordLoad() {
if !bi.OpCode.IsDWordLoad() {
return InstructionSize, nil
}
@ -80,7 +80,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
return 0, errors.New("invalid opcode")
}
isDWordLoad := ins.OpCode.isDWordLoad()
isDWordLoad := ins.OpCode.IsDWordLoad()
cons := int32(ins.Constant)
if isDWordLoad {
@ -123,7 +123,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
//
// Returns an error if the instruction doesn't load a map.
func (ins *Instruction) RewriteMapPtr(fd int) error {
if !ins.OpCode.isDWordLoad() {
if !ins.OpCode.IsDWordLoad() {
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
@ -138,15 +138,19 @@ func (ins *Instruction) RewriteMapPtr(fd int) error {
return nil
}
func (ins *Instruction) mapPtr() uint32 {
return uint32(uint64(ins.Constant) & math.MaxUint32)
// MapPtr returns the map fd for this instruction.
//
// The result is undefined if the instruction is not a load from a map,
// see IsLoadFromMap.
func (ins *Instruction) MapPtr() int {
return int(int32(uint64(ins.Constant) & math.MaxUint32))
}
// RewriteMapOffset changes the offset of a direct load from a map.
//
// Returns an error if the instruction is not a direct load.
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
if !ins.OpCode.isDWordLoad() {
if !ins.OpCode.IsDWordLoad() {
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
@ -163,10 +167,10 @@ func (ins *Instruction) mapOffset() uint32 {
return uint32(uint64(ins.Constant) >> 32)
}
// isLoadFromMap returns true if the instruction loads from a map.
// IsLoadFromMap returns true if the instruction loads from a map.
//
// This covers both loading the map pointer and direct map value loads.
func (ins *Instruction) isLoadFromMap() bool {
func (ins *Instruction) IsLoadFromMap() bool {
return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
}
@ -177,6 +181,12 @@ func (ins *Instruction) IsFunctionCall() bool {
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
}
// IsConstantLoad returns true if the instruction loads a constant of the
// given size.
func (ins *Instruction) IsConstantLoad(size Size) bool {
return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
}
// Format implements fmt.Formatter.
func (ins Instruction) Format(f fmt.State, c rune) {
if c != 'v' {
@ -197,8 +207,8 @@ func (ins Instruction) Format(f fmt.State, c rune) {
return
}
if ins.isLoadFromMap() {
fd := int32(ins.mapPtr())
if ins.IsLoadFromMap() {
fd := ins.MapPtr()
switch ins.Src {
case PseudoMapFD:
fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
@ -403,7 +413,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
h := sha1.New()
for i, ins := range insns {
if ins.isLoadFromMap() {
if ins.IsLoadFromMap() {
ins.Constant = 0
}
_, err := ins.Marshal(h, bo)
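The newly exported `MapPtr` and the existing `mapOffset` split the 64-bit immediate of a `LoadImmOp(DWord)` instruction: the map fd lives in the low 32 bits, the map-value offset in the high 32. A sketch of that encoding (the helper names here are hypothetical, but the bit layout matches the methods above):

```go
package main

import (
	"fmt"
	"math"
)

// encode packs a map fd and a value offset the way RewriteMapPtr and
// RewriteMapOffset above do.
func encode(fd int, offset uint32) int64 {
	return int64(uint64(offset)<<32 | uint64(uint32(fd)))
}

func mapPtr(c int64) int       { return int(int32(uint64(c) & math.MaxUint32)) }
func mapOffset(c int64) uint32 { return uint32(uint64(c) >> 32) }

func main() {
	c := encode(5, 16)
	fmt.Println(mapPtr(c), mapOffset(c)) // 5 16
}
```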


@ -111,7 +111,7 @@ func LoadMapPtr(dst Register, fd int) Instruction {
OpCode: LoadImmOp(DWord),
Dst: dst,
Src: PseudoMapFD,
Constant: int64(fd),
Constant: int64(uint32(fd)),
}
}
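The `int64(uint32(fd))` conversion matters because placeholder fds can be negative: without truncating to 32 bits first, sign extension would smear ones into the upper word, which `RewriteMapOffset` reserves for the value offset. A two-line demonstration:

```go
package main

import "fmt"

func main() {
	fd := -1
	fmt.Printf("%#x\n", uint64(int64(fd)))         // 0xffffffffffffffff: offset bits clobbered
	fmt.Printf("%#x\n", uint64(int64(uint32(fd)))) // 0xffffffff: fd stays in the low word
}
```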


@ -69,13 +69,13 @@ const InvalidOpCode OpCode = 0xff
// rawInstructions returns the number of BPF instructions required
// to encode this opcode.
func (op OpCode) rawInstructions() int {
if op.isDWordLoad() {
if op.IsDWordLoad() {
return 2
}
return 1
}
func (op OpCode) isDWordLoad() bool {
func (op OpCode) IsDWordLoad() bool {
return op == LoadImmOp(DWord)
}


@ -3,6 +3,7 @@ package ebpf
import (
"errors"
"fmt"
"io"
"math"
"reflect"
"strings"
@ -89,8 +90,8 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
//
// The constant must be defined like so in the C program:
//
// static volatile const type foobar;
// static volatile const type foobar = default;
// volatile const type foobar;
// volatile const type foobar = default;
//
// Replacement values must be of the same length as the C sizeof(type).
// If necessary, they are marshalled according to the same rules as
@ -269,11 +270,21 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co
}, nil
}
type btfHandleCache map[*btf.Spec]*btf.Handle
type handleCache struct {
btfHandles map[*btf.Spec]*btf.Handle
btfSpecs map[io.ReaderAt]*btf.Spec
}
func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
if btfs[spec] != nil {
return btfs[spec], nil
func newHandleCache() *handleCache {
return &handleCache{
btfHandles: make(map[*btf.Spec]*btf.Handle),
btfSpecs: make(map[io.ReaderAt]*btf.Spec),
}
}
func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
if hc.btfHandles[spec] != nil {
return hc.btfHandles[spec], nil
}
handle, err := btf.NewHandle(spec)
@ -281,14 +292,30 @@ func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
return nil, err
}
btfs[spec] = handle
hc.btfHandles[spec] = handle
return handle, nil
}
func (btfs btfHandleCache) close() {
for _, handle := range btfs {
func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) {
if hc.btfSpecs[rd] != nil {
return hc.btfSpecs[rd], nil
}
spec, err := btf.LoadSpecFromReader(rd)
if err != nil {
return nil, err
}
hc.btfSpecs[rd] = spec
return spec, nil
}
func (hc handleCache) close() {
for _, handle := range hc.btfHandles {
handle.Close()
}
hc.btfHandles = nil
hc.btfSpecs = nil
}
func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
@ -300,12 +327,12 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
var (
maps = make(map[string]*Map)
progs = make(map[string]*Program)
btfs = make(btfHandleCache)
handles = newHandleCache()
skipMapsAndProgs = false
)
cleanup = func() {
btfs.close()
handles.close()
if skipMapsAndProgs {
return
@ -335,7 +362,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
return nil, fmt.Errorf("missing map %s", mapName)
}
m, err := newMapWithOptions(mapSpec, opts.Maps, btfs)
m, err := newMapWithOptions(mapSpec, opts.Maps, handles)
if err != nil {
return nil, fmt.Errorf("map %s: %w", mapName, err)
}
@ -360,7 +387,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
for i := range progSpec.Instructions {
ins := &progSpec.Instructions[i]
if ins.OpCode != asm.LoadImmOp(asm.DWord) || ins.Reference == "" {
if !ins.IsLoadFromMap() || ins.Reference == "" {
continue
}
@ -372,7 +399,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
m, err := loadMap(ins.Reference)
if err != nil {
return nil, fmt.Errorf("program %s: %s", progName, err)
return nil, fmt.Errorf("program %s: %w", progName, err)
}
fd := m.FD()
@ -384,7 +411,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
}
}
prog, err := newProgramWithOptions(progSpec, opts.Programs, btfs)
prog, err := newProgramWithOptions(progSpec, opts.Programs, handles)
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
@ -534,7 +561,7 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va
}
if err != nil {
return fmt.Errorf("field %s: %s", field.Name, err)
return fmt.Errorf("field %s: %w", field.Name, err)
}
}
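Several `%s` verbs become `%w` in this file, which keeps the error chain intact for callers. A sketch of the difference, with a stand-in sentinel error:

```go
package main

import (
	"errors"
	"fmt"
)

var errMissing = errors.New("missing map")

func main() {
	wrapped := fmt.Errorf("program foo: %w", errMissing)
	flat := fmt.Errorf("program foo: %s", errMissing)
	fmt.Println(errors.Is(wrapped, errMissing)) // true: %w preserves the chain
	fmt.Println(errors.Is(flat, errMissing))    // false: %s flattens to a string
}
```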


@ -96,7 +96,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
}
btfSpec, err := btf.LoadSpecFromReader(rd)
if err != nil {
if err != nil && !errors.Is(err, btf.ErrNotFound) {
return nil, fmt.Errorf("load BTF: %w", err)
}
@ -159,7 +159,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
}
if target.Flags&elf.SHF_STRINGS > 0 {
return nil, fmt.Errorf("section %q: string %q is not stack allocated: %w", section.Name, rel.Name, ErrNotSupported)
return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
}
target.references++
@ -374,17 +374,25 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
}
case dataSection:
var offset uint32
switch typ {
case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}
// This is really a reference to a static symbol, which clang doesn't
// emit a symbol table entry for. Instead it encodes the offset in
// the instruction itself.
offset = uint32(uint64(ins.Constant))
case elf.STT_OBJECT:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}
offset = uint32(rel.Value)
default:
return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
}
@ -394,10 +402,8 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
// it's not clear how to encode that into Instruction.
name = target.Name
// For some reason, clang encodes the offset of the symbol its
// section in the first basic BPF instruction, while the kernel
// expects it in the second one.
ins.Constant <<= 32
// The kernel expects the offset in the second basic BPF instruction.
ins.Constant = int64(uint64(offset) << 32)
ins.Src = asm.PseudoMapValue
// Mark the instruction as needing an update when creating the
@ -491,33 +497,38 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
}
if maps[mapSym.Name] != nil {
mapName := mapSym.Name
if maps[mapName] != nil {
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
}
lr := io.LimitReader(r, int64(size))
spec := MapSpec{
Name: SanitizeName(mapSym.Name, -1),
Name: SanitizeName(mapName, -1),
}
switch {
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
return fmt.Errorf("map %v: missing type", mapSym)
return fmt.Errorf("map %s: missing type", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
return fmt.Errorf("map %v: missing key size", mapSym)
return fmt.Errorf("map %s: missing key size", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
return fmt.Errorf("map %v: missing value size", mapSym)
return fmt.Errorf("map %s: missing value size", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
return fmt.Errorf("map %v: missing max entries", mapSym)
return fmt.Errorf("map %s: missing max entries", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
return fmt.Errorf("map %v: missing flags", mapSym)
return fmt.Errorf("map %s: missing flags", mapName)
}
if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
return fmt.Errorf("map %s: unknown and non-zero fields in definition", mapName)
}
maps[mapSym.Name] = &spec
if err := spec.clampPerfEventArraySize(); err != nil {
return fmt.Errorf("map %s: %w", mapName, err)
}
maps[mapName] = &spec
}
}
@ -565,6 +576,10 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("map %v: %w", name, err)
}
if err := mapSpec.clampPerfEventArraySize(); err != nil {
return fmt.Errorf("map %v: %w", name, err)
}
maps[name] = mapSpec
}
}
@ -847,6 +862,8 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
"uretprobe/": {Kprobe, AttachNone, 0},
"tracepoint/": {TracePoint, AttachNone, 0},
"raw_tracepoint/": {RawTracepoint, AttachNone, 0},
"raw_tp/": {RawTracepoint, AttachNone, 0},
"tp_btf/": {Tracing, AttachTraceRawTp, 0},
"xdp": {XDP, AttachNone, 0},
"perf_event": {PerfEvent, AttachNone, 0},
"lwt_in": {LWTIn, AttachNone, 0},
@ -860,6 +877,9 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
"lirc_mode2": {LircMode2, AttachLircMode2, 0},
"flow_dissector": {FlowDissector, AttachFlowDissector, 0},
"iter/": {Tracing, AttachTraceIter, 0},
"fentry/": {Tracing, AttachTraceFEntry, 0},
"fmod_ret/": {Tracing, AttachModifyReturn, 0},
"fexit/": {Tracing, AttachTraceFExit, 0},
"fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
"fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
"fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},


@ -1,6 +1,13 @@
# eBPF Examples
- [kprobe](kprobe/) - Attach a program to the entry or exit of an arbitrary kernel symbol (function).
- [uprobe](uprobe/) - Like a kprobe, but for symbols in userspace binaries (e.g. `bash`).
- [uretprobe](uretprobe/) - Like a kprobe, but for symbols in userspace binaries (e.g. `bash`).
- [tracepoint](tracepoint/) - Attach a program to predetermined kernel tracepoints.
- Add your use case(s) here!
## How to run
```bash
cd ebpf/examples/
go run -exec sudo ./kprobe   # or ./uretprobe, ./tracepoint, ...
```


@ -3,7 +3,6 @@ module github.com/cilium/ebpf/examples
go 1.15
require (
github.com/cilium/ebpf v0.4.1-0.20210401155455-cb5b8b6084b4 // indirect
github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595
github.com/cilium/ebpf v0.6.1-0.20210610105443-1e7f01c7124c
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)


@ -10,7 +10,7 @@ struct bpf_map_def SEC("maps") kprobe_map = {
.max_entries = 1,
};
SEC("kprobe/__x64_sys_execve")
SEC("kprobe/sys_execve")
int kprobe_execve() {
u32 key = 0;
u64 initval = 1, *valp;


@ -12,8 +12,8 @@ struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
} events SEC(".maps");
SEC("uprobe/bash_readline")
int uprobe_bash_readline(struct pt_regs *ctx) {
SEC("uretprobe/bash_readline")
int uretprobe_bash_readline(struct pt_regs *ctx) {
struct event_t event;
event.pid = bpf_get_current_pid_tgid();


@ -35,7 +35,7 @@ type Spec struct {
namedTypes map[string][]namedType
funcInfos map[string]extInfo
lineInfos map[string]extInfo
coreRelos map[string]bpfCoreRelos
coreRelos map[string]coreRelos
byteOrder binary.ByteOrder
}
@ -53,7 +53,7 @@ type btfHeader struct {
// LoadSpecFromReader reads BTF sections from an ELF.
//
// Returns a nil Spec and no error if no BTF was present.
// Returns ErrNotFound if the reader contains no BTF.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
@ -67,7 +67,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
}
if btfSection == nil {
return nil, nil
return nil, fmt.Errorf("btf: %w", ErrNotFound)
}
symbols, err := file.Symbols()
@ -377,7 +377,7 @@ func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
for _, raw := range s.rawTypes {
switch {
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
raw.SetLinkage(linkageStatic)
raw.SetLinkage(StaticFunc)
}
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
@ -438,13 +438,13 @@ func (s *Spec) Program(name string, length uint64) (*Program, error) {
funcInfos, funcOK := s.funcInfos[name]
lineInfos, lineOK := s.lineInfos[name]
coreRelos, coreOK := s.coreRelos[name]
relos, coreOK := s.coreRelos[name]
if !funcOK && !lineOK && !coreOK {
return nil, fmt.Errorf("no extended BTF info for section %s", name)
}
return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
return &Program{s, length, funcInfos, lineInfos, relos}, nil
}
// Datasec returns the BTF required to create maps which represent data sections.
@ -491,7 +491,8 @@ func (s *Spec) FindType(name string, typ Type) error {
return fmt.Errorf("type %s: %w", name, ErrNotFound)
}
value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
cpy, _ := copyType(candidate, nil)
value := reflect.Indirect(reflect.ValueOf(cpy))
reflect.Indirect(reflect.ValueOf(typ)).Set(value)
return nil
}
@ -606,7 +607,7 @@ type Program struct {
spec *Spec
length uint64
funcInfos, lineInfos extInfo
coreRelos bpfCoreRelos
coreRelos coreRelos
}
// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
@ -665,16 +666,23 @@ func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
return s.lineInfos.recordSize, bytes, nil
}
// ProgramRelocations returns the CO-RE relocations required to adjust the
// program to the target.
// ProgramFixups returns the changes required to adjust the program to the target.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
func ProgramFixups(s *Program, target *Spec) (COREFixups, error) {
if len(s.coreRelos) == 0 {
return nil, nil
}
if target == nil {
var err error
target, err = LoadKernelSpec()
if err != nil {
return nil, err
}
}
return coreRelocate(s.spec, target, s.coreRelos)
}
@ -771,7 +779,7 @@ var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() err
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto
types.Func.NameOff = 1
types.Func.SetLinkage(linkageGlobal)
types.Func.SetLinkage(GlobalFunc)
btf := marshalBTF(&types, strings, internal.NativeEndian)
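`LoadSpecFromReader` previously signalled "no BTF" with a nil spec and nil error; it now returns a wrapped `ErrNotFound`, which the elf_reader hunk earlier filters with `errors.Is`. A stand-in sketch of the new contract (names here are hypothetical, since the real package is internal to the module):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// loadSpec mimics the new LoadSpecFromReader behaviour: a wrapped
// errNotFound instead of (nil, nil) when the ELF has no .BTF section.
func loadSpec(hasBTF bool) (string, error) {
	if !hasBTF {
		return "", fmt.Errorf("btf: %w", errNotFound)
	}
	return "spec", nil
}

func main() {
	_, err := loadSpec(false)
	if err != nil && !errors.Is(err, errNotFound) {
		panic(err) // a real parse failure
	}
	fmt.Println("no BTF in this ELF, continuing without it")
}
```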


@ -6,6 +6,8 @@ import (
"io"
)
//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage
// btfKind describes a Type.
type btfKind uint8
@ -31,14 +33,23 @@ const (
kindDatasec
)
// btfFuncLinkage describes BTF function linkage metadata.
type btfFuncLinkage uint8
// FuncLinkage describes BTF function linkage metadata.
type FuncLinkage int
// Equivalent of enum btf_func_linkage.
const (
linkageStatic btfFuncLinkage = iota
linkageGlobal
// linkageExtern // Currently unused in libbpf.
StaticFunc FuncLinkage = iota // static
GlobalFunc // global
ExternFunc // extern
)
// VarLinkage describes BTF variable linkage metadata.
type VarLinkage int
const (
StaticVar VarLinkage = iota // static
GlobalVar // global
ExternVar // extern
)
const (
@ -144,11 +155,11 @@ func (bt *btfType) KindFlag() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
func (bt *btfType) Linkage() btfFuncLinkage {
return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
func (bt *btfType) Linkage() FuncLinkage {
return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
func (bt *btfType) SetLinkage(linkage FuncLinkage) {
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}
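`Linkage`/`SetLinkage` store the linkage value in the vlen bits of the shared `Info` word. A sketch of that mask-and-shift packing; the 16-bit mask at shift 0 is an assumption based on the BTF format, not copied from the diff:

```go
package main

import "fmt"

const (
	vlenMask  = 0xffff // assumed: vlen occupies the low 16 bits of Info
	vlenShift = 0
)

type btfType struct{ Info uint32 }

func (bt *btfType) info(mask, shift uint32) uint32 { return (bt.Info >> shift) & mask }

func (bt *btfType) setInfo(v, mask, shift uint32) {
	bt.Info &^= mask << shift         // clear the field
	bt.Info |= (v & mask) << shift    // write the new value
}

func main() {
	var bt btfType
	bt.setInfo(1, vlenMask, vlenShift)        // 1 = GlobalFunc in the enum above
	fmt.Println(bt.info(vlenMask, vlenShift)) // 1
}
```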


@ -0,0 +1,44 @@
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.
package btf
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[StaticFunc-0]
_ = x[GlobalFunc-1]
_ = x[ExternFunc-2]
}
const _FuncLinkage_name = "staticglobalextern"
var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
func (i FuncLinkage) String() string {
if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[StaticVar-0]
_ = x[GlobalVar-1]
_ = x[ExternVar-2]
}
const _VarLinkage_name = "staticglobalextern"
var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
func (i VarLinkage) String() string {
if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}


@ -3,43 +3,160 @@ package btf
import (
"errors"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"strings"
"github.com/cilium/ebpf/asm"
)
// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.
// Relocation describes a CO-RE relocation.
type Relocation struct {
Current uint32
New uint32
// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
Kind COREKind
Local uint32
Target uint32
Poison bool
}
func (r Relocation) equal(other Relocation) bool {
return r.Current == other.Current && r.New == other.New
func (f COREFixup) equal(other COREFixup) bool {
return f.Local == other.Local && f.Target == other.Target
}
// coreReloKind is the type of CO-RE relocation
type coreReloKind uint32
func (f COREFixup) String() string {
if f.Poison {
return fmt.Sprintf("%s=poison", f.Kind)
}
return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target)
}
func (f COREFixup) apply(ins *asm.Instruction) error {
if f.Poison {
return errors.New("can't poison individual instruction")
}
switch class := ins.OpCode.Class(); class {
case asm.LdXClass, asm.StClass, asm.StXClass:
if want := int16(f.Local); want != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
}
if f.Target > math.MaxInt16 {
return fmt.Errorf("offset %d exceeds MaxInt16", f.Target)
}
ins.Offset = int16(f.Target)
case asm.LdClass:
if !ins.IsConstantLoad(asm.DWord) {
return fmt.Errorf("not a dword-sized immediate load")
}
if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
}
ins.Constant = int64(f.Target)
case asm.ALUClass:
if ins.OpCode.ALUOp() == asm.Swap {
return fmt.Errorf("relocation against swap")
}
fallthrough
case asm.ALU64Class:
if src := ins.OpCode.Source(); src != asm.ImmSource {
return fmt.Errorf("invalid source %s", src)
}
if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
}
if f.Target > math.MaxInt32 {
return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target)
}
ins.Constant = int64(f.Target)
default:
return fmt.Errorf("invalid class %s", class)
}
return nil
}
func (f COREFixup) isNonExistant() bool {
return f.Kind.checksForExistence() && f.Target == 0
}
type COREFixups map[uint64]COREFixup
// Apply a set of CO-RE relocations to a BPF program.
func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
if len(fs) == 0 {
cpy := make(asm.Instructions, len(insns))
copy(cpy, insns)
return insns, nil
}
cpy := make(asm.Instructions, 0, len(insns))
iter := insns.Iterate()
for iter.Next() {
fixup, ok := fs[iter.Offset.Bytes()]
if !ok {
cpy = append(cpy, *iter.Ins)
continue
}
ins := *iter.Ins
if fixup.Poison {
const badRelo = asm.BuiltinFunc(0xbad2310)
cpy = append(cpy, badRelo.Call())
if ins.OpCode.IsDWordLoad() {
// 64 bit constant loads occupy two raw bpf instructions, so
// we need to add another instruction as padding.
cpy = append(cpy, badRelo.Call())
}
continue
}
if err := fixup.apply(&ins); err != nil {
return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
}
cpy = append(cpy, ins)
}
return cpy, nil
}
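`Apply` rewrites instructions keyed by their byte offset in the program, and replaces poisoned ones with calls to an invalid builtin (`0xbad2310`) so that loading fails if the kernel actually verifies that path. A simplified stand-in of the offset-keyed walk (not the library's API):

```go
package main

import "fmt"

type ins struct {
	imm  int64
	size uint64 // encoded size in bytes; dword loads would take 16
}

func apply(insns []ins, fixups map[uint64]int64) []ins {
	cpy := make([]ins, 0, len(insns))
	var off uint64
	for _, i := range insns {
		if v, ok := fixups[off]; ok {
			i.imm = v // patch the immediate, as COREFixup.apply does
		}
		cpy = append(cpy, i)
		off += i.size
	}
	return cpy
}

func main() {
	out := apply([]ins{{1, 8}, {2, 8}}, map[uint64]int64{8: 42})
	fmt.Println(out) // [{1 8} {42 8}]
}
```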
// COREKind is the type of CO-RE relocation
type COREKind uint32
const (
reloFieldByteOffset coreReloKind = iota /* field byte offset */
reloFieldByteSize /* field size in bytes */
reloFieldExists /* field existence in target kernel */
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
reloFieldLShiftU64 /* bitfield-specific left bitshift */
reloFieldRShiftU64 /* bitfield-specific right bitshift */
reloTypeIDLocal /* type ID in local BPF object */
reloTypeIDTarget /* type ID in target kernel */
reloTypeExists /* type existence in target kernel */
reloTypeSize /* type size in bytes */
reloEnumvalExists /* enum value existence in target kernel */
reloEnumvalValue /* enum value integer value */
reloFieldByteOffset COREKind = iota /* field byte offset */
reloFieldByteSize /* field size in bytes */
reloFieldExists /* field existence in target kernel */
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
reloFieldLShiftU64 /* bitfield-specific left bitshift */
reloFieldRShiftU64 /* bitfield-specific right bitshift */
reloTypeIDLocal /* type ID in local BPF object */
reloTypeIDTarget /* type ID in target kernel */
reloTypeExists /* type existence in target kernel */
reloTypeSize /* type size in bytes */
reloEnumvalExists /* enum value existence in target kernel */
reloEnumvalValue /* enum value integer value */
)
func (k coreReloKind) String() string {
func (k COREKind) String() string {
switch k {
case reloFieldByteOffset:
return "byte_off"
@ -70,103 +187,249 @@ func (k coreReloKind) String() string {
}
}
func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
if target == nil {
var err error
target, err = loadKernelSpec()
if err != nil {
return nil, err
}
}
func (k COREKind) checksForExistence() bool {
return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}
func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) {
if local.byteOrder != target.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
}
relocations := make(map[uint64]Relocation, len(coreRelos))
for _, relo := range coreRelos {
accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
if err != nil {
return nil, err
}
var ids []TypeID
relosByID := make(map[TypeID]coreRelos)
result := make(COREFixups, len(relos))
for _, relo := range relos {
if relo.kind == reloTypeIDLocal {
// Filtering out reloTypeIDLocal here makes our lives a lot easier
// down the line, since it doesn't have a target at all.
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
accessor, err := parseCoreAccessor(accessorStr)
if err != nil {
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
}
if int(relo.TypeID) >= len(local.types) {
return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
}
typ := local.types[relo.TypeID]
if relo.ReloKind == reloTypeIDLocal {
relocations[uint64(relo.InsnOff)] = Relocation{
uint32(typ.ID()),
uint32(typ.ID()),
result[uint64(relo.insnOff)] = COREFixup{
relo.kind,
uint32(relo.typeID),
uint32(relo.typeID),
false,
}
continue
}
named, ok := typ.(namedType)
if !ok || named.name() == "" {
return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
relos, ok := relosByID[relo.typeID]
if !ok {
ids = append(ids, relo.typeID)
}
name := essentialName(named.name())
res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", name, err)
}
relocations[uint64(relo.InsnOff)] = res
relosByID[relo.typeID] = append(relos, relo)
}
return relocations, nil
// Ensure we work on relocations in a deterministic order.
sort.Slice(ids, func(i, j int) bool {
return ids[i] < ids[j]
})
for _, id := range ids {
if int(id) >= len(local.types) {
return nil, fmt.Errorf("invalid type id %d", id)
}
localType := local.types[id]
named, ok := localType.(namedType)
if !ok || named.name() == "" {
return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
}
relos := relosByID[id]
targets := target.namedTypes[named.essentialName()]
fixups, err := coreCalculateFixups(localType, targets, relos)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
for i, relo := range relos {
result[uint64(relo.insnOff)] = fixups[i]
}
}
return result, nil
}
var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")
// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) {
localID := local.ID()
local, err := copyType(local, skipQualifierAndTypedef)
if err != nil {
return nil, err
}
bestScore := len(relos)
var bestFixups []COREFixup
for i := range targets {
targetID := targets[i].ID()
target, err := copyType(targets[i], skipQualifierAndTypedef)
if err != nil {
return nil, err
}
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
}
if fixup.Poison || fixup.isNonExistant() {
score++
}
fixups = append(fixups, fixup)
}
if score > bestScore {
// We have a better target already, ignore this one.
continue
}
if score < bestScore {
// This is the best target yet, use it.
bestScore = score
bestFixups = fixups
continue
}
// Some other target has the same score as the current one. Make sure
// the fixups agree with each other.
for i, fixup := range bestFixups {
if !fixup.equal(fixups[i]) {
return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation)
}
}
}
if bestFixups == nil {
// Nothing at all matched, probably because there are no suitable
// targets at all. Poison everything!
bestFixups = make([]COREFixup, len(relos))
for i, relo := range relos {
bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
}
}
return bestFixups, nil
}
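The selection loop above scores each candidate target by how many fixups would be poisoned or non-existent; the lowest score wins, and equal-scoring targets must produce identical fixups or the relocation is ambiguous. A compact sketch of that rule, with integer slices standing in for fixup sets:

```go
package main

import (
	"errors"
	"fmt"
)

// pickBest mirrors the lower-score-wins / ties-must-agree logic above.
func pickBest(candidates [][]int, score func([]int) int) ([]int, error) {
	var best []int
	bestScore := int(^uint(0) >> 1) // effectively +inf
	for _, c := range candidates {
		s := score(c)
		switch {
		case s > bestScore:
			continue // a better target already exists
		case s < bestScore:
			bestScore, best = s, c
		default:
			for i := range best {
				if best[i] != c[i] {
					return nil, errors.New("ambiguous relocation")
				}
			}
		}
	}
	return best, nil
}

func main() {
	sum := func(c []int) int {
		s := 0
		for _, v := range c {
			s += v // pretend nonzero means poisoned
		}
		return s
	}
	got, err := pickBest([][]int{{1, 0}, {1, 1}}, sum)
	fmt.Println(got, err) // [1 0] <nil>
}
```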
// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{relo.kind, local, target, false}, nil
}
poison := func() (COREFixup, error) {
if relo.kind.checksForExistence() {
return fixup(1, 0)
}
return COREFixup{relo.kind, 0, 0, true}, nil
}
zero := COREFixup{}
switch relo.kind {
case reloTypeIDTarget, reloTypeSize, reloTypeExists:
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
err := coreAreTypesCompatible(local, target)
if errors.Is(err, errImpossibleRelocation) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
}
switch relo.kind {
case reloTypeExists:
return fixup(1, 1)
func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
var relos []Relocation
var matches []Type
for _, target := range targets {
switch kind {
case reloTypeIDTarget:
if localAccessor[0] != 0 {
return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
return fixup(uint32(localID), uint32(targetID))
case reloTypeSize:
localSize, err := Sizeof(local)
if err != nil {
return zero, err
}
if compat, err := coreAreTypesCompatible(local, target); err != nil {
return Relocation{}, fmt.Errorf("%s: %s", kind, err)
} else if !compat {
continue
targetSize, err := Sizeof(target)
if err != nil {
return zero, err
}
relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})
default:
return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
return fixup(uint32(localSize), uint32(targetSize))
}
matches = append(matches, target)
}
if len(relos) == 0 {
// TODO: Add switch for existence checks like reloEnumvalExists here.
case reloEnumvalValue, reloEnumvalExists:
localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
if errors.Is(err, errImpossibleRelocation) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
}
// TODO: This might have to be poisoned.
return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
}
switch relo.kind {
case reloEnumvalExists:
return fixup(1, 1)
case reloEnumvalValue:
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}
case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
if _, ok := target.(*Fwd); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
// we'll find it in one of the other iterations.
return poison()
}
localField, targetField, err := coreFindField(local, relo.accessor, target)
if errors.Is(err, errImpossibleRelocation) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("target %s: %w", target, err)
}
switch relo.kind {
case reloFieldExists:
return fixup(1, 1)
case reloFieldByteOffset:
return fixup(localField.offset/8, targetField.offset/8)
case reloFieldByteSize:
localSize, err := Sizeof(localField.Type)
if err != nil {
return zero, err
}
targetSize, err := Sizeof(targetField.Type)
if err != nil {
return zero, err
}
return fixup(uint32(localSize), uint32(targetSize))
relo := relos[0]
for _, altRelo := range relos[1:] {
if !altRelo.equal(relo) {
return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
}
}
return relo, nil
return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
}
/* coreAccessor contains a path through a struct. It contains at least one index.
@ -219,6 +482,240 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) {
return result, nil
}
func (ca coreAccessor) String() string {
strs := make([]string, 0, len(ca))
for _, i := range ca {
strs = append(strs, strconv.Itoa(i))
}
return strings.Join(strs, ":")
}
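A CO-RE accessor is a colon-separated index path such as `"0:1:2"`: the first index offsets the base pointer, the rest walk struct members or array elements. `parseCoreAccessor` above is the inverse of this `String` method; a self-contained sketch of the parse (`parseAccessor` is a hypothetical stand-in):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseAccessor(s string) ([]int, error) {
	parts := strings.Split(s, ":")
	out := make([]int, 0, len(parts))
	for _, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return nil, fmt.Errorf("accessor %q: %w", s, err)
		}
		out = append(out, n)
	}
	return out, nil
}

func main() {
	acc, err := parseAccessor("0:1:2")
	fmt.Println(acc, err) // [0 1 2] <nil>
}
```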
func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
e, ok := t.(*Enum)
if !ok {
return nil, fmt.Errorf("not an enum: %s", t)
}
if len(ca) > 1 {
return nil, fmt.Errorf("invalid accessor %s for enum", ca)
}
i := ca[0]
if i >= len(e.Values) {
return nil, fmt.Errorf("invalid index %d for %s", i, e)
}
return &e.Values[i], nil
}
type coreField struct {
Type Type
offset uint32
}
func adjustOffset(base uint32, t Type, n int) (uint32, error) {
size, err := Sizeof(t)
if err != nil {
return 0, err
}
return base + (uint32(n) * uint32(size) * 8), nil
}
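`adjustOffset` works in bits: stepping to element n of a type of `size` bytes adds `n * size * 8` to the base offset. A worked instance:

```go
package main

import "fmt"

func main() {
	// Index 2 of a 4-byte (uint32) element: 2 * 4 bytes * 8 bits = 64.
	base, size, n := uint32(0), uint32(4), uint32(2)
	fmt.Println(base + n*size*8) // 64
}
```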
// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
// The first index is used to offset a pointer of the base type like
// when accessing an array.
localOffset, err := adjustOffset(0, local, localAcc[0])
if err != nil {
return coreField{}, coreField{}, err
}
targetOffset, err := adjustOffset(0, target, localAcc[0])
if err != nil {
return coreField{}, coreField{}, err
}
if err := coreAreMembersCompatible(local, target); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
var localMaybeFlex, targetMaybeFlex bool
for _, acc := range localAcc[1:] {
switch localType := local.(type) {
case composite:
// For composite types acc is used to find the field in the local type,
// and then we try to find a field in target with the same name.
localMembers := localType.members()
if acc >= len(localMembers) {
return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
}
localMember := localMembers[acc]
if localMember.Name == "" {
_, ok := localMember.Type.(composite)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
}
// This is an anonymous struct or union, ignore it.
local = localMember.Type
localOffset += localMember.Offset
localMaybeFlex = false
continue
}
targetType, ok := target.(composite)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
}
targetMember, last, err := coreFindMember(targetType, localMember.Name)
if err != nil {
return coreField{}, coreField{}, err
}
if targetMember.BitfieldSize > 0 {
return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
}
local = localMember.Type
localMaybeFlex = acc == len(localMembers)-1
localOffset += localMember.Offset
target = targetMember.Type
targetMaybeFlex = last
targetOffset += targetMember.Offset
case *Array:
// For arrays, acc is the index in the target.
targetType, ok := target.(*Array)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
}
if localType.Nelems == 0 && !localMaybeFlex {
return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
}
if targetType.Nelems == 0 && !targetMaybeFlex {
return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
}
if localType.Nelems > 0 && acc >= int(localType.Nelems) {
return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
}
if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
}
local = localType.Type
localMaybeFlex = false
localOffset, err = adjustOffset(localOffset, local, acc)
if err != nil {
return coreField{}, coreField{}, err
}
target = targetType.Type
targetMaybeFlex = false
targetOffset, err = adjustOffset(targetOffset, target, acc)
if err != nil {
return coreField{}, coreField{}, err
}
default:
return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
}
if err := coreAreMembersCompatible(local, target); err != nil {
return coreField{}, coreField{}, err
}
}
return coreField{local, localOffset}, coreField{target, targetOffset}, nil
}
// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
func coreFindMember(typ composite, name Name) (Member, bool, error) {
if name == "" {
return Member{}, false, errors.New("can't search for anonymous member")
}
type offsetTarget struct {
composite
offset uint32
}
targets := []offsetTarget{{typ, 0}}
visited := make(map[composite]bool)
for i := 0; i < len(targets); i++ {
target := targets[i]
// Only visit targets once to prevent infinite recursion.
if visited[target] {
continue
}
if len(visited) >= maxTypeDepth {
// This check is different than libbpf, which restricts the entire
// path to BPF_CORE_SPEC_MAX_LEN items.
return Member{}, false, fmt.Errorf("type is nested too deep")
}
visited[target] = true
members := target.members()
for j, member := range members {
if member.Name == name {
// NB: This is safe because member is a copy.
member.Offset += target.offset
return member, j == len(members)-1, nil
}
// The names don't match, but this member could be an anonymous struct
// or union.
if member.Name != "" {
continue
}
comp, ok := member.Type.(composite)
if !ok {
return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
}
targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
}
}
return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}
// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
localValue, err := localAcc.enumValue(local)
if err != nil {
return nil, nil, err
}
targetEnum, ok := target.(*Enum)
if !ok {
return nil, nil, errImpossibleRelocation
}
localName := localValue.Name.essentialName()
for i, targetValue := range targetEnum.Values {
if targetValue.Name.essentialName() != localName {
continue
}
return localValue, &targetEnum.Values[i], nil
}
return nil, nil, errImpossibleRelocation
}
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
*
* Check local and target types for compatibility. This check is used for
@ -239,8 +736,10 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) {
* number of input args and compatible return and argument types.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*
* Returns errImpossibleRelocation if types are not compatible.
*/
func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
func coreAreTypesCompatible(localType Type, targetType Type) error {
var (
localTs, targetTs typeDeque
l, t = &localType, &targetType
@ -249,14 +748,14 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
if depth >= maxTypeDepth {
return false, errors.New("types are nested too deep")
return errors.New("types are nested too deep")
}
localType = skipQualifierAndTypedef(*l)
targetType = skipQualifierAndTypedef(*t)
localType = *l
targetType = *t
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return false, nil
return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
}
switch lv := (localType).(type) {
@ -266,7 +765,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return false, nil
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
}
case *Pointer, *Array:
@ -277,7 +776,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
case *FuncProto:
tv := targetType.(*FuncProto)
if len(lv.Params) != len(tv.Params) {
return false, nil
return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
}
depth++
@ -285,22 +784,24 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
targetType.walk(&targetTs)
default:
return false, fmt.Errorf("unsupported type %T", localType)
return fmt.Errorf("unsupported type %T", localType)
}
}
if l != nil {
return false, fmt.Errorf("dangling local type %T", *l)
return fmt.Errorf("dangling local type %T", *l)
}
if t != nil {
return false, fmt.Errorf("dangling target type %T", *t)
return fmt.Errorf("dangling target type %T", *t)
}
return true, nil
return nil
}
/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
*
* The comment below is from bpf_core_fields_are_compat in libbpf.c:
*
* Check two types for compatibility for the purpose of field access
* relocation. const/volatile/restrict and typedefs are skipped to ensure we
@ -314,65 +815,63 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
* - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* [ NB: coreAreMembersCompatible doesn't recurse, this check is done
* by coreFindField. ]
* - everything else shouldn't be ever a target of relocation.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*
* Returns errImpossibleRelocation if the members are not compatible.
*/
func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
doNamesMatch := func(a, b string) bool {
func coreAreMembersCompatible(localType Type, targetType Type) error {
doNamesMatch := func(a, b string) error {
if a == "" || b == "" {
// allow anonymous and named type to match
return true
return nil
}
return essentialName(a) == essentialName(b)
if essentialName(a) == essentialName(b) {
return nil
}
return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
}
for depth := 0; depth <= maxTypeDepth; depth++ {
localType = skipQualifierAndTypedef(localType)
targetType = skipQualifierAndTypedef(targetType)
_, lok := localType.(composite)
_, tok := targetType.(composite)
if lok && tok {
return true, nil
}
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return false, nil
}
switch lv := localType.(type) {
case *Pointer:
return true, nil
case *Enum:
tv := targetType.(*Enum)
return doNamesMatch(lv.name(), tv.name()), nil
case *Fwd:
tv := targetType.(*Fwd)
return doNamesMatch(lv.name(), tv.name()), nil
case *Int:
tv := targetType.(*Int)
return !lv.isBitfield() && !tv.isBitfield(), nil
case *Array:
tv := targetType.(*Array)
localType = lv.Type
targetType = tv.Type
default:
return false, fmt.Errorf("unsupported type %T", localType)
}
_, lok := localType.(composite)
_, tok := targetType.(composite)
if lok && tok {
return nil
}
return false, errors.New("types are nested too deep")
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
}
switch lv := localType.(type) {
case *Array, *Pointer:
return nil
case *Enum:
tv := targetType.(*Enum)
return doNamesMatch(lv.name(), tv.name())
case *Fwd:
tv := targetType.(*Fwd)
return doNamesMatch(lv.name(), tv.name())
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
}
return nil
default:
return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
}
}
func skipQualifierAndTypedef(typ Type) Type {
func skipQualifierAndTypedef(typ Type) (Type, error) {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
@ -381,8 +880,8 @@ func skipQualifierAndTypedef(typ Type) Type {
case *Typedef:
result = v.Type
default:
return result
return result, nil
}
}
return typ
return nil, errors.New("exceeded type depth")
}


@ -30,7 +30,7 @@ type btfExtCoreHeader struct {
CoreReloLen uint32
}
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) {
var header btfExtHeader
var coreHeader btfExtCoreHeader
if err := binary.Read(r, bo, &header); err != nil {
@ -94,13 +94,13 @@ func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (f
return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
}
coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
if err != nil {
return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
}
}
return funcInfo, lineInfo, coreRelos, nil
return funcInfo, lineInfo, relos, nil
}
type btfExtInfoSec struct {
@ -208,18 +208,25 @@ type bpfCoreRelo struct {
InsnOff uint32
TypeID TypeID
AccessStrOff uint32
ReloKind coreReloKind
Kind COREKind
}
type bpfCoreRelos []bpfCoreRelo
type coreRelo struct {
insnOff uint32
typeID TypeID
accessor coreAccessor
kind COREKind
}
type coreRelos []coreRelo
// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
// by offset.
func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
result := make([]bpfCoreRelo, 0, len(r)+len(other))
func (r coreRelos) append(other coreRelos, offset uint64) coreRelos {
result := make([]coreRelo, 0, len(r)+len(other))
result = append(result, r...)
for _, relo := range other {
relo.InsnOff += uint32(offset)
relo.insnOff += uint32(offset)
result = append(result, relo)
}
return result
@ -227,7 +234,7 @@ func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
var extInfoReloSize = binary.Size(bpfCoreRelo{})
func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) {
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, fmt.Errorf("read record size: %v", err)
@ -237,14 +244,14 @@ func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (m
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
}
result := make(map[string]bpfCoreRelos)
result := make(map[string]coreRelos)
for {
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
var relos []bpfCoreRelo
var relos coreRelos
for i := uint32(0); i < infoHeader.NumInfo; i++ {
var relo bpfCoreRelo
if err := binary.Read(r, bo, &relo); err != nil {
@ -255,7 +262,22 @@ func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (m
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
}
relos = append(relos, relo)
accessorStr, err := strings.Lookup(relo.AccessStrOff)
if err != nil {
return nil, err
}
accessor, err := parseCoreAccessor(accessorStr)
if err != nil {
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
}
relos = append(relos, coreRelo{
relo.InsnOff,
relo.TypeID,
accessor,
relo.Kind,
})
}
result[secName] = relos
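
The accessor string looked up above is a colon-separated list of member indices, e.g. "0:2:1". A hedged sketch of the parsing that parseCoreAccessor performs (the real function returns the package's own coreAccessor type):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseAccessor turns "0:2:1" into []int{0, 2, 1}: element 0 of the root
// type, then member 2, then member 1. Sketch only.
func parseAccessor(s string) ([]int, error) {
	parts := strings.Split(s, ":")
	acc := make([]int, 0, len(parts))
	for _, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return nil, fmt.Errorf("accessor %q: %w", s, err)
		}
		acc = append(acc, n)
	}
	return acc, nil
}

func main() {
	acc, err := parseAccessor("0:2:1")
	fmt.Println(acc, err) // [0 2 1] <nil>
}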

View file

@ -1,7 +1,6 @@
package btf
import (
"errors"
"fmt"
"math"
"strings"
@ -37,6 +36,7 @@ type Type interface {
type namedType interface {
Type
name() string
essentialName() string
}
// Name identifies a type.
@ -48,6 +48,10 @@ func (n Name) name() string {
return string(n)
}
func (n Name) essentialName() string {
return essentialName(string(n))
}
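
essentialName strips a CO-RE "flavor" suffix — everything from the last "___" on — so that a local type like task_struct___v2 matches the kernel's task_struct. A standalone sketch of the presumed behaviour (the actual implementation lives elsewhere in the package):

package main

import (
	"fmt"
	"strings"
)

// Sketch, assuming the usual libbpf convention: drop the "___flavor" suffix.
func essentialName(name string) string {
	if i := strings.LastIndex(name, "___"); i > 0 {
		return name[:i]
	}
	return name
}

func main() {
	fmt.Println(essentialName("task_struct___v2")) // task_struct
	fmt.Println(essentialName("task_struct"))      // task_struct
}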
// Void is the unit type of BTF.
type Void struct{}
@ -174,8 +178,7 @@ func (s *Struct) walk(tdq *typeDeque) {
func (s *Struct) copy() Type {
cpy := *s
cpy.Members = make([]Member, len(s.Members))
copy(cpy.Members, s.Members)
cpy.Members = copyMembers(s.Members)
return &cpy
}
@ -206,8 +209,7 @@ func (u *Union) walk(tdq *typeDeque) {
func (u *Union) copy() Type {
cpy := *u
cpy.Members = make([]Member, len(u.Members))
copy(cpy.Members, u.Members)
cpy.Members = copyMembers(u.Members)
return &cpy
}
@ -215,6 +217,12 @@ func (u *Union) members() []Member {
return u.Members
}
func copyMembers(orig []Member) []Member {
cpy := make([]Member, len(orig))
copy(cpy, orig)
return cpy
}
type composite interface {
members() []Member
}
@ -372,11 +380,12 @@ func (r *Restrict) copy() Type {
type Func struct {
TypeID
Name
Type Type
Type Type
Linkage FuncLinkage
}
func (f *Func) String() string {
return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID())
return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID())
}
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
@ -425,12 +434,12 @@ type FuncParam struct {
type Var struct {
TypeID
Name
Type Type
Type Type
Linkage VarLinkage
}
func (v *Var) String() string {
// TODO: Linkage
return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name)
return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name)
}
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
@ -511,7 +520,7 @@ func Sizeof(typ Type) (int, error) {
switch v := typ.(type) {
case *Array:
if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
return 0, errors.New("overflow")
return 0, fmt.Errorf("type %s: overflow", typ)
}
// Arrays may be of zero length, which allows
@ -532,28 +541,30 @@ func Sizeof(typ Type) (int, error) {
continue
default:
return 0, fmt.Errorf("unrecognized type %T", typ)
return 0, fmt.Errorf("unsized type %T", typ)
}
if n > 0 && elem > math.MaxInt64/n {
return 0, errors.New("overflow")
return 0, fmt.Errorf("type %s: overflow", typ)
}
size := n * elem
if int64(int(size)) != size {
return 0, errors.New("overflow")
return 0, fmt.Errorf("type %s: overflow", typ)
}
return int(size), nil
}
return 0, errors.New("exceeded type depth")
return 0, fmt.Errorf("type %s: exceeded type depth", typ)
}
// copy a Type recursively.
//
// typ may form a cycle.
func copyType(typ Type) Type {
//
// Returns any errors from transform verbatim.
func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) {
var (
copies = make(map[Type]Type)
work typeDeque
@ -566,7 +577,17 @@ func copyType(typ Type) Type {
continue
}
cpy := (*t).copy()
var cpy Type
if transform != nil {
tf, err := transform(*t)
if err != nil {
return nil, fmt.Errorf("copy %s: %w", typ, err)
}
cpy = tf.copy()
} else {
cpy = (*t).copy()
}
copies[*t] = cpy
*t = cpy
@ -574,7 +595,7 @@ func copyType(typ Type) Type {
cpy.walk(&work)
}
return typ
return typ, nil
}
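
The transform hook runs on every node before it is copied, and any error aborts the whole copy. A minimal standalone sketch of the same pattern on a toy linked structure (cycle-safe via the copies map, like the real copyType; not the package's implementation):

package main

import "fmt"

type node struct {
	name string
	next *node
}

// copyList copies head recursively, applying transform to each node first.
func copyList(head *node, transform func(*node) (*node, error)) (*node, error) {
	copies := make(map[*node]*node)
	var rec func(*node) (*node, error)
	rec = func(n *node) (*node, error) {
		if n == nil {
			return nil, nil
		}
		if c, ok := copies[n]; ok {
			return c, nil // already copied: handles cycles
		}
		src := n
		if transform != nil {
			t, err := transform(n)
			if err != nil {
				return nil, fmt.Errorf("copy %s: %w", n.name, err)
			}
			src = t
		}
		cpy := &node{name: src.name}
		copies[n] = cpy // record before recursing, again for cycles
		next, err := rec(src.next)
		if err != nil {
			return nil, err
		}
		cpy.next = next
		return cpy, nil
	}
	return rec(head)
}

func main() {
	l := &node{name: "a", next: &node{name: "b"}}
	cpy, err := copyList(l, func(n *node) (*node, error) {
		return &node{name: n.name + "'", next: n.next}, nil
	})
	fmt.Println(cpy.name, cpy.next.name, err) // a' b' <nil>
}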
// typeDeque keeps track of pointers to types which still
@ -783,7 +804,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
typ = restrict
case kindFunc:
fn := &Func{id, name, nil}
fn := &Func{id, name, nil, raw.Linkage()}
fixup(raw.Type(), kindFuncProto, &fn.Type)
typ = fn
@ -808,7 +829,8 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
typ = fp
case kindVar:
v := &Var{id, name, nil}
variable := raw.data.(*btfVariable)
v := &Var{id, name, nil, VarLinkage(variable.Linkage)}
fixup(raw.Type(), kindUnknown, &v.Type)
typ = v

View file

@ -50,3 +50,19 @@ func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
syms, err = se.File.Symbols()
return
}
// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
defer func() {
r := recover()
if r == nil {
return
}
syms = nil
err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
}()
syms, err = se.File.DynamicSymbols()
return
}
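
debug/elf can panic on malformed inputs, so the wrapper converts the panic into an ordinary error. The same recover-to-error pattern, sketched standalone:

package main

import (
	"debug/elf"
	"fmt"
)

// safeDynamicSymbols mirrors SafeELFFile.DynamicSymbols: a panic inside
// debug/elf becomes a regular error instead of crashing the caller.
func safeDynamicSymbols(f *elf.File) (syms []elf.Symbol, err error) {
	defer func() {
		if r := recover(); r != nil {
			syms, err = nil, fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
		}
	}()
	return f.DynamicSymbols()
}

func main() {
	f, err := elf.Open("/bin/ls") // any ELF binary will do
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	syms, err := safeDynamicSymbols(f)
	fmt.Println(len(syms), err)
}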

View file

@ -9,11 +9,16 @@ import (
// depending on the host's endianness.
var NativeEndian binary.ByteOrder
// Clang is set to either "el" or "eb" depending on the host's endianness.
var ClangEndian string
func init() {
if isBigEndian() {
NativeEndian = binary.BigEndian
ClangEndian = "eb"
} else {
NativeEndian = binary.LittleEndian
ClangEndian = "el"
}
}
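
ClangEndian exists so callers can pick the matching clang BPF target: "bpfel" on little-endian hosts, "bpfeb" on big-endian ones. A standalone sketch of the same probe (isBigEndian itself is defined elsewhere in the package):

package main

import (
	"fmt"
	"unsafe"
)

// clangEndian inspects the first byte of a known 16-bit value: on a
// little-endian host the least-significant byte comes first.
func clangEndian() string {
	x := uint16(0x0102)
	if *(*byte)(unsafe.Pointer(&x)) == 0x02 {
		return "el"
	}
	return "eb"
}

func main() {
	fmt.Printf("clang -target bpf%s ...\n", clangEndian())
}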

View file

@ -29,6 +29,10 @@ type VerifierError struct {
log string
}
func (le *VerifierError) Unwrap() error {
return le.cause
}
func (le *VerifierError) Error() string {
if le.log == "" {
return le.cause.Error()

View file

@ -22,10 +22,6 @@ func NewSlicePointer(buf []byte) Pointer {
// NewStringPointer creates a 64-bit pointer from a string.
func NewStringPointer(str string) Pointer {
if str == "" {
return Pointer{}
}
p, err := unix.BytePtrFromString(str)
if err != nil {
return Pointer{}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"path/filepath"
"runtime"
"syscall"
"unsafe"
"github.com/cilium/ebpf/internal/unix"
@ -61,7 +62,7 @@ func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
var err error
if errNo != 0 {
err = errNo
err = wrappedErrno{errNo}
}
return r1, err
@ -178,3 +179,67 @@ func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
}
return nil
}
// BPFObjName is a null-terminated string made up of
// 'A-Za-z0-9_' characters.
type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte
// NewBPFObjName truncates the result if it is too long.
func NewBPFObjName(name string) BPFObjName {
var result BPFObjName
copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
return result
}
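
Copying at most BPF_OBJ_NAME_LEN-1 bytes guarantees the array always ends in a NUL; longer names are silently truncated. A standalone sketch:

package main

import "fmt"

const objNameLen = 16 // unix.BPF_OBJ_NAME_LEN on current kernels

func newObjName(name string) [objNameLen]byte {
	var result [objNameLen]byte
	copy(result[:objNameLen-1], name) // leave the final byte as NUL
	return result
}

func main() {
	n := newObjName("a_rather_long_program_name")
	fmt.Printf("%q\n", n[:]) // "a_rather_long_p\x00"
}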
type BPFMapCreateAttr struct {
MapType uint32
KeySize uint32
ValueSize uint32
MaxEntries uint32
Flags uint32
InnerMapFd uint32 // since 4.12 56f668dfe00d
NumaNode uint32 // since 4.14 96eabe7a40aa
MapName BPFObjName // since 4.15 ad5b177bd73f
MapIfIndex uint32
BTFFd uint32
BTFKeyTypeID uint32
BTFValueTypeID uint32
}
func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) {
fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(uint32(fd)), nil
}
// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
// syscall.E* or unix.E* constants.
//
// You should never export an error of this type.
type wrappedErrno struct {
syscall.Errno
}
func (we wrappedErrno) Unwrap() error {
return we.Errno
}
type syscallError struct {
error
errno syscall.Errno
}
func SyscallError(err error, errno syscall.Errno) error {
return &syscallError{err, errno}
}
func (se *syscallError) Is(target error) bool {
return target == se.error
}
func (se *syscallError) Unwrap() error {
return se.errno
}
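
The effect of the wrapper is visible with errors.Is: direct comparisons against unix.E*/syscall.E* constants stop matching, while errors.Is keeps working through Unwrap. A runnable sketch with the same shape:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

type wrappedErrno struct{ syscall.Errno }

func (we wrappedErrno) Unwrap() error { return we.Errno }

func main() {
	var err error = wrappedErrno{syscall.EINVAL}

	fmt.Println(err == error(syscall.EINVAL))   // false: direct comparison is defeated
	fmt.Println(errors.Is(err, syscall.EINVAL)) // true: errors.Is unwraps to the Errno
}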

View file

@ -31,6 +31,8 @@ const (
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
BPF_TAG_SIZE = linux.BPF_TAG_SIZE
SYS_BPF = linux.SYS_BPF
@ -42,6 +44,7 @@ const (
PROT_READ = linux.PROT_READ
PROT_WRITE = linux.PROT_WRITE
MAP_SHARED = linux.MAP_SHARED
PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT

View file

@ -31,6 +31,8 @@ const (
BPF_F_RDONLY_PROG = 0
BPF_F_WRONLY_PROG = 0
BPF_F_SLEEPABLE = 0
BPF_F_MMAPABLE = 0
BPF_F_INNER_MAP = 0
BPF_OBJ_NAME_LEN = 0x10
BPF_TAG_SIZE = 0x8
SYS_BPF = 321
@ -43,6 +45,7 @@ const (
PROT_READ = 0x1
PROT_WRITE = 0x2
MAP_SHARED = 0x1
PERF_ATTR_SIZE_VER1 = 0
PERF_TYPE_SOFTWARE = 0x1
PERF_TYPE_TRACEPOINT = 0
PERF_COUNT_SW_BPF_OUTPUT = 0xa

View file

@ -3,8 +3,10 @@ package link
import (
"fmt"
"io"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
type IterOptions struct {
@ -15,19 +17,45 @@ type IterOptions struct {
// AttachTo requires the kernel to include BTF of itself,
// and to be compiled with a recent pahole (>= 1.16).

Program *ebpf.Program
// Map specifies the target map for bpf_map_elem and sockmap iterators.
// It may be nil.
Map *ebpf.Map
}
// AttachIter attaches a BPF seq_file iterator.
func AttachIter(opts IterOptions) (*Iter, error) {
link, err := AttachRawLink(RawLinkOptions{
Program: opts.Program,
Attach: ebpf.AttachTraceIter,
})
if err := haveBPFLink(); err != nil {
return nil, err
}
progFd := opts.Program.FD()
if progFd < 0 {
return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
}
var info bpfIterLinkInfoMap
if opts.Map != nil {
mapFd := opts.Map.FD()
if mapFd < 0 {
return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd)
}
info.map_fd = uint32(mapFd)
}
attr := bpfLinkCreateIterAttr{
prog_fd: uint32(progFd),
attach_type: ebpf.AttachTraceIter,
iter_info: internal.NewPointer(unsafe.Pointer(&info)),
iter_info_len: uint32(unsafe.Sizeof(info)),
}
fd, err := bpfLinkCreateIter(&attr)
if err != nil {
return nil, fmt.Errorf("can't link iterator: %w", err)
}
return &Iter{*link}, err
return &Iter{RawLink{fd, ""}}, err
}
// LoadPinnedIter loads a pinned iterator from a bpffs.
@ -65,3 +93,8 @@ func (it *Iter) Open() (io.ReadCloser, error) {
return fd.File("bpf_iter"), nil
}
// union bpf_iter_link_info.map
type bpfIterLinkInfoMap struct {
map_fd uint32
}
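
A hedged usage sketch of the new Map field: walk a map's elements through a seq_file iterator. prog is assumed to be a loaded "iter/bpf_map_elem" program (attach type AttachTraceIter) and m the map to walk; wiring those up is elided:

package main

import (
	"io"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func dumpMap(prog *ebpf.Program, m *ebpf.Map) error {
	it, err := link.AttachIter(link.IterOptions{
		Program: prog,
		Map:     m, // target for bpf_map_elem/sockmap iterators
	})
	if err != nil {
		return err
	}
	defer it.Close()

	r, err := it.Open() // io.ReadCloser over the kernel's seq_file output
	if err != nil {
		return err
	}
	defer r.Close()

	_, err = io.Copy(os.Stdout, r)
	return err
}

func main() {} // program/map setup elided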

View file

@ -1,12 +1,16 @@
package link
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
@ -15,13 +19,60 @@ import (
var (
kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
kprobeRetprobeBit = struct {
once sync.Once
value uint64
err error
}{}
)
type probeType uint8
const (
kprobeType probeType = iota
uprobeType
)
func (pt probeType) String() string {
if pt == kprobeType {
return "kprobe"
}
return "uprobe"
}
func (pt probeType) EventsPath() string {
if pt == kprobeType {
return kprobeEventsPath
}
return uprobeEventsPath
}
func (pt probeType) PerfEventType(ret bool) perfEventType {
if pt == kprobeType {
if ret {
return kretprobeEvent
}
return kprobeEvent
}
if ret {
return uretprobeEvent
}
return uprobeEvent
}
func (pt probeType) RetprobeBit() (uint64, error) {
if pt == kprobeType {
return kretprobeBit()
}
return uretprobeBit()
}
// Kprobe attaches the given eBPF program to a perf event that fires when the
// given kernel symbol starts executing. See /proc/kallsyms for available
// symbols. For example, printk():
//
// Kprobe("printk")
// Kprobe("printk", prog)
//
// The resulting Link must be Closed during program shutdown to avoid leaking
// system resources.
@ -44,7 +95,7 @@ func Kprobe(symbol string, prog *ebpf.Program) (Link, error) {
// before the given kernel symbol exits, with the function stack left intact.
// See /proc/kallsyms for available symbols. For example, printk():
//
// Kretprobe("printk")
// Kretprobe("printk", prog)
//
// The resulting Link must be Closed during program shutdown to avoid leaking
// system resources.
@ -80,7 +131,10 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
}
// Use kprobe PMU if the kernel has it available.
tp, err := pmuKprobe(symbol, ret)
tp, err := pmuKprobe(platformPrefix(symbol), ret)
if errors.Is(err, os.ErrNotExist) {
tp, err = pmuKprobe(symbol, ret)
}
if err == nil {
return tp, nil
}
@ -89,7 +143,10 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
}
// Use tracefs if kprobe PMU is missing.
tp, err = tracefsKprobe(symbol, ret)
tp, err = tracefsKprobe(platformPrefix(symbol), ret)
if errors.Is(err, os.ErrNotExist) {
tp, err = tracefsKprobe(symbol, ret)
}
if err != nil {
return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
}
@ -97,36 +154,70 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
return tp, nil
}
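
From the caller's side the fallback chain is transparent; a hedged usage sketch, assuming prog is a loaded ebpf.Kprobe-type program:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachExecve tries __x64_sys_execve first on amd64 (via platformPrefix)
// and falls back to the bare symbol, exactly as kprobe() above does.
func attachExecve(prog *ebpf.Program) (link.Link, error) {
	kp, err := link.Kprobe("sys_execve", prog)
	if errors.Is(err, os.ErrNotExist) {
		return nil, fmt.Errorf("symbol missing from this kernel: %w", err)
	}
	return kp, err
}

func main() {} // program setup elided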
// pmuKprobe opens a perf event based on a Performance Monitoring Unit.
// Requires at least 4.17 (e12f03d7031a "perf/core: Implement the
// 'perf_kprobe' PMU").
// Returns ErrNotSupported if the kernel doesn't support perf_kprobe PMU,
// or os.ErrNotExist if the given symbol does not exist in the kernel.
// pmuKprobe opens a perf event based on the kprobe PMU.
// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
func pmuKprobe(symbol string, ret bool) (*perfEvent, error) {
return pmuProbe(kprobeType, symbol, "", 0, ret)
}
// pmuProbe opens a perf event based on a Performance Monitoring Unit.
//
// Requires at least a 4.17 kernel.
// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
//
// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
// Getting the PMU type will fail if the kernel doesn't support
// the perf_kprobe PMU.
et, err := getPMUEventType("kprobe")
// the perf_[k,u]probe PMU.
et, err := getPMUEventType(typ)
if err != nil {
return nil, err
}
// Create a pointer to a NUL-terminated string for the kernel.
sp, err := unsafeStringPtr(symbol)
if err != nil {
return nil, err
}
// TODO: Parse the position of the bit from /sys/bus/event_source/devices/%s/format/retprobe.
config := 0
var config uint64
if ret {
config = 1
bit, err := typ.RetprobeBit()
if err != nil {
return nil, err
}
config |= 1 << bit
}
attr := unix.PerfEventAttr{
Type: uint32(et), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
Config: uint64(config), // perf_kprobe PMU treats config as flags
var (
attr unix.PerfEventAttr
sp unsafe.Pointer
)
switch typ {
case kprobeType:
// Create a pointer to a NUL-terminated string for the kernel.
sp, err = unsafeStringPtr(symbol) // assign, don't shadow the sp declared above
if err != nil {
return nil, err
}
attr = unix.PerfEventAttr{
Type: uint32(et), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
Config: config, // Retprobe flag
}
case uprobeType:
sp, err = unsafeStringPtr(path) // assign, don't shadow the sp declared above
if err != nil {
return nil, err
}
attr = unix.PerfEventAttr{
// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
// since it added the config2 (Ext2) field. The Size field controls the
// size of the internal buffer the kernel allocates for reading the
// perf_event_attr argument from userspace.
Size: unix.PERF_ATTR_SIZE_VER1,
Type: uint32(et), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Uprobe path
Ext2: offset, // Uprobe offset
Config: config, // Retprobe flag
}
}
fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
@ -144,22 +235,27 @@ func pmuKprobe(symbol string, ret bool) (*perfEvent, error) {
// Ensure the string pointer is not collected before PerfEventOpen returns.
runtime.KeepAlive(sp)
// Kernel has perf_kprobe PMU available, initialize perf event.
// Kernel has perf_[k,u]probe PMU available, initialize perf event.
return &perfEvent{
fd: internal.NewFD(uint32(fd)),
pmuID: et,
name: symbol,
ret: ret,
progType: ebpf.Kprobe,
fd: internal.NewFD(uint32(fd)),
pmuID: et,
name: symbol,
typ: typ.PerfEventType(ret),
}, nil
}
// tracefsKprobe creates a trace event by writing an entry to <tracefs>/kprobe_events.
// A new trace event group name is generated on every call to support creating
// multiple trace events for the same kernel symbol. A perf event is then opened
// on the newly-created trace event and returned to the caller.
// tracefsKprobe creates a Kprobe tracefs entry.
func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
return tracefsProbe(kprobeType, symbol, "", 0, ret)
}
// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
// A new trace event group name is generated on every call to support creating
// multiple trace events for the same kernel or userspace symbol.
// Path and offset are only set in the case of uprobe(s) and are used to set
// the executable/library path on the filesystem and the offset where the probe is inserted.
// A perf event is then opened on the newly-created trace event and returned to the caller.
func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
// Generate a random string for each trace event we attempt to create.
// This value is used as the 'group' token in tracefs to allow creating
// multiple kprobe trace events with the same name.
@ -176,14 +272,13 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
if err == nil {
return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol)
}
// The read is expected to fail with ErrNotSupported due to a non-existing event.
if err != nil && !errors.Is(err, ErrNotSupported) {
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err)
}
// Create the kprobe trace event using tracefs.
if err := createTraceFSKprobeEvent(group, symbol, ret); err != nil {
return nil, fmt.Errorf("creating kprobe event on tracefs: %w", err)
// Create the [k,u]probe trace event using tracefs.
if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil {
return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
}
// Get the newly-created trace event's id.
@ -202,65 +297,83 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
fd: fd,
group: group,
name: symbol,
ret: ret,
tracefsID: tid,
progType: ebpf.Kprobe, // kernel only allows attaching kprobe programs to kprobe events
typ: typ.PerfEventType(ret),
}, nil
}
// createTraceFSKprobeEvent creates a new ephemeral trace event by writing to
// <tracefs>/kprobe_events. Returns ErrNotSupported if symbol is not a valid
// kernel symbol, or if it is not traceable with kprobes.
func createTraceFSKprobeEvent(group, symbol string, ret bool) error {
// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
// <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
// if a probe with the same group and symbol already exists.
func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error {
// Open the kprobe_events file in tracefs.
f, err := os.OpenFile(kprobeEventsPath, os.O_APPEND|os.O_WRONLY, 0666)
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error opening kprobe_events: %w", err)
return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
}
defer f.Close()
// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
// -:[GRP/]EVENT : Clear a probe
//
// Some examples:
// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
// p:ebpf_5678/p_my_kprobe __x64_sys_execve
//
// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
// kernel default to NR_CPUS. This is desired in most eBPF cases since
// subsampling or rate limiting logic can be more accurately implemented in
// the eBPF program itself. See Documentation/kprobes.txt for more details.
pe := fmt.Sprintf("%s:%s/%s %s", kprobePrefix(ret), group, symbol, symbol)
var pe string
switch typ {
case kprobeType:
// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
// -:[GRP/]EVENT : Clear a probe
//
// Some examples:
// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
// p:ebpf_5678/p_my_kprobe __x64_sys_execve
//
// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
// kernel default to NR_CPUS. This is desired in most eBPF cases since
// subsampling or rate limiting logic can be more accurately implemented in
// the eBPF program itself.
// See Documentation/kprobes.txt for more details.
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol)
case uprobeType:
// The uprobe_events syntax is as follows:
// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
// -:[GRP/]EVENT : Clear a probe
//
// Some examples:
// r:ebpf_1234/readline /bin/bash:0x12345
// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345
//
// See Documentation/trace/uprobetracer.txt for more details.
pathOffset := uprobePathOffset(path, offset)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset)
}
_, err = f.WriteString(pe)
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
// is returned to the caller.
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
return fmt.Errorf("kernel symbol %s not found: %w", symbol, os.ErrNotExist)
return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist)
}
if err != nil {
return fmt.Errorf("writing '%s' to kprobe_events: %w", pe, err)
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
}
return nil
}
// closeTraceFSKprobeEvent removes the kprobe with the given group, symbol and kind
// from <tracefs>/kprobe_events.
func closeTraceFSKprobeEvent(group, symbol string) error {
f, err := os.OpenFile(kprobeEventsPath, os.O_APPEND|os.O_WRONLY, 0666)
// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
// from <tracefs>/[k,u]probe_events.
func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error opening kprobe_events: %w", err)
return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
}
defer f.Close()
// See kprobe_events syntax above. Kprobe type does not need to be specified
// See [k,u]probe_events syntax above. The probe type does not need to be specified
// for removals.
pe := fmt.Sprintf("-:%s/%s", group, symbol)
if _, err = f.WriteString(pe); err != nil {
return fmt.Errorf("writing '%s' to kprobe_events: %w", pe, err)
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
}
return nil
@ -288,9 +401,38 @@ func randomGroup(prefix string) (string, error) {
return group, nil
}
func kprobePrefix(ret bool) string {
func probePrefix(ret bool) string {
if ret {
return "r"
}
return "p"
}
// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit
// from /sys/bus/event_source/devices/<pmu>/format/retprobe.
func determineRetprobeBit(typ probeType) (uint64, error) {
p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe")
data, err := ioutil.ReadFile(p)
if err != nil {
return 0, err
}
var rp uint64
n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
if err != nil {
return 0, fmt.Errorf("parse retprobe bit: %w", err)
}
if n != 1 {
return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
}
return rp, nil
}
func kretprobeBit() (uint64, error) {
kprobeRetprobeBit.once.Do(func() {
kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType)
})
return kprobeRetprobeBit.value, kprobeRetprobeBit.err
}
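
Putting the two pieces together: the sysfs format file contains a line like "config:0", and the parsed bit position becomes the retprobe flag in perf_event_attr.config. A runnable sketch of that round trip:

package main

import (
	"bytes"
	"fmt"
)

// parseRetprobeBit mirrors the Sscanf parsing in determineRetprobeBit.
func parseRetprobeBit(data []byte) (uint64, error) {
	var rp uint64
	n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
	if err != nil {
		return 0, fmt.Errorf("parse retprobe bit: %w", err)
	}
	if n != 1 {
		return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
	}
	return rp, nil
}

func main() {
	bit, _ := parseRetprobeBit([]byte("config:0\n"))
	var config uint64
	config |= 1 << bit // as done in pmuProbe when ret is true
	fmt.Printf("perf_event_attr.config = %#x\n", config) // 0x1
}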

View file

@ -31,6 +31,10 @@ import (
// exported kernel symbols. kprobe-based (tracefs) trace events can be
// created system-wide by writing to the <tracefs>/kprobe_events file, or
// they can be scoped to the current process by creating PMU perf events.
// - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
// and offsets. uprobe-based (tracefs) trace events can be
// created system-wide by writing to the <tracefs>/uprobe_events file, or
// they can be scoped to the current process by creating PMU perf events.
// - perf event: An object instantiated based on an existing trace event or
// kernel symbol. Referred to by fd in userspace.
// Exactly one eBPF program can be attached to a perf event. Multiple perf
@ -52,6 +56,16 @@ const (
perfAllThreads = -1
)
type perfEventType uint8
const (
tracepointEvent perfEventType = iota
kprobeEvent
kretprobeEvent
uprobeEvent
uretprobeEvent
)
// A perfEvent represents a perf event kernel object. Exactly one eBPF program
// can be attached to it. It is created based on a tracefs trace event or a
// Performance Monitoring Unit (PMU).
@ -66,11 +80,10 @@ type perfEvent struct {
// ID of the trace event read from tracefs. Valid IDs are non-zero.
tracefsID uint64
// True for kretprobes/uretprobes.
ret bool
// The event type determines the types of programs that can be attached.
typ perfEventType
fd *internal.FD
progType ebpf.ProgramType
fd *internal.FD
}
func (pe *perfEvent) isLink() {}
@ -117,13 +130,18 @@ func (pe *perfEvent) Close() error {
return fmt.Errorf("closing perf event fd: %w", err)
}
switch t := pe.progType; t {
case ebpf.Kprobe:
// For kprobes created using tracefs, clean up the <tracefs>/kprobe_events entry.
switch pe.typ {
case kprobeEvent, kretprobeEvent:
// Clean up kprobe tracefs entry.
if pe.tracefsID != 0 {
return closeTraceFSKprobeEvent(pe.group, pe.name)
return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
}
case ebpf.TracePoint:
case uprobeEvent, uretprobeEvent:
// Clean up uprobe tracefs entry.
if pe.tracefsID != 0 {
return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
}
case tracepointEvent:
// Tracepoint trace events don't hold any extra resources.
return nil
}
@ -141,12 +159,21 @@ func (pe *perfEvent) attach(prog *ebpf.Program) error {
if pe.fd == nil {
return errors.New("cannot attach to nil perf event")
}
if t := prog.Type(); t != pe.progType {
return fmt.Errorf("invalid program type (expected %s): %s", pe.progType, t)
}
if prog.FD() < 0 {
return fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
}
switch pe.typ {
case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
if t := prog.Type(); t != ebpf.Kprobe {
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
}
case tracepointEvent:
if t := prog.Type(); t != ebpf.TracePoint {
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
}
default:
return fmt.Errorf("unknown perf event type: %d", pe.typ)
}
// The ioctl below will fail when the fd is invalid.
kfd, _ := pe.fd.Value()
@ -180,8 +207,8 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) {
// group and name must be alphanumeric or underscore, as required by the kernel.
func getTraceEventID(group, name string) (uint64, error) {
tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
if errors.Is(err, ErrNotSupported) {
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, ErrNotSupported)
if errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
}
if err != nil {
return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
@ -192,20 +219,22 @@ func getTraceEventID(group, name string) (uint64, error) {
// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier)
// from /sys/bus/event_source/devices/<pmu>/type.
func getPMUEventType(pmu string) (uint64, error) {
et, err := uint64FromFile("/sys/bus/event_source/devices", pmu, "type")
if errors.Is(err, ErrNotSupported) {
return 0, fmt.Errorf("pmu type %s: %w", pmu, ErrNotSupported)
//
// Returns ErrNotSupported if the pmu type is not supported.
func getPMUEventType(typ probeType) (uint64, error) {
et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type")
if errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported)
}
if err != nil {
return 0, fmt.Errorf("reading pmu type %s: %w", pmu, err)
return 0, fmt.Errorf("reading pmu type %s: %w", typ, err)
}
return et, nil
}
// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
// kprobes created by writing to <tracefs>/kprobe_events are tracepoints
// [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
// behind the scenes, and can be attached to using these perf events.
func openTracepointPerfEvent(tid uint64) (*internal.FD, error) {
attr := unix.PerfEventAttr{
@ -228,22 +257,13 @@ func openTracepointPerfEvent(tid uint64) (*internal.FD, error) {
// and joined onto base. Returns error if base no longer prefixes the path after
// joining all components.
func uint64FromFile(base string, path ...string) (uint64, error) {
// Resolve leaf path separately for error feedback. Makes the join onto
// base more readable (can't mix with variadic args).
l := filepath.Join(path...)
p := filepath.Join(base, l)
if !strings.HasPrefix(p, base) {
return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
}
data, err := ioutil.ReadFile(p)
if os.IsNotExist(err) {
// Only echo leaf path, the base path can be prepended at the call site
// if more verbosity is required.
return 0, fmt.Errorf("symbol %s: %w", l, ErrNotSupported)
}
if err != nil {
return 0, fmt.Errorf("reading file %s: %w", p, err)
}

25 vendor/github.com/cilium/ebpf/link/platform.go generated vendored Normal file
View file

@ -0,0 +1,25 @@
package link
import (
"fmt"
"runtime"
)
func platformPrefix(symbol string) string {
prefix := runtime.GOARCH
// per https://github.com/golang/go/blob/master/src/go/build/syslist.go
switch prefix {
case "386":
prefix = "ia32"
case "amd64", "amd64p32":
prefix = "x64"
case "arm64", "arm64be":
prefix = "arm64"
default:
return symbol
}
return fmt.Sprintf("__%s_%s", prefix, symbol)
}

View file

@ -43,7 +43,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error {
}
if err := internal.BPFProgAttach(&attr); err != nil {
return fmt.Errorf("can't attach program: %s", err)
return fmt.Errorf("can't attach program: %w", err)
}
return nil
}
@ -69,7 +69,7 @@ func RawDetachProgram(opts RawDetachProgramOptions) error {
AttachType: uint32(opts.Attach),
}
if err := internal.BPFProgDetach(&attr); err != nil {
return fmt.Errorf("can't detach program: %s", err)
return fmt.Errorf("can't detach program: %w", err)
}
return nil

View file

@ -102,6 +102,23 @@ func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) {
return internal.NewFD(uint32(ptr)), nil
}
type bpfLinkCreateIterAttr struct {
prog_fd uint32
target_fd uint32
attach_type ebpf.AttachType
flags uint32
iter_info internal.Pointer
iter_info_len uint32
}
func bpfLinkCreateIter(attr *bpfLinkCreateIterAttr) (*internal.FD, error) {
ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return internal.NewFD(uint32(ptr)), nil
}
type bpfLinkUpdateAttr struct {
linkFd uint32
newProgFd uint32

View file

@ -11,7 +11,7 @@ import (
// tracepoints. The top-level directory is the group, the event's subdirectory
// is the name. Example:
//
// Tracepoint("syscalls", "sys_enter_fork")
// Tracepoint("syscalls", "sys_enter_fork", prog)
//
// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
// only possible as of kernel 4.14 (commit cf5f5ce).
@ -44,7 +44,7 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) {
tracefsID: tid,
group: group,
name: name,
progType: ebpf.TracePoint,
typ: tracepointEvent,
}
if err := pe.attach(prog); err != nil {

237 vendor/github.com/cilium/ebpf/link/uprobe.go generated vendored Normal file
View file

@ -0,0 +1,237 @@
package link
import (
"debug/elf"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"sync"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
var (
uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
// rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol
// as they are not allowed to be used as the EVENT token in tracefs.
rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+")
uprobeRetprobeBit = struct {
once sync.Once
value uint64
err error
}{}
)
// Executable defines an executable program on the filesystem.
type Executable struct {
// Path of the executable on the filesystem.
path string
// Parsed ELF symbols and dynamic symbols.
symbols map[string]elf.Symbol
}
// UprobeOptions defines additional parameters that will be used
// when loading Uprobes.
type UprobeOptions struct {
// Symbol offset. Must be provided in case of external symbols (shared libs).
// If set, overrides the offset eventually parsed from the executable.
Offset uint64
}
// To open a new Executable, use:
//
// OpenExecutable("/bin/bash")
//
// The returned value can then be used to open Uprobe(s).
func OpenExecutable(path string) (*Executable, error) {
if path == "" {
return nil, fmt.Errorf("path cannot be empty")
}
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("open file '%s': %w", path, err)
}
defer f.Close()
se, err := internal.NewSafeELFFile(f)
if err != nil {
return nil, fmt.Errorf("parse ELF file: %w", err)
}
var ex = Executable{
path: path,
symbols: make(map[string]elf.Symbol),
}
if err := ex.addSymbols(se.Symbols); err != nil {
return nil, err
}
if err := ex.addSymbols(se.DynamicSymbols); err != nil {
return nil, err
}
return &ex, nil
}
func (ex *Executable) addSymbols(f func() ([]elf.Symbol, error)) error {
// elf.Symbols and elf.DynamicSymbols return ErrNoSymbols if the section is not found.
syms, err := f()
if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
return err
}
for _, s := range syms {
if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
// Symbol not associated with a function or other executable code.
continue
}
ex.symbols[s.Name] = s
}
return nil
}
func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) {
if s, ok := ex.symbols[symbol]; ok {
return &s, nil
}
return nil, fmt.Errorf("symbol %s not found", symbol)
}
// Uprobe attaches the given eBPF program to a perf event that fires when the
// given symbol starts executing in the given Executable.
// For example, /bin/bash::main():
//
// ex, _ = OpenExecutable("/bin/bash")
// ex.Uprobe("main", prog, nil)
//
// When using symbols that belong to shared libraries,
// an offset must be provided via options:
//
// ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
//
// The resulting Link must be Closed during program shutdown to avoid leaking
// system resources. Functions provided by shared libraries cannot currently
// be traced and will result in an ErrNotSupported.
func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
u, err := ex.uprobe(symbol, prog, opts, false)
if err != nil {
return nil, err
}
err = u.attach(prog)
if err != nil {
u.Close()
return nil, err
}
return u, nil
}
// Uretprobe attaches the given eBPF program to a perf event that fires right
// before the given symbol exits. For example, /bin/bash::main():
//
// ex, _ = OpenExecutable("/bin/bash")
// ex.Uretprobe("main", prog, nil)
//
// When using symbols that belong to shared libraries,
// an offset must be provided via options:
//
// ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
//
// The resulting Link must be Closed during program shutdown to avoid leaking
// system resources. Functions provided by shared libraries cannot currently
// be traced and will result in an ErrNotSupported.
func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
u, err := ex.uprobe(symbol, prog, opts, true)
if err != nil {
return nil, err
}
err = u.attach(prog)
if err != nil {
u.Close()
return nil, err
}
return u, nil
}
// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
// If ret is true, create a uretprobe.
func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
if prog == nil {
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
}
if prog.Type() != ebpf.Kprobe {
return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
}
var offset uint64
if opts != nil && opts.Offset != 0 {
offset = opts.Offset
} else {
sym, err := ex.symbol(symbol)
if err != nil {
return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, err)
}
// Symbols with location 0 from section undef are shared library calls and
// are relocated before the binary is executed. Dynamic linking is not
// implemented by the library, so mark this as unsupported for now.
if sym.Section == elf.SHN_UNDEF && sym.Value == 0 {
return nil, fmt.Errorf("cannot resolve %s library call '%s', "+
"consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported)
}
offset = sym.Value
}
// Use uprobe PMU if the kernel has it available.
tp, err := pmuUprobe(symbol, ex.path, offset, ret)
if err == nil {
return tp, nil
}
if err != nil && !errors.Is(err, ErrNotSupported) {
return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
}
// Use tracefs if uprobe PMU is missing.
tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, ret)
if err != nil {
return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
}
return tp, nil
}
// pmuUprobe opens a perf event based on the uprobe PMU.
func pmuUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
return pmuProbe(uprobeType, symbol, path, offset, ret)
}
// tracefsUprobe creates a Uprobe tracefs entry.
func tracefsUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
return tracefsProbe(uprobeType, symbol, path, offset, ret)
}
// uprobeSanitizedSymbol replaces every invalid character for the tracefs api with an underscore.
func uprobeSanitizedSymbol(symbol string) string {
return rgxUprobeSymbol.ReplaceAllString(symbol, "_")
}
// uprobePathOffset creates the PATH:OFFSET token for the tracefs api.
func uprobePathOffset(path string, offset uint64) string {
return fmt.Sprintf("%s:%#x", path, offset)
}
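
A quick runnable check of both helpers' output, reusing the same regular expression as above (a versioned libc symbol is used as an assumed example input):

package main

import (
	"fmt"
	"regexp"
)

var rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+")

func main() {
	// EVENT tokens in tracefs only allow word characters, so symbols like
	// "malloc@GLIBC_2.2.5" must be sanitized first:
	fmt.Println(rgxUprobeSymbol.ReplaceAllString("malloc@GLIBC_2.2.5", "_")) // malloc_GLIBC_2_2_5

	// The probe target itself is rendered as PATH:OFFSET with a hex offset:
	fmt.Printf("%s:%#x\n", "/bin/bash", uint64(0x12345)) // /bin/bash:0x12345
}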
func uretprobeBit() (uint64, error) {
uprobeRetprobeBit.once.Do(func() {
uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType)
})
return uprobeRetprobeBit.value, uprobeRetprobeBit.err
}

View file

@ -108,12 +108,16 @@ func fixupJumpsAndCalls(insns asm.Instructions) error {
offset := iter.Offset
ins := iter.Ins
if ins.Reference == "" {
continue
}
switch {
case ins.IsFunctionCall() && ins.Constant == -1:
// Rewrite bpf to bpf call
callOffset, ok := symbolOffsets[ins.Reference]
if !ok {
return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference)
}
ins.Constant = int64(callOffset - offset - 1)
@ -122,10 +126,13 @@ func fixupJumpsAndCalls(insns asm.Instructions) error {
// Rewrite jump to label
jumpOffset, ok := symbolOffsets[ins.Reference]
if !ok {
return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference)
}
ins.Offset = int16(jumpOffset - offset - 1)
case ins.IsLoadFromMap() && ins.MapPtr() == -1:
return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference)
}
}

86 vendor/github.com/cilium/ebpf/map.go generated vendored
View file

@ -18,6 +18,7 @@ var (
ErrKeyNotExist = errors.New("key does not exist")
ErrKeyExist = errors.New("key already exists")
ErrIterationAborted = errors.New("iteration aborted")
ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map")
)
// MapOptions control loading a map into the kernel.
@ -87,6 +88,23 @@ func (ms *MapSpec) Copy() *MapSpec {
return &cpy
}
func (ms *MapSpec) clampPerfEventArraySize() error {
if ms.Type != PerfEventArray {
return nil
}
n, err := internal.PossibleCPUs()
if err != nil {
return fmt.Errorf("perf event array: %w", err)
}
if n := uint32(n); ms.MaxEntries > n {
ms.MaxEntries = n
}
return nil
}
// MapKV is used to initialize the contents of a Map.
type MapKV struct {
Key interface{}
@ -96,19 +114,19 @@ type MapKV struct {
func (ms *MapSpec) checkCompatibility(m *Map) error {
switch {
case m.typ != ms.Type:
return fmt.Errorf("expected type %v, got %v", ms.Type, m.typ)
return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible)
case m.keySize != ms.KeySize:
return fmt.Errorf("expected key size %v, got %v", ms.KeySize, m.keySize)
return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible)
case m.valueSize != ms.ValueSize:
return fmt.Errorf("expected value size %v, got %v", ms.ValueSize, m.valueSize)
return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible)
case m.maxEntries != ms.MaxEntries:
return fmt.Errorf("expected max entries %v, got %v", ms.MaxEntries, m.maxEntries)
return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible)
case m.flags != ms.Flags:
return fmt.Errorf("expected flags %v, got %v", ms.Flags, m.flags)
return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible)
}
return nil
}
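
A hedged usage sketch of the new sentinel: when loading a spec against an already-pinned map, an incompatible shape can now be detected with errors.Is. The pin path below is an assumption for illustration:

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	spec := &ebpf.MapSpec{
		Name:       "my_map",
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1024,
		Pinning:    ebpf.PinByName,
	}

	_, err := ebpf.NewMapWithOptions(spec, ebpf.MapOptions{
		PinPath: "/sys/fs/bpf", // assumed bpffs mount point
	})
	if errors.Is(err, ebpf.ErrMapIncompatible) {
		fmt.Println("pinned map exists with a different shape:", err)
	}
}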
@ -171,14 +189,16 @@ func NewMap(spec *MapSpec) (*Map, error) {
// The caller is responsible for ensuring the process' rlimit is set
// sufficiently high for locking memory during map creation. This can be done
// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMapWithOptions.
//
// May return an error wrapping ErrMapIncompatible.
func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
btfs := make(btfHandleCache)
defer btfs.close()
handles := newHandleCache()
defer handles.close()
return newMapWithOptions(spec, opts, btfs)
return newMapWithOptions(spec, opts, handles)
}
func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *Map, err error) {
func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) {
closeOnError := func(c io.Closer) {
if err != nil {
c.Close()
@ -202,7 +222,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
defer closeOnError(m)
if err := spec.checkCompatibility(m); err != nil {
return nil, fmt.Errorf("use pinned map %s: %s", spec.Name, err)
return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
}
return m, nil
@ -211,7 +231,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
// Nothing to do here
default:
return nil, fmt.Errorf("unsupported pin type %d", int(spec.Pinning))
return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported)
}
var innerFd *internal.FD
@ -224,7 +244,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
return nil, errors.New("inner maps cannot be pinned")
}
template, err := createMap(spec.InnerMap, nil, opts, btfs)
template, err := createMap(spec.InnerMap, nil, opts, handles)
if err != nil {
return nil, err
}
@ -233,7 +253,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
innerFd = template.fd
}
m, err := createMap(spec, innerFd, opts, btfs)
m, err := createMap(spec, innerFd, opts, handles)
if err != nil {
return nil, err
}
@ -249,7 +269,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
return m, nil
}
func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandleCache) (_ *Map, err error) {
func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) {
closeOnError := func(closer io.Closer) {
if err != nil {
closer.Close()
@ -296,44 +316,54 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandl
return nil, fmt.Errorf("map create: %w", err)
}
}
if spec.Flags&unix.BPF_F_MMAPABLE > 0 {
if err := haveMmapableMaps(); err != nil {
return nil, fmt.Errorf("map create: %w", err)
}
}
if spec.Flags&unix.BPF_F_INNER_MAP > 0 {
if err := haveInnerMaps(); err != nil {
return nil, fmt.Errorf("map create: %w", err)
}
}
attr := bpfMapCreateAttr{
mapType: spec.Type,
keySize: spec.KeySize,
valueSize: spec.ValueSize,
maxEntries: spec.MaxEntries,
flags: spec.Flags,
numaNode: spec.NumaNode,
attr := internal.BPFMapCreateAttr{
MapType: uint32(spec.Type),
KeySize: spec.KeySize,
ValueSize: spec.ValueSize,
MaxEntries: spec.MaxEntries,
Flags: spec.Flags,
NumaNode: spec.NumaNode,
}
if inner != nil {
var err error
attr.innerMapFd, err = inner.Value()
attr.InnerMapFd, err = inner.Value()
if err != nil {
return nil, fmt.Errorf("map create: %w", err)
}
}
if haveObjName() == nil {
attr.mapName = newBPFObjName(spec.Name)
attr.MapName = internal.NewBPFObjName(spec.Name)
}
var btfDisabled bool
if spec.BTF != nil {
handle, err := btfs.load(btf.MapSpec(spec.BTF))
handle, err := handles.btfHandle(btf.MapSpec(spec.BTF))
btfDisabled = errors.Is(err, btf.ErrNotSupported)
if err != nil && !btfDisabled {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
attr.btfFd = uint32(handle.FD())
attr.btfKeyTypeID = btf.MapKey(spec.BTF).ID()
attr.btfValueTypeID = btf.MapValue(spec.BTF).ID()
attr.BTFFd = uint32(handle.FD())
attr.BTFKeyTypeID = uint32(btf.MapKey(spec.BTF).ID())
attr.BTFValueTypeID = uint32(btf.MapValue(spec.BTF).ID())
}
}
fd, err := bpfMapCreate(&attr)
fd, err := internal.BPFMapCreate(&attr)
if err != nil {
if errors.Is(err, unix.EPERM) {
return nil, fmt.Errorf("map create: RLIMIT_MEMLOCK may be too low: %w", err)

151 vendor/github.com/cilium/ebpf/prog.go generated vendored
View file

@ -5,6 +5,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"path/filepath"
"strings"
@ -19,6 +20,8 @@ import (
// ErrNotSupported is returned whenever the kernel doesn't support a feature.
var ErrNotSupported = internal.ErrNotSupported
var errUnsatisfiedReference = errors.New("unsatisfied reference")
// ProgramID represents the unique ID of an eBPF program.
type ProgramID uint32
@ -41,6 +44,12 @@ type ProgramOptions struct {
// Controls the output buffer size for the verifier. Defaults to
// DefaultVerifierLogSize.
LogSize int
// An ELF containing the target BTF for this program. It is used both to
// find the correct function to trace and to apply CO-RE relocations.
// This is useful in environments where the kernel BTF is not available
// (containers) or where it is in a non-standard location. Defaults to
// using the kernel BTF from a well-known location.
TargetBTF io.ReaderAt
}
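
A hedged usage sketch of TargetBTF: any io.ReaderAt holding raw BTF for the target kernel will do when /sys/kernel/btf/vmlinux is unavailable. The file path below is an assumption for illustration:

package main

import (
	"fmt"
	"os"

	"github.com/cilium/ebpf"
)

func loadWithExternalBTF(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	f, err := os.Open("/var/lib/btf/vmlinux") // hypothetical BTF location
	if err != nil {
		return nil, fmt.Errorf("open external BTF: %w", err)
	}
	defer f.Close()

	return ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
		TargetBTF: f, // *os.File implements io.ReaderAt
	})
}

func main() {} // spec construction elided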
// ProgramSpec defines a Program.
@ -125,21 +134,21 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
btfs := make(btfHandleCache)
defer btfs.close()
handles := newHandleCache()
defer handles.close()
return newProgramWithOptions(spec, opts, btfs)
prog, err := newProgramWithOptions(spec, opts, handles)
if errors.Is(err, errUnsatisfiedReference) {
return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
}
return prog, err
}
func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandleCache) (*Program, error) {
func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) {
if len(spec.Instructions) == 0 {
return nil, errors.New("Instructions cannot be empty")
}
if len(spec.License) == 0 {
return nil, errors.New("License cannot be empty")
}
if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
}
@ -157,44 +166,36 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
kv = v.Kernel()
}
insns := make(asm.Instructions, len(spec.Instructions))
copy(insns, spec.Instructions)
if err := fixupJumpsAndCalls(insns); err != nil {
return nil, err
}
buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
err := insns.Marshal(buf, internal.NativeEndian)
if err != nil {
return nil, err
}
bytecode := buf.Bytes()
insCount := uint32(len(bytecode) / asm.InstructionSize)
attr := &bpfProgLoadAttr{
progType: spec.Type,
progFlags: spec.Flags,
expectedAttachType: spec.AttachType,
insCount: insCount,
instructions: internal.NewSlicePointer(bytecode),
license: internal.NewStringPointer(spec.License),
kernelVersion: kv,
}
if haveObjName() == nil {
attr.progName = newBPFObjName(spec.Name)
attr.progName = internal.NewBPFObjName(spec.Name)
}
var err error
var targetBTF *btf.Spec
if opts.TargetBTF != nil {
targetBTF, err = handles.btfSpec(opts.TargetBTF)
if err != nil {
return nil, fmt.Errorf("load target BTF: %w", err)
}
}
var btfDisabled bool
var core btf.COREFixups
if spec.BTF != nil {
if relos, err := btf.ProgramRelocations(spec.BTF, nil); err != nil {
return nil, fmt.Errorf("CO-RE relocations: %s", err)
} else if len(relos) > 0 {
return nil, fmt.Errorf("applying CO-RE relocations: %w", ErrNotSupported)
core, err = btf.ProgramFixups(spec.BTF, targetBTF)
if err != nil {
return nil, fmt.Errorf("CO-RE relocations: %w", err)
}
handle, err := btfs.load(btf.ProgramSpec(spec.BTF))
handle, err := handles.btfHandle(btf.ProgramSpec(spec.BTF))
btfDisabled = errors.Is(err, btf.ErrNotSupported)
if err != nil && !btfDisabled {
return nil, fmt.Errorf("load BTF: %w", err)
@ -221,8 +222,27 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
}
}
insns, err := core.Apply(spec.Instructions)
if err != nil {
return nil, fmt.Errorf("CO-RE fixup: %w", err)
}
if err := fixupJumpsAndCalls(insns); err != nil {
return nil, err
}
buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
err = insns.Marshal(buf, internal.NativeEndian)
if err != nil {
return nil, err
}
bytecode := buf.Bytes()
attr.instructions = internal.NewSlicePointer(bytecode)
attr.insCount = uint32(len(bytecode) / asm.InstructionSize)
if spec.AttachTo != "" {
target, err := resolveBTFType(spec.AttachTo, spec.Type, spec.AttachType)
target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType)
if err != nil {
return nil, err
}
@ -250,7 +270,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
}
logErr := err
if opts.LogLevel == 0 {
if opts.LogLevel == 0 && opts.LogSize >= 0 {
// Re-run with the verifier enabled to get better error messages.
logBuf = make([]byte, logSize)
attr.logLevel = 1
@ -664,52 +684,45 @@ func (p *Program) ID() (ProgramID, error) {
return ProgramID(info.id), nil
}
func findKernelType(name string, typ btf.Type) error {
kernel, err := btf.LoadKernelSpec()
if err != nil {
return fmt.Errorf("can't load kernel spec: %w", err)
}
return kernel.FindType(name, typ)
}
func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
func resolveBTFType(kernel *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
type match struct {
p ProgramType
a AttachType
}
target := match{progType, attachType}
switch target {
var target btf.Type
var typeName, featureName string
switch (match{progType, attachType}) {
case match{LSM, AttachLSMMac}:
var target btf.Func
err := findKernelType("bpf_lsm_"+name, &target)
if errors.Is(err, btf.ErrNotFound) {
return nil, &internal.UnsupportedFeatureError{
Name: name + " LSM hook",
}
}
if err != nil {
return nil, fmt.Errorf("resolve BTF for LSM hook %s: %w", name, err)
}
return &target, nil
target = new(btf.Func)
typeName = "bpf_lsm_" + name
featureName = name + " LSM hook"
case match{Tracing, AttachTraceIter}:
var target btf.Func
err := findKernelType("bpf_iter_"+name, &target)
if errors.Is(err, btf.ErrNotFound) {
return nil, &internal.UnsupportedFeatureError{
Name: name + " iterator",
}
}
if err != nil {
return nil, fmt.Errorf("resolve BTF for iterator %s: %w", name, err)
}
return &target, nil
target = new(btf.Func)
typeName = "bpf_iter_" + name
featureName = name + " iterator"
default:
return nil, nil
}
if kernel == nil {
var err error
kernel, err = btf.LoadKernelSpec()
if err != nil {
return nil, fmt.Errorf("load kernel spec: %w", err)
}
}
err := kernel.FindType(typeName, target)
if errors.Is(err, btf.ErrNotFound) {
return nil, &internal.UnsupportedFeatureError{
Name: featureName,
}
}
if err != nil {
return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err)
}
return target, nil
}

View file

@ -3,6 +3,7 @@ package ebpf
import (
"errors"
"fmt"
"os"
"unsafe"
"github.com/cilium/ebpf/internal"
@ -10,19 +11,10 @@ import (
"github.com/cilium/ebpf/internal/unix"
)
// Generic errors returned by BPF syscalls.
var ErrNotExist = errors.New("requested object does not exist")
// bpfObjName is a null-terminated string made up of
// 'A-Za-z0-9_' characters.
type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte
// newBPFObjName truncates the result if it is too long.
func newBPFObjName(name string) bpfObjName {
var result bpfObjName
copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
return result
}
// ErrNotExist is returned when loading a non-existing map or program.
//
// Deprecated: use os.ErrNotExist instead.
var ErrNotExist = os.ErrNotExist
// invalidBPFObjNameChar returns true if char may not appear in
// a BPF object name.
@ -45,21 +37,6 @@ func invalidBPFObjNameChar(char rune) bool {
}
}
type bpfMapCreateAttr struct {
mapType MapType
keySize uint32
valueSize uint32
maxEntries uint32
flags uint32
innerMapFd uint32 // since 4.12 56f668dfe00d
numaNode uint32 // since 4.14 96eabe7a40aa
mapName bpfObjName // since 4.15 ad5b177bd73f
mapIfIndex uint32
btfFd uint32
btfKeyTypeID btf.TypeID
btfValueTypeID btf.TypeID
}
type bpfMapOpAttr struct {
mapFd uint32
padding uint32
@ -86,10 +63,10 @@ type bpfMapInfo struct {
value_size uint32
max_entries uint32
map_flags uint32
name bpfObjName // since 4.15 ad5b177bd73f
ifindex uint32 // since 4.16 52775b33bb50
btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6
netns_dev uint64 // since 4.16 52775b33bb50
name internal.BPFObjName // since 4.15 ad5b177bd73f
ifindex uint32 // since 4.16 52775b33bb50
btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6
netns_dev uint64 // since 4.16 52775b33bb50
netns_ino uint64
btf_id uint32 // since 4.18 78958fca7ead
btf_key_type_id uint32 // since 4.18 9b2cf328b2ec
@ -104,11 +81,11 @@ type bpfProgLoadAttr struct {
logLevel uint32
logSize uint32
logBuf internal.Pointer
kernelVersion uint32 // since 4.1 2541517c32be
progFlags uint32 // since 4.11 e07b98d9bffe
progName bpfObjName // since 4.15 067cae47771c
progIfIndex uint32 // since 4.15 1f6f4cb7ba21
expectedAttachType AttachType // since 4.17 5e43f899b03a
kernelVersion uint32 // since 4.1 2541517c32be
progFlags uint32 // since 4.11 e07b98d9bffe
progName internal.BPFObjName // since 4.15 067cae47771c
progIfIndex uint32 // since 4.15 1f6f4cb7ba21
expectedAttachType AttachType // since 4.17 5e43f899b03a
progBTFFd uint32
funcInfoRecSize uint32
funcInfo internal.Pointer
@ -132,7 +109,7 @@ type bpfProgInfo struct {
created_by_uid uint32
nr_map_ids uint32
map_ids internal.Pointer
name bpfObjName // since 4.15 067cae47771c
name internal.BPFObjName // since 4.15 067cae47771c
ifindex uint32
gpl_compatible uint32
netns_dev uint64
@ -188,7 +165,7 @@ func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
// As of ~4.20 the verifier can be interrupted by a signal,
// and returns EAGAIN in that case.
if err == unix.EAGAIN {
if errors.Is(err, unix.EAGAIN) {
continue
}
@ -205,23 +182,14 @@ func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
return err
}
func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return internal.NewFD(uint32(fd)), nil
}
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
_, err := bpfMapCreate(&bpfMapCreateAttr{
mapType: ArrayOfMaps,
keySize: 4,
valueSize: 4,
maxEntries: 1,
_, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
MapType: uint32(ArrayOfMaps),
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
// Invalid file descriptor.
innerMapFd: ^uint32(0),
InnerMapFd: ^uint32(0),
})
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
@ -235,12 +203,44 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
// BPF_MAP_FREEZE appeared in 5.2 as well, we don't do a separate check.
m, err := bpfMapCreate(&bpfMapCreateAttr{
mapType: Array,
keySize: 4,
valueSize: 4,
maxEntries: 1,
flags: unix.BPF_F_RDONLY_PROG,
m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
MapType: uint32(Array),
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
Flags: unix.BPF_F_RDONLY_PROG,
})
if err != nil {
return internal.ErrNotSupported
}
_ = m.Close()
return nil
})
var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error {
// This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
MapType: uint32(Array),
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
Flags: unix.BPF_F_MMAPABLE,
})
if err != nil {
return internal.ErrNotSupported
}
_ = m.Close()
return nil
})
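internal.FeatureTest wraps each probe in a once-per-process cache and returns a closure; package code gates on that closure, as the haveObjName() call in a later hunk shows. A sketch of the convention (createMmapableArray is hypothetical):

func createMmapableArray() error {
	// The probe runs once and caches its verdict; on pre-5.5 kernels it
	// returns internal.ErrNotSupported wrapped with the feature name and
	// minimum kernel version.
	if err := haveMmapableMaps(); err != nil {
		return err // caller can fall back to a plain array map
	}
	// Safe to pass unix.BPF_F_MMAPABLE in BPFMapCreateAttr.Flags here.
	return nil
}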
var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
// This checks BPF_F_INNER_MAP, which appeared in 5.10.
m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{
MapType: uint32(Array),
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
Flags: unix.BPF_F_INNER_MAP,
})
if err != nil {
return internal.ErrNotSupported
@ -329,7 +329,7 @@ func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) {
startID: start,
}
_, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return attr.nextID, wrapObjError(err)
return attr.nextID, err
}
func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) {
@ -355,32 +355,21 @@ func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, v
return attr.count, wrapMapError(err)
}
func wrapObjError(err error) error {
if err == nil {
return nil
}
if errors.Is(err, unix.ENOENT) {
return fmt.Errorf("%w", ErrNotExist)
}
return errors.New(err.Error())
}
func wrapMapError(err error) error {
if err == nil {
return nil
}
if errors.Is(err, unix.ENOENT) {
return ErrKeyNotExist
return internal.SyscallError(ErrKeyNotExist, unix.ENOENT)
}
if errors.Is(err, unix.EEXIST) {
return ErrKeyExist
return internal.SyscallError(ErrKeyExist, unix.EEXIST)
}
if errors.Is(err, unix.ENOTSUPP) {
return ErrNotSupported
return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP)
}
return err
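internal.SyscallError pairs the package-level sentinel with the underlying errno, so (per this helper's intent) errors.Is matches either ErrKeyNotExist or unix.ENOENT. Callers normally test the sentinel; a runnable sketch (creating maps needs CAP_BPF or root):

package main

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 8,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	var value uint64
	if err := m.Lookup(uint32(1), &value); errors.Is(err, ebpf.ErrKeyNotExist) {
		log.Print("key 1 not set yet") // insert a fresh value instead
	}
}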
@ -417,15 +406,15 @@ func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
}
var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
attr := bpfMapCreateAttr{
mapType: Array,
keySize: 4,
valueSize: 4,
maxEntries: 1,
mapName: newBPFObjName("feature_test"),
attr := internal.BPFMapCreateAttr{
MapType: uint32(Array),
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
MapName: internal.NewBPFObjName("feature_test"),
}
fd, err := bpfMapCreate(&attr)
fd, err := internal.BPFMapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
@ -439,15 +428,15 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
return err
}
attr := bpfMapCreateAttr{
mapType: Array,
keySize: 4,
valueSize: 4,
maxEntries: 1,
mapName: newBPFObjName(".test"),
attr := internal.BPFMapCreateAttr{
MapType: uint32(Array),
KeySize: 4,
ValueSize: 4,
MaxEntries: 1,
MapName: internal.NewBPFObjName(".test"),
}
fd, err := bpfMapCreate(&attr)
fd, err := internal.BPFMapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
@ -458,14 +447,14 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
var maxEntries uint32 = 2
attr := bpfMapCreateAttr{
mapType: Hash,
keySize: 4,
valueSize: 4,
maxEntries: maxEntries,
attr := internal.BPFMapCreateAttr{
MapType: uint32(Hash),
KeySize: 4,
ValueSize: 4,
MaxEntries: maxEntries,
}
fd, err := bpfMapCreate(&attr)
fd, err := internal.BPFMapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
@ -487,5 +476,5 @@ func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) {
id: id,
}
ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return internal.NewFD(uint32(ptr)), wrapObjError(err)
return internal.NewFD(uint32(ptr)), err
}
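Dropping wrapObjError means these ID-walking syscalls hand the raw errno back to callers. Since syscall.Errno's Is method maps ENOENT to os.ErrNotExist, the usual end-of-iteration check still works. A sketch that walks all loaded maps via the exported wrappers (MapGetNextID is assumed to have its v0.6 signature):

package main

import (
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/cilium/ebpf"
)

func main() {
	var id ebpf.MapID
	for {
		next, err := ebpf.MapGetNextID(id)
		if errors.Is(err, os.ErrNotExist) {
			break // ENOENT from the kernel: no more maps
		}
		if err != nil {
			log.Fatal(err)
		}
		id = next
		fmt.Println("map id:", id)
	}
}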

View file

@ -85,6 +85,10 @@ const (
SkStorage
// DevMapHash - Hash-based indexing scheme for references to network devices.
DevMapHash
StructOpts
RingBuf
InodeStorage
TaskStorage
)
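The four new values mirror kernel map types added between Linux 5.6 and 5.11: StructOpts (BPF_MAP_TYPE_STRUCT_OPS, 5.6), RingBuf (BPF_MAP_TYPE_RINGBUF, 5.8), InodeStorage (BPF_MAP_TYPE_INODE_STORAGE, 5.10) and TaskStorage (BPF_MAP_TYPE_TASK_STORAGE, 5.11). A minimal sketch of declaring a ring buffer map with the public API (for RingBuf the kernel requires KeySize and ValueSize to be zero and MaxEntries to be a power-of-two multiple of the page size):

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	rb, err := ebpf.NewMap(&ebpf.MapSpec{
		Name:       "events",
		Type:       ebpf.RingBuf,
		MaxEntries: 1 << 16, // ring size in bytes
	})
	if err != nil {
		log.Fatal(err) // needs CAP_BPF/CAP_SYS_ADMIN and a 5.8+ kernel
	}
	defer rb.Close()
}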
// hasPerCPUValue returns true if the Map stores a value per CPU.

View file

@ -34,11 +34,15 @@ func _() {
_ = x[Stack-23]
_ = x[SkStorage-24]
_ = x[DevMapHash-25]
_ = x[StructOpts-26]
_ = x[RingBuf-27]
_ = x[InodeStorage-28]
_ = x[TaskStorage-29]
}
const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHash"
const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOptsRingBufInodeStorageTaskStorage"
var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248}
var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 258, 265, 277, 288}
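The index array widens from uint8 to uint16 because the four new names push the cumulative offsets past 255: the last entry, TaskStorage, spans bytes 277..288 of _MapType_name, and uint8 offsets would silently wrap. A small runnable demonstration of the lookup String() performs, with the tail of the name string inlined:

package main

import "fmt"

func main() {
	// Tail of _MapType_name; the full string is 288 bytes long, so
	// offsets no longer fit in uint8 (max 255).
	name := "SkStorageDevMapHashStructOptsRingBufInodeStorageTaskStorage"
	base := 288 - len(name)                // offset of this tail in _MapType_name
	fmt.Println(name[277-base : 288-base]) // prints "TaskStorage"
}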
func (i MapType) String() string {
if i >= MapType(len(_MapType_index)-1) {