
vendor: update github.com/containerd/cgroups and github.com/cilium/ebpf

Fixes
- https://github.com/docker/for-linux/issues/1284
- https://github.com/containerd/containerd/issues/6700
- https://github.com/moby/moby/issues/43387

Update to cgroups v1.0.1, which has the current proto for cgroups v1.
This also requires updating the cilium/ebpf dependency to v0.4.0.

Signed-off-by: Wim <wim@42.be>
Wim 2022-05-07 17:55:08 +02:00
parent 4433bf67ba
commit be7855fdbe
GPG key ID: 5E423DA5C9AA63D4
51 changed files with 9657 additions and 2860 deletions


@ -135,13 +135,13 @@ google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6
github.com/containerd/containerd 7cfa023d95d37076d5ab035003d4839f4b6ba791 https://github.com/moby/containerd.git # master (v1.5.0-dev) + patch for CVE-2021-41190 and CVE-2022-24769
github.com/containerd/fifo 0724c46b320cf96bb172a0550c19a4b1fca4dacb
github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
github.com/containerd/cgroups 0b889c03f102012f1d93a97ddd3ef71cd6f4f510
github.com/containerd/cgroups b9de8a2212026c07cec67baf3323f1fc0121e048 # v1.0.1
github.com/containerd/console 5d7e1412f07b502a01029ea20e20e0d2be31fa7c # v1.0.1
github.com/containerd/go-runc 16b287bc67d069a60fa48db15f330b790b74365b
github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
github.com/containerd/ttrpc bfba540dc45464586c106b1f31c8547933c1eb41 # v1.0.2
github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28
github.com/cilium/ebpf 32458a752e0bcbc670691625f84e7ef6aef5655d # v0.4.0
# cluster
github.com/docker/swarmkit cf1e0f8d95ee37ea1f9a5f8a54c73c1e0fa58a54 # bump_20.10

vendor/github.com/cilium/ebpf/README.md generated vendored (new file, 54 lines)

@ -0,0 +1,54 @@
# eBPF
[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
eBPF is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.
* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
assembler
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
`PERF_EVENT_ARRAY`
* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
embedding eBPF in Go
The library is maintained by [Cloudflare](https://www.cloudflare.com) and
[Cilium](https://www.cilium.io). Feel free to
[join](https://cilium.herokuapp.com/) the
[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack.
## Current status
The package is production ready, but **the API is explicitly unstable right
now**. Expect to update your code if you want to follow along.
## Requirements
* A version of Go that is [supported by
upstream](https://golang.org/doc/devel/release.html#policy)
* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested)
## Useful resources
* [eBPF.io](https://ebpf.io) (recommended)
* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide)
(recommended)
* [Linux documentation on
BPF](https://www.kernel.org/doc/html/latest/networking/filter.html)
* [eBPF features by Linux
version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md)
## Regenerating Testdata
Run `make` in the root of this repository to rebuild testdata in all
subpackages. This requires Docker, as it relies on a standardized build
environment to keep the build output stable.
The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
## License
MIT
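
As a quick orientation, here is a minimal user-space sketch of the flow the README describes: parse a compiled eBPF object into a CollectionSpec and create its maps and programs in the kernel. The object file name "program.o" is a placeholder, and attaching the resulting programs (via the link subpackage) is left out.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Parse the clang-compiled ELF into an in-memory spec.
	// "program.o" is a placeholder path, not part of this repository.
	spec, err := ebpf.LoadCollectionSpec("program.o")
	if err != nil {
		log.Fatalf("loading spec: %v", err)
	}

	// Create the maps and programs described by the spec in the kernel.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatalf("creating collection: %v", err)
	}
	defer coll.Close()

	for name := range coll.Programs {
		log.Printf("loaded program %q", name)
	}
}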

vendor/github.com/cilium/ebpf/abi.go generated vendored (206 lines removed)

@ -1,206 +0,0 @@
package ebpf
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"syscall"
"github.com/cilium/ebpf/internal"
)
// MapABI are the attributes of a Map which are available across all supported kernels.
type MapABI struct {
Type MapType
KeySize uint32
ValueSize uint32
MaxEntries uint32
Flags uint32
}
func newMapABIFromSpec(spec *MapSpec) *MapABI {
return &MapABI{
spec.Type,
spec.KeySize,
spec.ValueSize,
spec.MaxEntries,
spec.Flags,
}
}
func newMapABIFromFd(fd *internal.FD) (string, *MapABI, error) {
info, err := bpfGetMapInfoByFD(fd)
if err != nil {
if errors.Is(err, syscall.EINVAL) {
abi, err := newMapABIFromProc(fd)
return "", abi, err
}
return "", nil, err
}
return "", &MapABI{
MapType(info.mapType),
info.keySize,
info.valueSize,
info.maxEntries,
info.flags,
}, nil
}
func newMapABIFromProc(fd *internal.FD) (*MapABI, error) {
var abi MapABI
err := scanFdInfo(fd, map[string]interface{}{
"map_type": &abi.Type,
"key_size": &abi.KeySize,
"value_size": &abi.ValueSize,
"max_entries": &abi.MaxEntries,
"map_flags": &abi.Flags,
})
if err != nil {
return nil, err
}
return &abi, nil
}
// Equal returns true if two ABIs have the same values.
func (abi *MapABI) Equal(other *MapABI) bool {
switch {
case abi.Type != other.Type:
return false
case abi.KeySize != other.KeySize:
return false
case abi.ValueSize != other.ValueSize:
return false
case abi.MaxEntries != other.MaxEntries:
return false
case abi.Flags != other.Flags:
return false
default:
return true
}
}
// ProgramABI are the attributes of a Program which are available across all supported kernels.
type ProgramABI struct {
Type ProgramType
}
func newProgramABIFromSpec(spec *ProgramSpec) *ProgramABI {
return &ProgramABI{
spec.Type,
}
}
func newProgramABIFromFd(fd *internal.FD) (string, *ProgramABI, error) {
info, err := bpfGetProgInfoByFD(fd)
if err != nil {
if errors.Is(err, syscall.EINVAL) {
return newProgramABIFromProc(fd)
}
return "", nil, err
}
var name string
if bpfName := internal.CString(info.name[:]); bpfName != "" {
name = bpfName
} else {
name = internal.CString(info.tag[:])
}
return name, &ProgramABI{
Type: ProgramType(info.progType),
}, nil
}
func newProgramABIFromProc(fd *internal.FD) (string, *ProgramABI, error) {
var (
abi ProgramABI
name string
)
err := scanFdInfo(fd, map[string]interface{}{
"prog_type": &abi.Type,
"prog_tag": &name,
})
if errors.Is(err, errMissingFields) {
return "", nil, &internal.UnsupportedFeatureError{
Name: "reading ABI from /proc/self/fdinfo",
MinimumVersion: internal.Version{4, 11, 0},
}
}
if err != nil {
return "", nil, err
}
return name, &abi, nil
}
func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
raw, err := fd.Value()
if err != nil {
return err
}
fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw))
if err != nil {
return err
}
defer fh.Close()
if err := scanFdInfoReader(fh, fields); err != nil {
return fmt.Errorf("%s: %w", fh.Name(), err)
}
return nil
}
var errMissingFields = errors.New("missing fields")
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
var (
scanner = bufio.NewScanner(r)
scanned int
)
for scanner.Scan() {
parts := bytes.SplitN(scanner.Bytes(), []byte("\t"), 2)
if len(parts) != 2 {
continue
}
name := bytes.TrimSuffix(parts[0], []byte(":"))
field, ok := fields[string(name)]
if !ok {
continue
}
if n, err := fmt.Fscanln(bytes.NewReader(parts[1]), field); err != nil || n != 1 {
return fmt.Errorf("can't parse field %s: %v", name, err)
}
scanned++
}
if err := scanner.Err(); err != nil {
return err
}
if scanned != len(fields) {
return errMissingFields
}
return nil
}
// Equal returns true if two ABIs have the same values.
func (abi *ProgramABI) Equal(other *ProgramABI) bool {
switch {
case abi.Type != other.Type:
return false
default:
return true
}
}


@ -1,17 +1,29 @@
package asm
import (
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"strings"
"github.com/cilium/ebpf/internal/unix"
)
// InstructionSize is the size of a BPF instruction in bytes
const InstructionSize = 8
// RawInstructionOffset is an offset in units of raw BPF instructions.
type RawInstructionOffset uint64
// Bytes returns the offset of an instruction in bytes.
func (rio RawInstructionOffset) Bytes() uint64 {
return uint64(rio) * InstructionSize
}
// Instruction is a single eBPF instruction.
type Instruction struct {
OpCode OpCode
@ -151,10 +163,20 @@ func (ins *Instruction) mapOffset() uint32 {
return uint32(uint64(ins.Constant) >> 32)
}
// isLoadFromMap returns true if the instruction loads from a map.
//
// This covers both loading the map pointer and direct map value loads.
func (ins *Instruction) isLoadFromMap() bool {
return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
}
// IsFunctionCall returns true if the instruction calls another BPF function.
//
// This is not the same thing as a BPF helper call.
func (ins *Instruction) IsFunctionCall() bool {
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
}
// Format implements fmt.Formatter.
func (ins Instruction) Format(f fmt.State, c rune) {
if c != 'v' {
@ -310,28 +332,6 @@ func (insns Instructions) ReferenceOffsets() map[string][]int {
return offsets
}
func (insns Instructions) marshalledOffsets() (map[string]int, error) {
symbols := make(map[string]int)
marshalledPos := 0
for _, ins := range insns {
currentPos := marshalledPos
marshalledPos += ins.OpCode.marshalledInstructions()
if ins.Symbol == "" {
continue
}
if _, ok := symbols[ins.Symbol]; ok {
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
}
symbols[ins.Symbol] = currentPos
}
return symbols, nil
}
// Format implements fmt.Formatter.
//
// You can control indentation of symbols by
@ -370,21 +370,17 @@ func (insns Instructions) Format(f fmt.State, c rune) {
symIndent = strings.Repeat(" ", symPadding)
}
// Figure out how many digits we need to represent the highest
// offset.
highestOffset := 0
for _, ins := range insns {
highestOffset += ins.OpCode.marshalledInstructions()
}
// Guess how many digits we need at most, by assuming that all instructions
// are double wide.
highestOffset := len(insns) * 2
offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
offset := 0
for _, ins := range insns {
if ins.Symbol != "" {
fmt.Fprintf(f, "%s%s:\n", symIndent, ins.Symbol)
iter := insns.Iterate()
for iter.Next() {
if iter.Ins.Symbol != "" {
fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol)
}
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, offset, ins)
offset += ins.OpCode.marshalledInstructions()
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
}
return
@ -392,43 +388,69 @@ func (insns Instructions) Format(f fmt.State, c rune) {
// Marshal encodes a BPF program into the kernel format.
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
absoluteOffsets, err := insns.marshalledOffsets()
if err != nil {
return err
}
num := 0
for i, ins := range insns {
switch {
case ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall && ins.Constant == -1:
// Rewrite bpf to bpf call
offset, ok := absoluteOffsets[ins.Reference]
if !ok {
return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
}
ins.Constant = int64(offset - num - 1)
case ins.OpCode.Class() == JumpClass && ins.Offset == -1:
// Rewrite jump to label
offset, ok := absoluteOffsets[ins.Reference]
if !ok {
return fmt.Errorf("instruction %d: reference to missing symbol %s", i, ins.Reference)
}
ins.Offset = int16(offset - num - 1)
}
n, err := ins.Marshal(w, bo)
_, err := ins.Marshal(w, bo)
if err != nil {
return fmt.Errorf("instruction %d: %w", i, err)
}
num += int(n / InstructionSize)
}
return nil
}
// Tag calculates the kernel tag for a series of instructions.
//
// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
// to ProgramInfo.Tag to figure out whether a loaded program matches
// certain instructions.
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
h := sha1.New()
for i, ins := range insns {
if ins.isLoadFromMap() {
ins.Constant = 0
}
_, err := ins.Marshal(h, bo)
if err != nil {
return "", fmt.Errorf("instruction %d: %w", i, err)
}
}
return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
}
// Iterate allows iterating a BPF program while keeping track of
// various offsets.
//
// Modifying the instruction slice will lead to undefined behaviour.
func (insns Instructions) Iterate() *InstructionIterator {
return &InstructionIterator{insns: insns}
}
// InstructionIterator iterates over a BPF program.
type InstructionIterator struct {
insns Instructions
// The instruction in question.
Ins *Instruction
// The index of the instruction in the original instruction slice.
Index int
// The offset of the instruction in raw BPF instructions. This accounts
// for double-wide instructions.
Offset RawInstructionOffset
}
// Next returns true as long as there are any instructions remaining.
func (iter *InstructionIterator) Next() bool {
if len(iter.insns) == 0 {
return false
}
if iter.Ins != nil {
iter.Index++
iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions())
}
iter.Ins = &iter.insns[0]
iter.insns = iter.insns[1:]
return true
}
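
A small sketch of the new iterator, assuming a hand-assembled program: Index counts entries in the instruction slice, while Offset counts raw BPF instructions and therefore advances by two across the double-wide dword load.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		// A dword load occupies two raw instructions when marshalled.
		asm.LoadImm(asm.R0, 42, asm.DWord),
		asm.Mov.Imm(asm.R1, 1),
		asm.Return(),
	}

	iter := insns.Iterate()
	for iter.Next() {
		fmt.Printf("index=%d offset=%d %v\n", iter.Index, iter.Offset, iter.Ins)
	}
}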
type bpfInstruction struct {
OpCode OpCode
Registers bpfRegisters


@ -66,10 +66,10 @@ type OpCode uint8
// InvalidOpCode is returned by setters on OpCode
const InvalidOpCode OpCode = 0xff
// marshalledInstructions returns the number of BPF instructions required
// rawInstructions returns the number of BPF instructions required
// to encode this opcode.
func (op OpCode) marshalledInstructions() int {
if op == LoadImmOp(DWord) {
func (op OpCode) rawInstructions() int {
if op.isDWordLoad() {
return 2
}
return 1


@ -4,6 +4,8 @@ import (
"errors"
"fmt"
"math"
"reflect"
"strings"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
@ -11,7 +13,10 @@ import (
)
// CollectionOptions control loading a collection into the kernel.
//
// Maps and Programs are passed to NewMapWithOptions and NewProgramWithOptions.
type CollectionOptions struct {
Maps MapOptions
Programs ProgramOptions
}
@ -126,6 +131,106 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
return nil
}
// Assign the contents of a CollectionSpec to a struct.
//
// This function is a short-cut to manually checking the presence
// of maps and programs in a collection spec. Consider using bpf2go if this
// sounds useful.
//
// The argument to must be a pointer to a struct. A field of the
// struct is updated with values from Programs or Maps if it
// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
// The tag gives the name of the program or map as found in
// the CollectionSpec.
//
// struct {
// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
// Bar *ebpf.MapSpec `ebpf:"bar_map"`
// Ignored int
// }
//
// Returns an error if any of the fields can't be found, or
// if the same map or program is assigned multiple times.
func (cs *CollectionSpec) Assign(to interface{}) error {
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
switch typ {
case reflect.TypeOf((*ProgramSpec)(nil)):
p := cs.Programs[name]
if p == nil {
return reflect.Value{}, fmt.Errorf("missing program %q", name)
}
return reflect.ValueOf(p), nil
case reflect.TypeOf((*MapSpec)(nil)):
m := cs.Maps[name]
if m == nil {
return reflect.Value{}, fmt.Errorf("missing map %q", name)
}
return reflect.ValueOf(m), nil
default:
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
}
}
return assignValues(to, valueOf)
}
// LoadAndAssign loads maps and programs into the kernel and assigns them to a struct.
//
// This function is a short-cut to manually checking the presence
// of maps and programs in a collection spec. Consider using bpf2go if this
// sounds useful.
//
// The argument to must be a pointer to a struct. A field of the
// struct is updated with values from Programs or Maps if it
// has an `ebpf` tag and its type is *Program or *Map.
// The tag gives the name of the program or map as found in
// the CollectionSpec.
//
// struct {
// Foo *ebpf.Program `ebpf:"xdp_foo"`
// Bar *ebpf.Map `ebpf:"bar_map"`
// Ignored int
// }
//
// opts may be nil.
//
// Returns an error if any of the fields can't be found, or
// if the same map or program is assigned multiple times.
func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
if opts == nil {
opts = &CollectionOptions{}
}
loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts)
defer cleanup()
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
switch typ {
case reflect.TypeOf((*Program)(nil)):
p, err := loadProgram(name)
if err != nil {
return reflect.Value{}, err
}
return reflect.ValueOf(p), nil
case reflect.TypeOf((*Map)(nil)):
m, err := loadMap(name)
if err != nil {
return reflect.Value{}, err
}
return reflect.ValueOf(m), nil
default:
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
}
}
if err := assignValues(to, valueOf); err != nil {
return err
}
done()
return nil
}
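
A minimal sketch of the struct-tag workflow documented above. The object file "program.o" and the names "xdp_foo" and "bar_map" mirror the placeholder example from the doc comment; they are assumptions, not anything shipped in this repository.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

// objects receives the loaded program and map via `ebpf` struct tags.
type objects struct {
	Foo *ebpf.Program `ebpf:"xdp_foo"`
	Bar *ebpf.Map     `ebpf:"bar_map"`
}

func main() {
	spec, err := ebpf.LoadCollectionSpec("program.o")
	if err != nil {
		log.Fatalf("loading spec: %v", err)
	}

	var objs objects
	// Only the tagged objects (and whatever they reference) are created.
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		log.Fatalf("loading objects: %v", err)
	}
	defer objs.Foo.Close()
	defer objs.Bar.Close()

	log.Printf("program FD %d, map FD %d", objs.Foo.FD(), objs.Bar.FD())
}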
// Collection is a collection of Programs and Maps associated
// with their symbols
type Collection struct {
@ -134,28 +239,75 @@ type Collection struct {
}
// NewCollection creates a Collection from a specification.
//
// Only maps referenced by at least one of the programs are initialized.
func NewCollection(spec *CollectionSpec) (*Collection, error) {
return NewCollectionWithOptions(spec, CollectionOptions{})
}
// NewCollectionWithOptions creates a Collection from a specification.
//
// Only maps referenced by at least one of the programs are initialized.
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (coll *Collection, err error) {
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts)
defer cleanup()
for mapName := range spec.Maps {
_, err := loadMap(mapName)
if err != nil {
return nil, err
}
}
for progName := range spec.Programs {
_, err := loadProgram(progName)
if err != nil {
return nil, err
}
}
maps, progs := done()
return &Collection{
progs,
maps,
}, nil
}
type btfHandleCache map[*btf.Spec]*btf.Handle
func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
if btfs[spec] != nil {
return btfs[spec], nil
}
handle, err := btf.NewHandle(spec)
if err != nil {
return nil, err
}
btfs[spec] = handle
return handle, nil
}
func (btfs btfHandleCache) close() {
for _, handle := range btfs {
handle.Close()
}
}
func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
loadMap func(string) (*Map, error),
loadProgram func(string) (*Program, error),
done func() (map[string]*Map, map[string]*Program),
cleanup func(),
) {
var (
maps = make(map[string]*Map)
progs = make(map[string]*Program)
btfs = make(map[*btf.Spec]*btf.Handle)
maps = make(map[string]*Map)
progs = make(map[string]*Program)
btfs = make(btfHandleCache)
skipMapsAndProgs = false
)
defer func() {
for _, btf := range btfs {
btf.Close()
}
cleanup = func() {
btfs.close()
if err == nil {
if skipMapsAndProgs {
return
}
@ -166,40 +318,43 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
for _, p := range progs {
p.Close()
}
}()
loadBTF := func(spec *btf.Spec) (*btf.Handle, error) {
if btfs[spec] != nil {
return btfs[spec], nil
}
handle, err := btf.NewHandle(spec)
if err != nil {
return nil, err
}
btfs[spec] = handle
return handle, nil
}
for mapName, mapSpec := range spec.Maps {
var handle *btf.Handle
if mapSpec.BTF != nil {
handle, err = loadBTF(btf.MapSpec(mapSpec.BTF))
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, err
}
done = func() (map[string]*Map, map[string]*Program) {
skipMapsAndProgs = true
return maps, progs
}
loadMap = func(mapName string) (*Map, error) {
if m := maps[mapName]; m != nil {
return m, nil
}
m, err := newMapWithBTF(mapSpec, handle)
mapSpec := coll.Maps[mapName]
if mapSpec == nil {
return nil, fmt.Errorf("missing map %s", mapName)
}
m, err := newMapWithOptions(mapSpec, opts.Maps, btfs)
if err != nil {
return nil, fmt.Errorf("map %s: %w", mapName, err)
}
maps[mapName] = m
return m, nil
}
for progName, origProgSpec := range spec.Programs {
progSpec := origProgSpec.Copy()
loadProgram = func(progName string) (*Program, error) {
if prog := progs[progName]; prog != nil {
return prog, nil
}
progSpec := coll.Programs[progName]
if progSpec == nil {
return nil, fmt.Errorf("unknown program %s", progName)
}
progSpec = progSpec.Copy()
// Rewrite any reference to a valid map.
for i := range progSpec.Instructions {
@ -215,9 +370,9 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
continue
}
m := maps[ins.Reference]
if m == nil {
return nil, fmt.Errorf("program %s: missing map %s", progName, ins.Reference)
m, err := loadMap(ins.Reference)
if err != nil {
return nil, fmt.Errorf("program %s: %s", progName, err)
}
fd := m.FD()
@ -229,25 +384,16 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (col
}
}
var handle *btf.Handle
if progSpec.BTF != nil {
handle, err = loadBTF(btf.ProgramSpec(progSpec.BTF))
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, err
}
}
prog, err := newProgramWithBTF(progSpec, handle, opts.Programs)
prog, err := newProgramWithOptions(progSpec, opts.Programs, btfs)
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
progs[progName] = prog
return prog, nil
}
return &Collection{
progs,
maps,
}, nil
return
}
// LoadCollection parses an object file and converts it to a collection.
@ -292,3 +438,152 @@ func (coll *Collection) DetachProgram(name string) *Program {
delete(coll.Programs, name)
return p
}
// Assign the contents of a collection to a struct.
//
// Deprecated: use CollectionSpec.Assign instead. It provides the same
// functionality but creates only the maps and programs requested.
func (coll *Collection) Assign(to interface{}) error {
assignedMaps := make(map[string]struct{})
assignedPrograms := make(map[string]struct{})
valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
switch typ {
case reflect.TypeOf((*Program)(nil)):
p := coll.Programs[name]
if p == nil {
return reflect.Value{}, fmt.Errorf("missing program %q", name)
}
assignedPrograms[name] = struct{}{}
return reflect.ValueOf(p), nil
case reflect.TypeOf((*Map)(nil)):
m := coll.Maps[name]
if m == nil {
return reflect.Value{}, fmt.Errorf("missing map %q", name)
}
assignedMaps[name] = struct{}{}
return reflect.ValueOf(m), nil
default:
return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
}
}
if err := assignValues(to, valueOf); err != nil {
return err
}
for name := range assignedPrograms {
coll.DetachProgram(name)
}
for name := range assignedMaps {
coll.DetachMap(name)
}
return nil
}
func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error {
type structField struct {
reflect.StructField
value reflect.Value
}
var (
fields []structField
visitedTypes = make(map[reflect.Type]bool)
flattenStruct func(reflect.Value) error
)
flattenStruct = func(structVal reflect.Value) error {
structType := structVal.Type()
if structType.Kind() != reflect.Struct {
return fmt.Errorf("%s is not a struct", structType)
}
if visitedTypes[structType] {
return fmt.Errorf("recursion on type %s", structType)
}
for i := 0; i < structType.NumField(); i++ {
field := structField{structType.Field(i), structVal.Field(i)}
name := field.Tag.Get("ebpf")
if name != "" {
fields = append(fields, field)
continue
}
var err error
switch field.Type.Kind() {
case reflect.Ptr:
if field.Type.Elem().Kind() != reflect.Struct {
continue
}
if field.value.IsNil() {
return fmt.Errorf("nil pointer to %s", structType)
}
err = flattenStruct(field.value.Elem())
case reflect.Struct:
err = flattenStruct(field.value)
default:
continue
}
if err != nil {
return fmt.Errorf("field %s: %s", field.Name, err)
}
}
return nil
}
toValue := reflect.ValueOf(to)
if toValue.Type().Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer to struct", to)
}
if toValue.IsNil() {
return fmt.Errorf("nil pointer to %T", to)
}
if err := flattenStruct(toValue.Elem()); err != nil {
return err
}
type elem struct {
// Either *Map or *Program
typ reflect.Type
name string
}
assignedTo := make(map[elem]string)
for _, field := range fields {
name := field.Tag.Get("ebpf")
if strings.Contains(name, ",") {
return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
}
e := elem{field.Type, name}
if assignedField := assignedTo[e]; assignedField != "" {
return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField)
}
value, err := valueOf(field.Type, name)
if err != nil {
return fmt.Errorf("field %s: %w", field.Name, err)
}
if !field.value.CanSet() {
return fmt.Errorf("field %s: can't set value", field.Name)
}
field.value.Set(value)
assignedTo[e] = field.Name
}
return nil
}


@ -12,6 +12,5 @@
// eBPF code should be compiled ahead of time using clang, and shipped with
// your application as any other resource.
//
// This package doesn't include code required to attach eBPF to Linux
// subsystems, since this varies per subsystem.
// Use the link subpackage to attach a loaded program to a hook in the kernel.
package ebpf


@ -1,6 +1,7 @@
package ebpf
import (
"bufio"
"bytes"
"debug/elf"
"encoding/binary"
@ -17,12 +18,14 @@ import (
"github.com/cilium/ebpf/internal/unix"
)
// elfCode is a convenience to reduce the amount of arguments that have to
// be passed around explicitly. You should treat its contents as immutable.
type elfCode struct {
*elf.File
symbols []elf.Symbol
symbolsPerSection map[elf.SectionIndex]map[uint64]elf.Symbol
license string
version uint32
*internal.SafeELFFile
sections map[elf.SectionIndex]*elfSection
license string
version uint32
btf *btf.Spec
}
// LoadCollectionSpec parses an ELF file into a CollectionSpec.
@ -42,63 +45,52 @@ func LoadCollectionSpec(file string) (*CollectionSpec, error) {
// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
f, err := elf.NewFile(rd)
f, err := internal.NewSafeELFFile(rd)
if err != nil {
return nil, err
}
defer f.Close()
symbols, err := f.Symbols()
if err != nil {
return nil, fmt.Errorf("load symbols: %v", err)
}
ec := &elfCode{f, symbols, symbolsPerSection(symbols), "", 0}
var (
licenseSection *elf.Section
versionSection *elf.Section
btfMaps = make(map[elf.SectionIndex]*elf.Section)
progSections = make(map[elf.SectionIndex]*elf.Section)
sections = make(map[elf.SectionIndex]*elfSection)
relSections = make(map[elf.SectionIndex]*elf.Section)
mapSections = make(map[elf.SectionIndex]*elf.Section)
dataSections = make(map[elf.SectionIndex]*elf.Section)
)
for i, sec := range ec.Sections {
// This is the target of relocations generated by inline assembly.
sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection)
// Collect all the sections we're interested in. This includes relocations
// which we parse later.
for i, sec := range f.Sections {
idx := elf.SectionIndex(i)
switch {
case strings.HasPrefix(sec.Name, "license"):
licenseSection = sec
case strings.HasPrefix(sec.Name, "version"):
versionSection = sec
case strings.HasPrefix(sec.Name, "maps"):
mapSections[elf.SectionIndex(i)] = sec
sections[idx] = newElfSection(sec, mapSection)
case sec.Name == ".maps":
btfMaps[elf.SectionIndex(i)] = sec
case sec.Name == ".bss" || sec.Name == ".rodata" || sec.Name == ".data":
dataSections[elf.SectionIndex(i)] = sec
sections[idx] = newElfSection(sec, btfMapSection)
case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"):
sections[idx] = newElfSection(sec, dataSection)
case sec.Type == elf.SHT_REL:
if int(sec.Info) >= len(ec.Sections) {
return nil, fmt.Errorf("found relocation section %v for missing section %v", i, sec.Info)
}
// Store relocations under the section index of the target
idx := elf.SectionIndex(sec.Info)
if relSections[idx] != nil {
return nil, fmt.Errorf("section %d has multiple relocation sections", sec.Info)
}
relSections[idx] = sec
relSections[elf.SectionIndex(sec.Info)] = sec
case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
progSections[elf.SectionIndex(i)] = sec
sections[idx] = newElfSection(sec, programSection)
}
}
ec.license, err = loadLicense(licenseSection)
license, err := loadLicense(licenseSection)
if err != nil {
return nil, fmt.Errorf("load license: %w", err)
}
ec.version, err = loadVersion(versionSection, ec.ByteOrder)
version, err := loadVersion(versionSection, f.ByteOrder)
if err != nil {
return nil, fmt.Errorf("load version: %w", err)
}
@ -108,37 +100,90 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, fmt.Errorf("load BTF: %w", err)
}
relocations, referencedSections, err := ec.loadRelocations(relSections)
// Assign symbols to all the sections we're interested in.
symbols, err := f.Symbols()
if err != nil {
return nil, fmt.Errorf("load relocations: %w", err)
return nil, fmt.Errorf("load symbols: %v", err)
}
for _, symbol := range symbols {
idx := symbol.Section
symType := elf.ST_TYPE(symbol.Info)
section := sections[idx]
if section == nil {
continue
}
// Older versions of LLVM don't tag symbols correctly, so keep
// all NOTYPE ones.
keep := symType == elf.STT_NOTYPE
switch section.kind {
case mapSection, btfMapSection, dataSection:
keep = keep || symType == elf.STT_OBJECT
case programSection:
keep = keep || symType == elf.STT_FUNC
}
if !keep || symbol.Name == "" {
continue
}
section.symbols[symbol.Value] = symbol
}
ec := &elfCode{
SafeELFFile: f,
sections: sections,
license: license,
version: version,
btf: btfSpec,
}
// Go through relocation sections, and parse the ones for sections we're
// interested in. Make sure that relocations point at valid sections.
for idx, relSection := range relSections {
section := sections[idx]
if section == nil {
continue
}
rels, err := ec.loadRelocations(relSection, symbols)
if err != nil {
return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err)
}
for _, rel := range rels {
target := sections[rel.Section]
if target == nil {
return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
}
if target.Flags&elf.SHF_STRINGS > 0 {
return nil, fmt.Errorf("section %q: string %q is not stack allocated: %w", section.Name, rel.Name, ErrNotSupported)
}
target.references++
}
section.relocations = rels
}
// Collect all the various ways to define maps.
maps := make(map[string]*MapSpec)
if err := ec.loadMaps(maps, mapSections); err != nil {
if err := ec.loadMaps(maps); err != nil {
return nil, fmt.Errorf("load maps: %w", err)
}
if len(btfMaps) > 0 {
if err := ec.loadBTFMaps(maps, btfMaps, btfSpec); err != nil {
return nil, fmt.Errorf("load BTF maps: %w", err)
}
if err := ec.loadBTFMaps(maps); err != nil {
return nil, fmt.Errorf("load BTF maps: %w", err)
}
if len(dataSections) > 0 {
for idx := range dataSections {
if !referencedSections[idx] {
// Prune data sections which are not referenced by any
// instructions.
delete(dataSections, idx)
}
}
if err := ec.loadDataSections(maps, dataSections, btfSpec); err != nil {
return nil, fmt.Errorf("load data sections: %w", err)
}
if err := ec.loadDataSections(maps); err != nil {
return nil, fmt.Errorf("load data sections: %w", err)
}
progs, err := ec.loadPrograms(progSections, relocations, btfSpec)
// Finally, collect programs and link them.
progs, err := ec.loadPrograms()
if err != nil {
return nil, fmt.Errorf("load programs: %w", err)
}
@ -170,26 +215,61 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
return version, nil
}
func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section, relocations map[elf.SectionIndex]map[uint64]elf.Symbol, btfSpec *btf.Spec) (map[string]*ProgramSpec, error) {
type elfSectionKind int
const (
undefSection elfSectionKind = iota
mapSection
btfMapSection
programSection
dataSection
)
type elfSection struct {
*elf.Section
kind elfSectionKind
// Offset from the start of the section to a symbol
symbols map[uint64]elf.Symbol
// Offset from the start of the section to a relocation, which points at
// a symbol in another section.
relocations map[uint64]elf.Symbol
// The number of relocations pointing at this section.
references int
}
func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection {
return &elfSection{
section,
kind,
make(map[uint64]elf.Symbol),
make(map[uint64]elf.Symbol),
0,
}
}
func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) {
var (
progs []*ProgramSpec
libs []*ProgramSpec
)
for idx, sec := range progSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
for _, sec := range ec.sections {
if sec.kind != programSection {
continue
}
if len(sec.symbols) == 0 {
return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
}
funcSym, ok := syms[0]
funcSym, ok := sec.symbols[0]
if !ok {
return nil, fmt.Errorf("section %v: no label at start", sec.Name)
}
insns, length, err := ec.loadInstructions(sec, syms, relocations[idx])
insns, length, err := ec.loadInstructions(sec)
if err != nil {
return nil, fmt.Errorf("program %s: can't unmarshal instructions: %w", funcSym.Name, err)
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
}
progType, attachType, attachTo := getProgType(sec.Name)
@ -205,8 +285,8 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
ByteOrder: ec.ByteOrder,
}
if btfSpec != nil {
spec.BTF, err = btfSpec.Program(sec.Name, length)
if ec.btf != nil {
spec.BTF, err = ec.btf.Program(sec.Name, length)
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
}
@ -234,9 +314,9 @@ func (ec *elfCode) loadPrograms(progSections map[elf.SectionIndex]*elf.Section,
return res, nil
}
func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations map[uint64]elf.Symbol) (asm.Instructions, uint64, error) {
func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) {
var (
r = section.Open()
r = bufio.NewReader(section.Open())
insns asm.Instructions
offset uint64
)
@ -250,11 +330,11 @@ func (ec *elfCode) loadInstructions(section *elf.Section, symbols, relocations m
return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
}
ins.Symbol = symbols[offset].Name
ins.Symbol = section.symbols[offset].Name
if rel, ok := relocations[offset]; ok {
if rel, ok := section.relocations[offset]; ok {
if err = ec.relocateInstruction(&ins, rel); err != nil {
return nil, 0, fmt.Errorf("offset %d: can't relocate instruction: %w", offset, err)
return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err)
}
}
@ -270,69 +350,66 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
name = rel.Name
)
if typ == elf.STT_SECTION {
// Symbols with section type do not have a name set. Get it
// from the section itself.
idx := int(rel.Section)
if idx > len(ec.Sections) {
return errors.New("out-of-bounds section index")
target := ec.sections[rel.Section]
switch target.kind {
case mapSection, btfMapSection:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name)
}
name = ec.Sections[idx].Name
}
outer:
switch {
case ins.OpCode == asm.LoadImmOp(asm.DWord):
// There are two distinct types of a load from a map:
// a direct one, where the value is extracted without
// a call to map_lookup_elem in eBPF, and an indirect one
// that goes via the helper. They are distinguished by
// different relocations.
switch typ {
case elf.STT_SECTION:
// This is a direct load since the referenced symbol is a
// section. Weirdly, the offset of the real symbol in the
// section is encoded in the instruction stream.
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}
// For some reason, clang encodes the offset of the symbol in its
// section in the first basic BPF instruction, while the kernel
// expects it in the second one.
ins.Constant <<= 32
ins.Src = asm.PseudoMapValue
case elf.STT_NOTYPE:
if bind == elf.STB_GLOBAL && rel.Section == elf.SHN_UNDEF {
// This is a relocation generated by inline assembly.
// We can't do more than assigning ins.Reference.
break outer
}
// This is an ELF generated on clang < 8, which doesn't tag
if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
// STT_NOTYPE is generated on clang < 8 which doesn't tag
// relocations appropriately.
fallthrough
case elf.STT_OBJECT:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
}
ins.Src = asm.PseudoMapFD
default:
return fmt.Errorf("load: %s: unsupported relocation: %s", name, typ)
return fmt.Errorf("map load: incorrect relocation type %v", typ)
}
ins.Src = asm.PseudoMapFD
// Mark the instruction as needing an update when creating the
// collection.
if err := ins.RewriteMapPtr(-1); err != nil {
return err
}
case ins.OpCode.JumpOp() == asm.Call:
case dataSection:
switch typ {
case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}
case elf.STT_OBJECT:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
}
default:
return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
}
// We rely on using the name of the data section as the reference. It
// would be nicer to keep the real name in case of an STT_OBJECT, but
// it's not clear how to encode that into Instruction.
name = target.Name
// For some reason, clang encodes the offset of the symbol in its
// section in the first basic BPF instruction, while the kernel
// expects it in the second one.
ins.Constant <<= 32
ins.Src = asm.PseudoMapValue
// Mark the instruction as needing an update when creating the
// collection.
if err := ins.RewriteMapPtr(-1); err != nil {
return err
}
case programSection:
if ins.OpCode.JumpOp() != asm.Call {
return fmt.Errorf("not a call instruction: %s", ins)
}
if ins.Src != asm.PseudoCall {
return fmt.Errorf("call: %s: incorrect source register", name)
}
@ -357,7 +434,7 @@ outer:
return fmt.Errorf("call: %s: invalid offset %d", name, offset)
}
sym, ok := ec.symbolsPerSection[rel.Section][uint64(offset)]
sym, ok := target.symbols[uint64(offset)]
if !ok {
return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
}
@ -369,31 +446,46 @@ outer:
return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
}
case undefSection:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
}
if typ != elf.STT_NOTYPE {
return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
}
// There is nothing to do here but set ins.Reference.
default:
return fmt.Errorf("relocation for unsupported instruction: %s", ins.OpCode)
return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
}
ins.Reference = name
return nil
}
func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section) error {
for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
for _, sec := range ec.sections {
if sec.kind != mapSection {
continue
}
nSym := len(sec.symbols)
if nSym == 0 {
return fmt.Errorf("section %v: no symbols", sec.Name)
}
if sec.Size%uint64(len(syms)) != 0 {
if sec.Size%uint64(nSym) != 0 {
return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
}
var (
r = sec.Open()
size = sec.Size / uint64(len(syms))
r = bufio.NewReader(sec.Open())
size = sec.Size / uint64(nSym)
)
for i, offset := 0, uint64(0); i < len(syms); i, offset = i+1, offset+size {
mapSym, ok := syms[offset]
for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size {
mapSym, ok := sec.symbols[offset]
if !ok {
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
}
@ -431,24 +523,46 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec, mapSections map[elf.Sectio
return nil
}
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec, mapSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return fmt.Errorf("missing BTF")
}
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
for _, sec := range ec.sections {
if sec.kind != btfMapSection {
continue
}
for idx, sec := range mapSections {
syms := ec.symbolsPerSection[idx]
if len(syms) == 0 {
if ec.btf == nil {
return fmt.Errorf("missing BTF")
}
if len(sec.symbols) == 0 {
return fmt.Errorf("section %v: no symbols", sec.Name)
}
for _, sym := range syms {
_, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open()))
if err != nil {
return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported)
}
for _, sym := range sec.symbols {
name := sym.Name
if maps[name] != nil {
return fmt.Errorf("section %v: map %v already exists", sec.Name, sym)
}
mapSpec, err := mapSpecFromBTF(spec, name)
// A global Var is created by declaring a struct with a 'structure variable',
// as is common in eBPF C to declare eBPF maps. For example,
// `struct { ... } map_name ...;` emits a global variable `map_name`
// with the type of said struct (which can be anonymous).
var v btf.Var
if err := ec.btf.FindType(name, &v); err != nil {
return fmt.Errorf("cannot find global variable '%s' in BTF: %w", name, err)
}
mapStruct, ok := v.Type.(*btf.Struct)
if !ok {
return fmt.Errorf("expected struct, got %s", v.Type)
}
mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf)
if err != nil {
return fmt.Errorf("map %v: %w", name, err)
}
@ -460,30 +574,21 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec, mapSections map[elf.Sec
return nil
}
func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
btfMap, btfMapMembers, err := spec.Map(name)
if err != nil {
return nil, fmt.Errorf("can't get BTF: %w", err)
}
keyType := btf.MapKey(btfMap)
size, err := btf.Sizeof(keyType)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF key: %w", err)
}
keySize := uint32(size)
valueType := btf.MapValue(btfMap)
size, err = btf.Sizeof(valueType)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF value: %w", err)
}
valueSize := uint32(size)
// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
// a BTF map definition. The name and spec arguments will be copied to the
// resulting MapSpec, and inner must be true on any recursive invocations.
func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) {
var (
key, value btf.Type
keySize, valueSize uint32
mapType, flags, maxEntries uint32
pinType PinType
innerMapSpec *MapSpec
err error
)
for _, member := range btfMapMembers {
for i, member := range def.Members {
switch member.Name {
case "type":
mapType, err = uintFromBTF(member.Type)
@ -503,8 +608,48 @@ func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
}
case "key":
if keySize != 0 {
return nil, errors.New("both key and key_size given")
}
pk, ok := member.Type.(*btf.Pointer)
if !ok {
return nil, fmt.Errorf("key type is not a pointer: %T", member.Type)
}
key = pk.Target
size, err := btf.Sizeof(pk.Target)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF key: %w", err)
}
keySize = uint32(size)
case "value":
if valueSize != 0 {
return nil, errors.New("both value and value_size given")
}
vk, ok := member.Type.(*btf.Pointer)
if !ok {
return nil, fmt.Errorf("value type is not a pointer: %T", member.Type)
}
value = vk.Target
size, err := btf.Sizeof(vk.Target)
if err != nil {
return nil, fmt.Errorf("can't get size of BTF value: %w", err)
}
valueSize = uint32(size)
case "key_size":
if _, isVoid := keyType.(*btf.Void); !isVoid {
// Key needs to be nil and keySize needs to be 0 for key_size to be
// considered a valid member.
if key != nil || keySize != 0 {
return nil, errors.New("both key and key_size given")
}
@ -514,7 +659,9 @@ func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
}
case "value_size":
if _, isVoid := valueType.(*btf.Void); !isVoid {
// Value needs to be nil and valueSize needs to be 0 for value_size to be
// considered a valid member.
if value != nil || valueSize != 0 {
return nil, errors.New("both value and value_size given")
}
@ -524,28 +671,79 @@ func mapSpecFromBTF(spec *btf.Spec, name string) (*MapSpec, error) {
}
case "pinning":
if inner {
return nil, errors.New("inner maps can't be pinned")
}
pinning, err := uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get pinning: %w", err)
}
if pinning != 0 {
return nil, fmt.Errorf("'pinning' attribute not supported: %w", ErrNotSupported)
pinType = PinType(pinning)
case "values":
// The 'values' field in BTF map definitions is used for declaring map
// value types that are references to other BPF objects, like other maps
// or programs. It is always expected to be an array of pointers.
if i != len(def.Members)-1 {
return nil, errors.New("'values' must be the last member in a BTF map definition")
}
if valueSize != 0 && valueSize != 4 {
return nil, errors.New("value_size must be 0 or 4")
}
valueSize = 4
valueType, err := resolveBTFArrayMacro(member.Type)
if err != nil {
return nil, fmt.Errorf("can't resolve type of member 'values': %w", err)
}
switch t := valueType.(type) {
case *btf.Struct:
// The values member pointing to an array of structs means we're expecting
// a map-in-map declaration.
if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps {
return nil, errors.New("outer map needs to be an array or a hash of maps")
}
if inner {
return nil, fmt.Errorf("nested inner maps are not supported")
}
// This inner map spec is used as a map template, but it needs to be
// created as a traditional map before it can be used to do so.
// libbpf names the inner map template '<outer_name>.inner', but we
// opted for _inner to simplify validation logic. (dots only supported
// on kernels 5.2 and up)
// Pass the BTF spec from the parent object, since both parent and
// child must be created from the same BTF blob (on kernels that support BTF).
innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec)
if err != nil {
return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
}
default:
return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
}
case "key", "value":
default:
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
}
}
bm := btf.NewMap(spec, key, value)
return &MapSpec{
Name: SanitizeName(name, -1),
Type: MapType(mapType),
KeySize: keySize,
ValueSize: valueSize,
MaxEntries: maxEntries,
Flags: flags,
BTF: btfMap,
BTF: &bm,
Pinning: pinType,
InnerMap: innerMapSpec,
}, nil
}
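
For comparison, a sketch of the Go-side analogue of such a BTF map-in-map definition, assuming kernel and library support for creating nested maps: the outer spec carries the inner template in InnerMap, and its value size is 4 because each slot holds an inner map FD. Names and sizes here are purely illustrative.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	outer := &ebpf.MapSpec{
		Name:       "outer_map", // illustrative name
		Type:       ebpf.ArrayOfMaps,
		KeySize:    4,
		ValueSize:  4, // each slot stores an inner map FD
		MaxEntries: 8,
		// Template used to create the inner maps.
		InnerMap: &ebpf.MapSpec{
			Name:       "outer_map_inner",
			Type:       ebpf.Array,
			KeySize:    4,
			ValueSize:  8,
			MaxEntries: 1,
		},
	}

	m, err := ebpf.NewMap(outer)
	if err != nil {
		log.Fatalf("creating map-in-map: %v", err)
	}
	defer m.Close()
}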
@ -565,13 +763,40 @@ func uintFromBTF(typ btf.Type) (uint32, error) {
return arr.Nelems, nil
}
func (ec *elfCode) loadDataSections(maps map[string]*MapSpec, dataSections map[elf.SectionIndex]*elf.Section, spec *btf.Spec) error {
if spec == nil {
return errors.New("data sections require BTF, make sure all consts are marked as static")
// resolveBTFArrayMacro resolves the __array macro, which declares an array
// of pointers to a given type. This function returns the target Type of
// the pointers in the array.
func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
arr, ok := typ.(*btf.Array)
if !ok {
return nil, fmt.Errorf("not an array: %v", typ)
}
for _, sec := range dataSections {
btfMap, err := spec.Datasec(sec.Name)
ptr, ok := arr.Type.(*btf.Pointer)
if !ok {
return nil, fmt.Errorf("not an array of pointers: %v", typ)
}
return ptr.Target, nil
}
func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
for _, sec := range ec.sections {
if sec.kind != dataSection {
continue
}
if sec.references == 0 {
// Prune data sections which are not referenced by any
// instructions.
continue
}
if ec.btf == nil {
return errors.New("data sections require BTF, make sure all consts are marked as static")
}
btfMap, err := ec.btf.Datasec(sec.Name)
if err != nil {
return err
}
@ -636,6 +861,8 @@ func getProgType(sectionName string) (ProgramType, AttachType, string) {
"lirc_mode2": {LircMode2, AttachLircMode2},
"flow_dissector": {FlowDissector, AttachFlowDissector},
"iter/": {Tracing, AttachTraceIter},
"sk_lookup/": {SkLookup, AttachSkLookup},
"lsm/": {LSM, AttachLSMMac},
"cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress},
"cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress},
@ -674,69 +901,30 @@ func getProgType(sectionName string) (ProgramType, AttachType, string) {
return UnspecifiedProgram, AttachNone, ""
}
func (ec *elfCode) loadRelocations(sections map[elf.SectionIndex]*elf.Section) (map[elf.SectionIndex]map[uint64]elf.Symbol, map[elf.SectionIndex]bool, error) {
result := make(map[elf.SectionIndex]map[uint64]elf.Symbol)
targets := make(map[elf.SectionIndex]bool)
for idx, sec := range sections {
rels := make(map[uint64]elf.Symbol)
func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
rels := make(map[uint64]elf.Symbol)
if sec.Entsize < 16 {
return nil, nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
}
r := sec.Open()
for off := uint64(0); off < sec.Size; off += sec.Entsize {
ent := io.LimitReader(r, int64(sec.Entsize))
var rel elf.Rel64
if binary.Read(ent, ec.ByteOrder, &rel) != nil {
return nil, nil, fmt.Errorf("can't parse relocation at offset %v", off)
}
symNo := int(elf.R_SYM64(rel.Info) - 1)
if symNo >= len(ec.symbols) {
return nil, nil, fmt.Errorf("relocation at offset %d: symbol %v doesnt exist", off, symNo)
}
symbol := ec.symbols[symNo]
targets[symbol.Section] = true
rels[rel.Off] = ec.symbols[symNo]
}
result[idx] = rels
if sec.Entsize < 16 {
return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
}
return result, targets, nil
}
func symbolsPerSection(symbols []elf.Symbol) map[elf.SectionIndex]map[uint64]elf.Symbol {
result := make(map[elf.SectionIndex]map[uint64]elf.Symbol)
for _, sym := range symbols {
switch elf.ST_TYPE(sym.Info) {
case elf.STT_NOTYPE:
// Older versions of LLVM doesn't tag
// symbols correctly.
break
case elf.STT_OBJECT:
break
case elf.STT_FUNC:
break
default:
continue
r := bufio.NewReader(sec.Open())
for off := uint64(0); off < sec.Size; off += sec.Entsize {
ent := io.LimitReader(r, int64(sec.Entsize))
var rel elf.Rel64
if binary.Read(ent, ec.ByteOrder, &rel) != nil {
return nil, fmt.Errorf("can't parse relocation at offset %v", off)
}
if sym.Section == elf.SHN_UNDEF || sym.Section >= elf.SHN_LORESERVE {
continue
symNo := int(elf.R_SYM64(rel.Info) - 1)
if symNo >= len(symbols) {
return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo)
}
if sym.Name == "" {
continue
}
idx := sym.Section
if _, ok := result[idx]; !ok {
result[idx] = make(map[uint64]elf.Symbol)
}
result[idx][sym.Value] = sym
symbol := symbols[symNo]
rels[rel.Off] = symbol
}
return result
return rels, nil
}

vendor/github.com/cilium/ebpf/elf_reader_fuzz.go generated vendored (new file, 21 lines)

@ -0,0 +1,21 @@
// +build gofuzz
// Use with https://github.com/dvyukov/go-fuzz
package ebpf
import "bytes"
func FuzzLoadCollectionSpec(data []byte) int {
spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data))
if err != nil {
if spec != nil {
panic("spec is not nil")
}
return 0
}
if spec == nil {
panic("spec is nil")
}
return 1
}

vendor/github.com/cilium/ebpf/examples/go.mod generated vendored (new file, 9 lines)

@ -0,0 +1,9 @@
module github.com/cilium/ebpf/examples
go 1.15
require (
github.com/cilium/ebpf v0.3.0
github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
)

File diff suppressed because it is too large.


@ -0,0 +1,80 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__
/*
* Note that bpf programs need to include either
* vmlinux.h (auto-generated from BTF) or linux/types.h
* in advance since bpf_helper_defs.h uses such types
* as __u64.
*/
#include "bpf_helper_defs.h"
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
/* Helper macro to print out debug messages */
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
* are interpreted by elf_bpf loader
*/
#define SEC(NAME) __attribute__((section(NAME), used))
#ifndef __always_inline
#define __always_inline __attribute__((always_inline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif
/*
* Helper macro to manipulate data structures
*/
#ifndef offsetof
#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) \
({ \
void *__mptr = (void *)(ptr); \
((type *)(__mptr - offsetof(type, member))); \
})
#endif
/*
* Helper structure used by eBPF C program
* to describe BPF map attributes to libbpf loader
*/
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
};
enum libbpf_pin_type {
LIBBPF_PIN_NONE,
/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
LIBBPF_PIN_BY_NAME,
};
enum libbpf_tristate {
TRI_NO = 0,
TRI_YES = 1,
TRI_MODULE = 2,
};
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
#endif


@ -0,0 +1,65 @@
// This is a compact version of `vmlinux.h` to be used in the examples using C code.
#ifndef __VMLINUX_H__
#define __VMLINUX_H__
typedef unsigned char __u8;
typedef short int __s16;
typedef short unsigned int __u16;
typedef int __s32;
typedef unsigned int __u32;
typedef long long int __s64;
typedef long long unsigned int __u64;
typedef __u8 u8;
typedef __s16 s16;
typedef __u16 u16;
typedef __s32 s32;
typedef __u32 u32;
typedef __s64 s64;
typedef __u64 u64;
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __be32;
typedef __u64 __be64;
typedef __u32 __wsum;
enum bpf_map_type {
BPF_MAP_TYPE_UNSPEC = 0,
BPF_MAP_TYPE_HASH = 1,
BPF_MAP_TYPE_ARRAY = 2,
BPF_MAP_TYPE_PROG_ARRAY = 3,
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4,
BPF_MAP_TYPE_PERCPU_HASH = 5,
BPF_MAP_TYPE_PERCPU_ARRAY = 6,
BPF_MAP_TYPE_STACK_TRACE = 7,
BPF_MAP_TYPE_CGROUP_ARRAY = 8,
BPF_MAP_TYPE_LRU_HASH = 9,
BPF_MAP_TYPE_LRU_PERCPU_HASH = 10,
BPF_MAP_TYPE_LPM_TRIE = 11,
BPF_MAP_TYPE_ARRAY_OF_MAPS = 12,
BPF_MAP_TYPE_HASH_OF_MAPS = 13,
BPF_MAP_TYPE_DEVMAP = 14,
BPF_MAP_TYPE_SOCKMAP = 15,
BPF_MAP_TYPE_CPUMAP = 16,
BPF_MAP_TYPE_XSKMAP = 17,
BPF_MAP_TYPE_SOCKHASH = 18,
BPF_MAP_TYPE_CGROUP_STORAGE = 19,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21,
BPF_MAP_TYPE_QUEUE = 22,
BPF_MAP_TYPE_STACK = 23,
BPF_MAP_TYPE_SK_STORAGE = 24,
BPF_MAP_TYPE_DEVMAP_HASH = 25,
BPF_MAP_TYPE_STRUCT_OPS = 26,
BPF_MAP_TYPE_RINGBUF = 27,
BPF_MAP_TYPE_INODE_STORAGE = 28,
};
enum {
BPF_ANY = 0,
BPF_NOEXIST = 1,
BPF_EXIST = 2,
BPF_F_LOCK = 4,
};
#endif /* __VMLINUX_H__ */


@ -0,0 +1,26 @@
#include "common.h"
#include "bpf_helpers.h"
char __license[] SEC("license") = "Dual MIT/GPL";
struct bpf_map_def SEC("maps") kprobe_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(u64),
.max_entries = 1,
};
SEC("kprobe/__x64_sys_execve")
int kprobe_execve() {
u32 key = 0;
u64 initval = 1, *valp;
valp = bpf_map_lookup_elem(&kprobe_map, &key);
if (!valp) {
bpf_map_update_elem(&kprobe_map, &key, &initval, BPF_ANY);
return 0;
}
__sync_fetch_and_add(valp, 1);
return 0;
}
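
A hedged user-space counterpart for this program: it assumes the C file was compiled to "kprobe.o", materialises kprobe_map and kprobe_execve through struct tags, and reads the counter. Attaching the program to the kprobe itself (perf events or the link subpackage) is deliberately omitted here.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

type kprobeObjects struct {
	Prog  *ebpf.Program `ebpf:"kprobe_execve"`
	Count *ebpf.Map     `ebpf:"kprobe_map"`
}

func main() {
	// "kprobe.o" is an assumed output path for the C program above.
	spec, err := ebpf.LoadCollectionSpec("kprobe.o")
	if err != nil {
		log.Fatalf("loading spec: %v", err)
	}

	var objs kprobeObjects
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		log.Fatalf("loading objects: %v", err)
	}
	defer objs.Prog.Close()
	defer objs.Count.Close()

	// Read the single counter slot the kprobe increments.
	var (
		key   uint32
		value uint64
	)
	if err := objs.Count.Lookup(&key, &value); err != nil {
		log.Fatalf("reading counter: %v", err)
	}
	log.Printf("execve called %d times", value)
}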


@ -1,5 +1,9 @@
module github.com/cilium/ebpf
go 1.13
go 1.15
require golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
require (
github.com/frankban/quicktest v1.11.3
github.com/google/go-cmp v0.5.4
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)

vendor/github.com/cilium/ebpf/info.go generated vendored (new file, 239 lines)

@ -0,0 +1,239 @@
package ebpf
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"strings"
"syscall"
"time"
"github.com/cilium/ebpf/internal"
)
// MapInfo describes a map.
type MapInfo struct {
Type MapType
id MapID
KeySize uint32
ValueSize uint32
MaxEntries uint32
Flags uint32
// Name as supplied by user space at load time.
Name string
}
func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
info, err := bpfGetMapInfoByFD(fd)
if errors.Is(err, syscall.EINVAL) {
return newMapInfoFromProc(fd)
}
if err != nil {
return nil, err
}
return &MapInfo{
MapType(info.map_type),
MapID(info.id),
info.key_size,
info.value_size,
info.max_entries,
info.map_flags,
// name is available from 4.15.
internal.CString(info.name[:]),
}, nil
}
func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) {
var mi MapInfo
err := scanFdInfo(fd, map[string]interface{}{
"map_type": &mi.Type,
"key_size": &mi.KeySize,
"value_size": &mi.ValueSize,
"max_entries": &mi.MaxEntries,
"map_flags": &mi.Flags,
})
if err != nil {
return nil, err
}
return &mi, nil
}
// ID returns the map ID.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (mi *MapInfo) ID() (MapID, bool) {
return mi.id, mi.id > 0
}
// programStats holds statistics of a program.
type programStats struct {
// Total accumulated runtime of the program in ns.
runtime time.Duration
// Total number of times the program was called.
runCount uint64
}
// ProgramInfo describes a program.
type ProgramInfo struct {
Type ProgramType
id ProgramID
// Truncated hash of the BPF bytecode.
Tag string
// Name as supplied by user space at load time.
Name string
stats *programStats
}
func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
info, err := bpfGetProgInfoByFD(fd)
if errors.Is(err, syscall.EINVAL) {
return newProgramInfoFromProc(fd)
}
if err != nil {
return nil, err
}
return &ProgramInfo{
Type: ProgramType(info.prog_type),
id: ProgramID(info.id),
// tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD.
Tag: hex.EncodeToString(info.tag[:]),
// name is available from 4.15.
Name: internal.CString(info.name[:]),
stats: &programStats{
runtime: time.Duration(info.run_time_ns),
runCount: info.run_cnt,
},
}, nil
}
func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) {
var info ProgramInfo
err := scanFdInfo(fd, map[string]interface{}{
"prog_type": &info.Type,
"prog_tag": &info.Tag,
})
if errors.Is(err, errMissingFields) {
return nil, &internal.UnsupportedFeatureError{
Name: "reading program info from /proc/self/fdinfo",
MinimumVersion: internal.Version{4, 10, 0},
}
}
if err != nil {
return nil, err
}
return &info, nil
}
// ID returns the program ID.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) ID() (ProgramID, bool) {
return pi.id, pi.id > 0
}
// RunCount returns the total number of times the program was called.
//
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) RunCount() (uint64, bool) {
if pi.stats != nil {
return pi.stats.runCount, true
}
return 0, false
}
// Runtime returns the total accumulated runtime of the program.
//
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
if pi.stats != nil {
return pi.stats.runtime, true
}
return time.Duration(0), false
}
func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
raw, err := fd.Value()
if err != nil {
return err
}
fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw))
if err != nil {
return err
}
defer fh.Close()
if err := scanFdInfoReader(fh, fields); err != nil {
return fmt.Errorf("%s: %w", fh.Name(), err)
}
return nil
}
var errMissingFields = errors.New("missing fields")
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
var (
scanner = bufio.NewScanner(r)
scanned int
)
for scanner.Scan() {
parts := strings.SplitN(scanner.Text(), "\t", 2)
if len(parts) != 2 {
continue
}
name := strings.TrimSuffix(parts[0], ":")
field, ok := fields[string(name)]
if !ok {
continue
}
if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 {
return fmt.Errorf("can't parse field %s: %v", name, err)
}
scanned++
}
if err := scanner.Err(); err != nil {
return err
}
if scanned != len(fields) {
return errMissingFields
}
return nil
}
// EnableStats starts the measuring of the runtime
// and run counts of eBPF programs.
//
// Collecting statistics can have an impact on the performance.
//
// Requires at least 5.8.
func EnableStats(which uint32) (io.Closer, error) {
attr := internal.BPFEnableStatsAttr{
StatsType: which,
}
fd, err := internal.BPFEnableStats(&attr)
if err != nil {
return nil, err
}
return fd, nil
}
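
// The new EnableStats call pairs with the ProgramInfo accessors above. The
// following is a sketch only, under a few assumptions: prog is an already
// loaded *ebpf.Program, prog.Info() is the exported entry point into
// newProgramInfoFromFd, the statistics type value 0 corresponds to the
// kernel's BPF_STATS_RUN_TIME, and "fmt" plus "github.com/cilium/ebpf" are
// imported.
func reportStats(prog *ebpf.Program) error {
	closer, err := ebpf.EnableStats(0) // BPF_STATS_RUN_TIME, kernel 5.8+
	if err != nil {
		return fmt.Errorf("enabling stats: %w", err)
	}
	defer closer.Close()

	// ... let the program run for a while ...

	info, err := prog.Info()
	if err != nil {
		return fmt.Errorf("program info: %w", err)
	}
	if count, ok := info.RunCount(); ok {
		runtime, _ := info.Runtime()
		fmt.Printf("ran %d times for a total of %v\n", count, runtime)
	}
	return nil
}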


@ -29,12 +29,14 @@ var (
// Spec represents decoded BTF.
type Spec struct {
rawTypes []rawType
strings stringTable
types map[string][]Type
funcInfos map[string]extInfo
lineInfos map[string]extInfo
byteOrder binary.ByteOrder
rawTypes []rawType
strings stringTable
types []Type
namedTypes map[string][]namedType
funcInfos map[string]extInfo
lineInfos map[string]extInfo
coreRelos map[string]bpfCoreRelos
byteOrder binary.ByteOrder
}
type btfHeader struct {
@ -53,35 +55,15 @@ type btfHeader struct {
//
// Returns a nil Spec and no error if no BTF was present.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := elf.NewFile(rd)
file, err := internal.NewSafeELFFile(rd)
if err != nil {
return nil, err
}
defer file.Close()
var (
btfSection *elf.Section
btfExtSection *elf.Section
sectionSizes = make(map[string]uint32)
)
for _, sec := range file.Sections {
switch sec.Name {
case ".BTF":
btfSection = sec
case ".BTF.ext":
btfExtSection = sec
default:
if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
break
}
if sec.Size > math.MaxUint32 {
return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
}
sectionSizes[sec.Name] = uint32(sec.Size)
}
btfSection, btfExtSection, sectionSizes, err := findBtfSections(file)
if err != nil {
return nil, err
}
if btfSection == nil {
@ -100,6 +82,10 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
continue
}
if int(symbol.Section) >= len(file.Sections) {
return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
}
secName := file.Sections[symbol.Section].Name
if _, ok := sectionSizes[secName]; !ok {
continue
@ -121,7 +107,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
return spec, nil
}
spec.funcInfos, spec.lineInfos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
if err != nil {
return nil, fmt.Errorf("can't read ext info: %w", err)
}
@ -129,6 +115,51 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
return spec, nil
}
func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) {
var (
btfSection *elf.Section
btfExtSection *elf.Section
sectionSizes = make(map[string]uint32)
)
for _, sec := range file.Sections {
switch sec.Name {
case ".BTF":
btfSection = sec
case ".BTF.ext":
btfExtSection = sec
default:
if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
break
}
if sec.Size > math.MaxUint32 {
return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
}
sectionSizes[sec.Name] = uint32(sec.Size)
}
}
return btfSection, btfExtSection, sectionSizes, nil
}
func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
return nil, err
}
defer file.Close()
btfSection, _, _, err := findBtfSections(file)
if err != nil {
return nil, fmt.Errorf(".BTF ELF section: %s", err)
}
if btfSection == nil {
return nil, fmt.Errorf("unable to find .BTF ELF section")
}
return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil)
}
func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
rawTypes, rawStrings, err := parseBTF(btf, bo)
if err != nil {
@ -140,16 +171,17 @@ func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[stri
return nil, err
}
types, err := inflateRawTypes(rawTypes, rawStrings)
types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
if err != nil {
return nil, err
}
return &Spec{
rawTypes: rawTypes,
types: types,
strings: rawStrings,
byteOrder: bo,
rawTypes: rawTypes,
namedTypes: typesByName,
types: types,
strings: rawStrings,
byteOrder: bo,
}, nil
}
@ -176,16 +208,43 @@ func LoadKernelSpec() (*Spec, error) {
}
func loadKernelSpec() (*Spec, error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if os.IsNotExist(err) {
return nil, fmt.Errorf("can't open kernel BTF at /sys/kernel/btf/vmlinux: %w", ErrNotFound)
}
release, err := unix.KernelRelease()
if err != nil {
return nil, fmt.Errorf("can't read kernel BTF: %s", err)
return nil, fmt.Errorf("can't read kernel release number: %w", err)
}
defer fh.Close()
return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
}
// use same list of locations as libbpf
// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
locations := []string{
"/boot/vmlinux-%s",
"/lib/modules/%s/vmlinux-%[1]s",
"/lib/modules/%s/build/vmlinux",
"/usr/lib/modules/%s/kernel/vmlinux",
"/usr/lib/debug/boot/vmlinux-%s",
"/usr/lib/debug/boot/vmlinux-%s.debug",
"/usr/lib/debug/lib/modules/%s/vmlinux",
}
for _, loc := range locations {
path := fmt.Sprintf(loc, release)
fh, err := os.Open(path)
if err != nil {
continue
}
defer fh.Close()
return loadSpecFromVmlinux(fh)
}
return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
}
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
@ -259,10 +318,14 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
return err
}
if name == ".kconfig" || name == ".ksym" {
if name == ".kconfig" || name == ".ksyms" {
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
}
if rawTypes[i].SizeType != 0 {
continue
}
size, ok := sectionSizes[name]
if !ok {
return fmt.Errorf("data section %s: missing size", name)
@ -369,54 +432,19 @@ func (s *Spec) Program(name string, length uint64) (*Program, error) {
return nil, errors.New("length musn't be zero")
}
if s.funcInfos == nil && s.lineInfos == nil {
if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
}
funcInfos, funcOK := s.funcInfos[name]
lineInfos, lineOK := s.lineInfos[name]
coreRelos, coreOK := s.coreRelos[name]
if !funcOK && !lineOK {
if !funcOK && !lineOK && !coreOK {
return nil, fmt.Errorf("no extended BTF info for section %s", name)
}
return &Program{s, length, funcInfos, lineInfos}, nil
}
// Map finds the BTF for a map.
//
// Returns an error if there is no BTF for the given name.
func (s *Spec) Map(name string) (*Map, []Member, error) {
var mapVar Var
if err := s.FindType(name, &mapVar); err != nil {
return nil, nil, err
}
mapStruct, ok := mapVar.Type.(*Struct)
if !ok {
return nil, nil, fmt.Errorf("expected struct, have %s", mapVar.Type)
}
var key, value Type
for _, member := range mapStruct.Members {
switch member.Name {
case "key":
key = member.Type
case "value":
value = member.Type
}
}
if key == nil {
key = (*Void)(nil)
}
if value == nil {
value = (*Void)(nil)
}
return &Map{s, key, value}, mapStruct.Members, nil
return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
}
// Datasec returns the BTF required to create maps which represent data sections.
@ -426,7 +454,8 @@ func (s *Spec) Datasec(name string) (*Map, error) {
return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
}
return &Map{s, &Void{}, &datasec}, nil
m := NewMap(s, &Void{}, &datasec)
return &m, nil
}
// FindType searches for a type with a specific name.
@ -441,11 +470,16 @@ func (s *Spec) FindType(name string, typ Type) error {
candidate Type
)
for _, typ := range s.types[name] {
for _, typ := range s.namedTypes[essentialName(name)] {
if reflect.TypeOf(typ) != wanted {
continue
}
// Match against the full name, not just the essential one.
if typ.name() != name {
continue
}
if candidate != nil {
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
}
@ -532,6 +566,23 @@ type Map struct {
key, value Type
}
// NewMap returns a new Map containing the given values.
// The key and value arguments are initialized to Void if nil values are given.
func NewMap(spec *Spec, key Type, value Type) Map {
if key == nil {
key = &Void{}
}
if value == nil {
value = &Void{}
}
return Map{
spec: spec,
key: key,
value: value,
}
}
// MapSpec should be a method on Map, but is a free function
// to hide it from users of the ebpf package.
func MapSpec(m *Map) *Spec {
@ -555,6 +606,7 @@ type Program struct {
spec *Spec
length uint64
funcInfos, lineInfos extInfo
coreRelos bpfCoreRelos
}
// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
@ -580,9 +632,10 @@ func ProgramAppend(s, other *Program) error {
return fmt.Errorf("line infos: %w", err)
}
s.length += other.length
s.funcInfos = funcInfos
s.lineInfos = lineInfos
s.coreRelos = s.coreRelos.append(other.coreRelos, s.length)
s.length += other.length
return nil
}
@ -612,6 +665,19 @@ func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
return s.lineInfos.recordSize, bytes, nil
}
// ProgramRelocations returns the CO-RE relocations required to adjust the
// program to the target.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
if len(s.coreRelos) == 0 {
return nil, nil
}
return coreRelocate(s.spec, target, s.coreRelos)
}
type bpfLoadBTFAttr struct {
btf internal.Pointer
logBuf internal.Pointer
@ -621,9 +687,7 @@ type bpfLoadBTFAttr struct {
}
func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
const _BTFLoad = 18
fd, err := internal.BPF(_BTFLoad, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
@ -653,7 +717,7 @@ func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
var (
types struct {
Integer btfType
@ -677,15 +741,24 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() (bool, error) {
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
})
if err == nil {
fd.Close()
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: loading the program
// might still succeed without BTF.
return internal.ErrNotSupported
}
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return !errors.Is(err, unix.EINVAL), nil
if err != nil {
return err
}
fd.Close()
return nil
})
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() (bool, error) {
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
if err := haveBTF(); err != nil {
return err
}
var (
types struct {
FuncProto btfType
@ -706,11 +779,13 @@ var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() (bo
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
})
if err == nil {
fd.Close()
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
if err != nil {
return err
}
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return !errors.Is(err, unix.EINVAL), nil
fd.Close()
return nil
})


@ -40,10 +40,12 @@ const (
)
const (
btfTypeKindShift = 24
btfTypeKindLen = 4
btfTypeVlenShift = 0
btfTypeVlenMask = 16
btfTypeKindShift = 24
btfTypeKindLen = 4
btfTypeVlenShift = 0
btfTypeVlenMask = 16
btfTypeKindFlagShift = 31
btfTypeKindFlagMask = 1
)
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
@ -136,6 +138,10 @@ func (bt *btfType) SetVlen(vlen int) {
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
func (bt *btfType) KindFlag() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
func (bt *btfType) Linkage() btfFuncLinkage {
return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
@ -257,3 +263,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
types = append(types, rawType{header, data})
}
}
func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
}

388
vendor/github.com/cilium/ebpf/internal/btf/core.go generated vendored Normal file

@ -0,0 +1,388 @@
package btf
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.
// Relocation describes a CO-RE relocation.
type Relocation struct {
Current uint32
New uint32
}
func (r Relocation) equal(other Relocation) bool {
return r.Current == other.Current && r.New == other.New
}
// coreReloKind is the type of CO-RE relocation
type coreReloKind uint32
const (
reloFieldByteOffset coreReloKind = iota /* field byte offset */
reloFieldByteSize /* field size in bytes */
reloFieldExists /* field existence in target kernel */
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
reloFieldLShiftU64 /* bitfield-specific left bitshift */
reloFieldRShiftU64 /* bitfield-specific right bitshift */
reloTypeIDLocal /* type ID in local BPF object */
reloTypeIDTarget /* type ID in target kernel */
reloTypeExists /* type existence in target kernel */
reloTypeSize /* type size in bytes */
reloEnumvalExists /* enum value existence in target kernel */
reloEnumvalValue /* enum value integer value */
)
func (k coreReloKind) String() string {
switch k {
case reloFieldByteOffset:
return "byte_off"
case reloFieldByteSize:
return "byte_sz"
case reloFieldExists:
return "field_exists"
case reloFieldSigned:
return "signed"
case reloFieldLShiftU64:
return "lshift_u64"
case reloFieldRShiftU64:
return "rshift_u64"
case reloTypeIDLocal:
return "local_type_id"
case reloTypeIDTarget:
return "target_type_id"
case reloTypeExists:
return "type_exists"
case reloTypeSize:
return "type_size"
case reloEnumvalExists:
return "enumval_exists"
case reloEnumvalValue:
return "enumval_value"
default:
return "unknown"
}
}
func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
if target == nil {
var err error
target, err = loadKernelSpec()
if err != nil {
return nil, err
}
}
if local.byteOrder != target.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
}
relocations := make(map[uint64]Relocation, len(coreRelos))
for _, relo := range coreRelos {
accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
if err != nil {
return nil, err
}
accessor, err := parseCoreAccessor(accessorStr)
if err != nil {
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
}
if int(relo.TypeID) >= len(local.types) {
return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
}
typ := local.types[relo.TypeID]
if relo.ReloKind == reloTypeIDLocal {
relocations[uint64(relo.InsnOff)] = Relocation{
uint32(typ.ID()),
uint32(typ.ID()),
}
continue
}
named, ok := typ.(namedType)
if !ok || named.name() == "" {
return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
}
name := essentialName(named.name())
res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", name, err)
}
relocations[uint64(relo.InsnOff)] = res
}
return relocations, nil
}
var errAmbiguousRelocation = errors.New("ambiguous relocation")
func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
var relos []Relocation
var matches []Type
for _, target := range targets {
switch kind {
case reloTypeIDTarget:
if localAccessor[0] != 0 {
return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
}
if compat, err := coreAreTypesCompatible(local, target); err != nil {
return Relocation{}, fmt.Errorf("%s: %s", kind, err)
} else if !compat {
continue
}
relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})
default:
return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
}
matches = append(matches, target)
}
if len(relos) == 0 {
// TODO: Add switch for existence checks like reloEnumvalExists here.
// TODO: This might have to be poisoned.
return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
}
relo := relos[0]
for _, altRelo := range relos[1:] {
if !altRelo.equal(relo) {
return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
}
}
return relo, nil
}
/* coreAccessor contains a path through a struct. It contains at least one index.
*
* The interpretation depends on the kind of the relocation. The following is
* taken from struct bpf_core_relo in libbpf_internal.h:
*
* - for field-based relocations, string encodes an accessed field using
* a sequence of field and array indices, separated by colon (:). It's
* conceptually very close to LLVM's getelementptr ([0]) instruction's
* arguments for identifying offset to a field.
* - for type-based relocations, strings is expected to be just "0";
* - for enum value-based relocations, string contains an index of enum
* value within its enum type;
*
* Example to provide a better feel.
*
* struct sample {
* int a;
* struct {
* int b[10];
* };
* };
*
* struct sample s = ...;
* int x = &s->a; // encoded as "0:0" (a is field #0)
* int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
* // b is field #0 inside anon struct, accessing elem #5)
* int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
*/
type coreAccessor []int
func parseCoreAccessor(accessor string) (coreAccessor, error) {
if accessor == "" {
return nil, fmt.Errorf("empty accessor")
}
var result coreAccessor
parts := strings.Split(accessor, ":")
for _, part := range parts {
// 31 bits to avoid overflowing int on 32 bit platforms.
index, err := strconv.ParseUint(part, 10, 31)
if err != nil {
return nil, fmt.Errorf("accessor index %q: %s", part, err)
}
result = append(result, int(index))
}
return result, nil
}
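// As a quick illustration of the accessor format described above (usable only
// inside package btf, since the parser is unexported): the example string
// from the comment decodes into a plain slice of indices.
//
//	acc, err := parseCoreAccessor("0:1:0:5")
//	// err == nil, acc == coreAccessor{0, 1, 0, 5}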
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
*
* Check local and target types for compatibility. This check is used for
* type-based CO-RE relocations and follow slightly different rules than
* field-based relocations. This function assumes that root types were already
* checked for name match. Beyond that initial root-level name check, names
* are completely ignored. Compatibility rules are as follows:
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
* kind should match for local and target types (i.e., STRUCT is not
* compatible with UNION);
* - for ENUMs, the size is ignored;
* - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - CONST/VOLATILE/RESTRICT modifiers are ignored;
* - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
* - FUNC_PROTOs are compatible if they have compatible signature: same
* number of input args and compatible return and argument types.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*/
func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
var (
localTs, targetTs typeDeque
l, t = &localType, &targetType
depth = 0
)
for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
if depth >= maxTypeDepth {
return false, errors.New("types are nested too deep")
}
localType = skipQualifierAndTypedef(*l)
targetType = skipQualifierAndTypedef(*t)
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return false, nil
}
switch lv := (localType).(type) {
case *Void, *Struct, *Union, *Enum, *Fwd:
// Nothing to do here
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return false, nil
}
case *Pointer, *Array:
depth++
localType.walk(&localTs)
targetType.walk(&targetTs)
case *FuncProto:
tv := targetType.(*FuncProto)
if len(lv.Params) != len(tv.Params) {
return false, nil
}
depth++
localType.walk(&localTs)
targetType.walk(&targetTs)
default:
return false, fmt.Errorf("unsupported type %T", localType)
}
}
if l != nil {
return false, fmt.Errorf("dangling local type %T", *l)
}
if t != nil {
return false, fmt.Errorf("dangling target type %T", *t)
}
return true, nil
}
/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
*
* Check two types for compatibility for the purpose of field access
* relocation. const/volatile/restrict and typedefs are skipped to ensure we
* are relocating semantically compatible entities:
* - any two STRUCTs/UNIONs are compatible and can be mixed;
* - any two FWDs are compatible, if their names match (modulo flavor suffix);
* - any two PTRs are always compatible;
* - for ENUMs, names should be the same (ignoring flavor suffix) or at
* least one of enums should be anonymous;
* - for ENUMs, check sizes, names are ignored;
* - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - everything else shouldn't be ever a target of relocation.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*/
func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
doNamesMatch := func(a, b string) bool {
if a == "" || b == "" {
// allow anonymous and named type to match
return true
}
return essentialName(a) == essentialName(b)
}
for depth := 0; depth <= maxTypeDepth; depth++ {
localType = skipQualifierAndTypedef(localType)
targetType = skipQualifierAndTypedef(targetType)
_, lok := localType.(composite)
_, tok := targetType.(composite)
if lok && tok {
return true, nil
}
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return false, nil
}
switch lv := localType.(type) {
case *Pointer:
return true, nil
case *Enum:
tv := targetType.(*Enum)
return doNamesMatch(lv.name(), tv.name()), nil
case *Fwd:
tv := targetType.(*Fwd)
return doNamesMatch(lv.name(), tv.name()), nil
case *Int:
tv := targetType.(*Int)
return !lv.isBitfield() && !tv.isBitfield(), nil
case *Array:
tv := targetType.(*Array)
localType = lv.Type
targetType = tv.Type
default:
return false, fmt.Errorf("unsupported type %T", localType)
}
}
return false, errors.New("types are nested too deep")
}
func skipQualifierAndTypedef(typ Type) Type {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
case qualifier:
result = v.qualify()
case *Typedef:
result = v.Type
default:
return result
}
}
return typ
}


@ -1,6 +1,7 @@
package btf
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
@ -24,55 +25,82 @@ type btfExtHeader struct {
LineInfoLen uint32
}
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, err error) {
type btfExtCoreHeader struct {
CoreReloOff uint32
CoreReloLen uint32
}
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
var header btfExtHeader
var coreHeader btfExtCoreHeader
if err := binary.Read(r, bo, &header); err != nil {
return nil, nil, fmt.Errorf("can't read header: %v", err)
return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
if remainder < 0 {
return nil, nil, errors.New("header is too short")
return nil, nil, nil, errors.New("header is too short")
}
coreHdrSize := int64(binary.Size(&coreHeader))
if remainder >= coreHdrSize {
if err := binary.Read(r, bo, &coreHeader); err != nil {
return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
}
remainder -= coreHdrSize
}
// Of course, the .BTF.ext header has different semantics than the
// .BTF ext header. We need to ignore non-null values.
_, err = io.CopyN(ioutil.Discard, r, remainder)
if err != nil {
return nil, nil, fmt.Errorf("header padding: %v", err)
return nil, nil, nil, fmt.Errorf("header padding: %v", err)
}
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
return nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
}
funcInfo, err = parseExtInfo(io.LimitReader(r, int64(header.FuncInfoLen)), bo, strings)
buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
funcInfo, err = parseExtInfo(buf, bo, strings)
if err != nil {
return nil, nil, fmt.Errorf("function info: %w", err)
return nil, nil, nil, fmt.Errorf("function info: %w", err)
}
if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
return nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
}
lineInfo, err = parseExtInfo(io.LimitReader(r, int64(header.LineInfoLen)), bo, strings)
buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
lineInfo, err = parseExtInfo(buf, bo, strings)
if err != nil {
return nil, nil, fmt.Errorf("line info: %w", err)
return nil, nil, nil, fmt.Errorf("line info: %w", err)
}
return funcInfo, lineInfo, nil
if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
}
coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
if err != nil {
return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
}
}
return funcInfo, lineInfo, coreRelos, nil
}
type btfExtInfoSec struct {
@ -127,6 +155,8 @@ func (ei extInfo) MarshalBinary() ([]byte, error) {
}
func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
const maxRecordSize = 256
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, fmt.Errorf("can't read record size: %v", err)
@ -136,23 +166,15 @@ func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[st
// Need at least insnOff
return nil, errors.New("record size too short")
}
if recordSize > maxRecordSize {
return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
}
result := make(map[string]extInfo)
for {
var infoHeader btfExtInfoSec
if err := binary.Read(r, bo, &infoHeader); err == io.EOF {
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
} else if err != nil {
return nil, fmt.Errorf("can't read ext info header: %v", err)
}
secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return nil, fmt.Errorf("can't get section name: %w", err)
}
if infoHeader.NumInfo == 0 {
return nil, fmt.Errorf("section %s has invalid number of records", secName)
}
var records []extInfoRecord
@ -180,3 +202,80 @@ func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[st
}
}
}
// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
type bpfCoreRelo struct {
InsnOff uint32
TypeID TypeID
AccessStrOff uint32
ReloKind coreReloKind
}
type bpfCoreRelos []bpfCoreRelo
// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
// by offset.
func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
result := make([]bpfCoreRelo, 0, len(r)+len(other))
result = append(result, r...)
for _, relo := range other {
relo.InsnOff += uint32(offset)
result = append(result, relo)
}
return result
}
var extInfoReloSize = binary.Size(bpfCoreRelo{})
func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, fmt.Errorf("read record size: %v", err)
}
if recordSize != uint32(extInfoReloSize) {
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
}
result := make(map[string]bpfCoreRelos)
for {
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
var relos []bpfCoreRelo
for i := uint32(0); i < infoHeader.NumInfo; i++ {
var relo bpfCoreRelo
if err := binary.Read(r, bo, &relo); err != nil {
return nil, fmt.Errorf("section %v: read record: %v", secName, err)
}
if relo.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
}
relos = append(relos, relo)
}
result[secName] = relos
}
}
func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
var infoHeader btfExtInfoSec
if err := binary.Read(r, bo, &infoHeader); err != nil {
return "", nil, fmt.Errorf("read ext info header: %w", err)
}
secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return "", nil, fmt.Errorf("get section name: %w", err)
}
if infoHeader.NumInfo == 0 {
return "", nil, fmt.Errorf("section %s has zero records", secName)
}
return secName, &infoHeader, nil
}

49
vendor/github.com/cilium/ebpf/internal/btf/fuzz.go generated vendored Normal file

@ -0,0 +1,49 @@
// +build gofuzz
// Use with https://github.com/dvyukov/go-fuzz
package btf
import (
"bytes"
"encoding/binary"
"github.com/cilium/ebpf/internal"
)
func FuzzSpec(data []byte) int {
if len(data) < binary.Size(btfHeader{}) {
return -1
}
spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
if err != nil {
if spec != nil {
panic("spec is not nil")
}
return 0
}
if spec == nil {
panic("spec is nil")
}
return 1
}
func FuzzExtInfo(data []byte) int {
if len(data) < binary.Size(btfExtHeader{}) {
return -1
}
table := stringTable("\x00foo\x00barfoo\x00")
info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
if err != nil {
if info != nil {
panic("info is not nil")
}
return 0
}
if info == nil {
panic("info is nil")
}
return 1
}


@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math"
"strings"
)
const maxTypeDepth = 32
@ -20,10 +21,22 @@ func (tid TypeID) ID() TypeID {
type Type interface {
ID() TypeID
String() string
// Make a copy of the type, without copying Type members.
copy() Type
walk(*copyStack)
// Enumerate all nested Types. Repeated calls must visit nested
// types in the same order.
walk(*typeDeque)
}
// namedType is a type with a name.
//
// Most named types simply embed Name.
type namedType interface {
Type
name() string
}
// Name identifies a type.
@ -39,9 +52,18 @@ func (n Name) name() string {
type Void struct{}
func (v *Void) ID() TypeID { return 0 }
func (v *Void) String() string { return "void#0" }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
func (v *Void) walk(*copyStack) {}
func (v *Void) walk(*typeDeque) {}
type IntEncoding byte
const (
Signed IntEncoding = 1 << iota
Char
Bool
)
// Int is an integer of a given length.
type Int struct {
@ -49,24 +71,64 @@ type Int struct {
Name
// The size of the integer in bytes.
Size uint32
Size uint32
Encoding IntEncoding
// Offset is the starting bit offset. Currently always 0.
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
Offset uint32
Bits byte
}
var _ namedType = (*Int)(nil)
func (i *Int) String() string {
var s strings.Builder
switch {
case i.Encoding&Char != 0:
s.WriteString("char")
case i.Encoding&Bool != 0:
s.WriteString("bool")
default:
if i.Encoding&Signed == 0 {
s.WriteRune('u')
}
s.WriteString("int")
fmt.Fprintf(&s, "%d", i.Size*8)
}
fmt.Fprintf(&s, "#%d", i.TypeID)
if i.Bits > 0 {
fmt.Fprintf(&s, "[bits=%d]", i.Bits)
}
return s.String()
}
func (i *Int) size() uint32 { return i.Size }
func (i *Int) walk(*copyStack) {}
func (i *Int) walk(*typeDeque) {}
func (i *Int) copy() Type {
cpy := *i
return &cpy
}
func (i *Int) isBitfield() bool {
return i.Offset > 0
}
// Pointer is a pointer to another type.
type Pointer struct {
TypeID
Target Type
}
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) walk(cs *copyStack) { cs.push(&p.Target) }
func (p *Pointer) String() string {
return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
}
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
func (p *Pointer) copy() Type {
cpy := *p
return &cpy
@ -79,7 +141,11 @@ type Array struct {
Nelems uint32
}
func (arr *Array) walk(cs *copyStack) { cs.push(&arr.Type) }
func (arr *Array) String() string {
return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
}
func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
func (arr *Array) copy() Type {
cpy := *arr
return &cpy
@ -94,11 +160,15 @@ type Struct struct {
Members []Member
}
func (s *Struct) String() string {
return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
}
func (s *Struct) size() uint32 { return s.Size }
func (s *Struct) walk(cs *copyStack) {
func (s *Struct) walk(tdq *typeDeque) {
for i := range s.Members {
cs.push(&s.Members[i].Type)
tdq.push(&s.Members[i].Type)
}
}
@ -109,6 +179,10 @@ func (s *Struct) copy() Type {
return &cpy
}
func (s *Struct) members() []Member {
return s.Members
}
// Union is a compound type where members occupy the same memory.
type Union struct {
TypeID
@ -118,11 +192,15 @@ type Union struct {
Members []Member
}
func (u *Union) String() string {
return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
}
func (u *Union) size() uint32 { return u.Size }
func (u *Union) walk(cs *copyStack) {
func (u *Union) walk(tdq *typeDeque) {
for i := range u.Members {
cs.push(&u.Members[i].Type)
tdq.push(&u.Members[i].Type)
}
}
@ -133,35 +211,90 @@ func (u *Union) copy() Type {
return &cpy
}
func (u *Union) members() []Member {
return u.Members
}
type composite interface {
members() []Member
}
var (
_ composite = (*Struct)(nil)
_ composite = (*Union)(nil)
)
// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
Name
Type Type
Offset uint32
Type Type
// Offset is the bit offset of this member
Offset uint32
BitfieldSize uint32
}
// Enum lists possible values.
type Enum struct {
TypeID
Name
Values []EnumValue
}
func (e *Enum) String() string {
return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
}
// EnumValue is part of an Enum
//
// It is not a valid Type
type EnumValue struct {
Name
Value int32
}
func (e *Enum) size() uint32 { return 4 }
func (e *Enum) walk(*copyStack) {}
func (e *Enum) walk(*typeDeque) {}
func (e *Enum) copy() Type {
cpy := *e
cpy.Values = make([]EnumValue, len(e.Values))
copy(cpy.Values, e.Values)
return &cpy
}
// FwdKind is the type of forward declaration.
type FwdKind int
// Valid types of forward declaration.
const (
FwdStruct FwdKind = iota
FwdUnion
)
func (fk FwdKind) String() string {
switch fk {
case FwdStruct:
return "struct"
case FwdUnion:
return "union"
default:
return fmt.Sprintf("%T(%d)", fk, int(fk))
}
}
// Fwd is a forward declaration of a Type.
type Fwd struct {
TypeID
Name
Kind FwdKind
}
func (f *Fwd) walk(*copyStack) {}
func (f *Fwd) String() string {
return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
}
func (f *Fwd) walk(*typeDeque) {}
func (f *Fwd) copy() Type {
cpy := *f
return &cpy
@ -174,43 +307,62 @@ type Typedef struct {
Type Type
}
func (td *Typedef) walk(cs *copyStack) { cs.push(&td.Type) }
func (td *Typedef) String() string {
return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
}
func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
func (td *Typedef) copy() Type {
cpy := *td
return &cpy
}
// Volatile is a modifier.
// Volatile is a qualifier.
type Volatile struct {
TypeID
Type Type
}
func (v *Volatile) walk(cs *copyStack) { cs.push(&v.Type) }
func (v *Volatile) String() string {
return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
}
func (v *Volatile) qualify() Type { return v.Type }
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Volatile) copy() Type {
cpy := *v
return &cpy
}
// Const is a modifier.
// Const is a qualifier.
type Const struct {
TypeID
Type Type
}
func (c *Const) walk(cs *copyStack) { cs.push(&c.Type) }
func (c *Const) String() string {
return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
}
func (c *Const) qualify() Type { return c.Type }
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
func (c *Const) copy() Type {
cpy := *c
return &cpy
}
// Restrict is a modifier.
// Restrict is a qualifier.
type Restrict struct {
TypeID
Type Type
}
func (r *Restrict) walk(cs *copyStack) { cs.push(&r.Type) }
func (r *Restrict) String() string {
return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
}
func (r *Restrict) qualify() Type { return r.Type }
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
func (r *Restrict) copy() Type {
cpy := *r
return &cpy
@ -223,7 +375,11 @@ type Func struct {
Type Type
}
func (f *Func) walk(cs *copyStack) { cs.push(&f.Type) }
func (f *Func) String() string {
return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID())
}
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
func (f *Func) copy() Type {
cpy := *f
return &cpy
@ -233,15 +389,38 @@ func (f *Func) copy() Type {
type FuncProto struct {
TypeID
Return Type
// Parameters not supported yet
Params []FuncParam
}
func (fp *FuncProto) String() string {
var s strings.Builder
fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
for _, param := range fp.Params {
fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
}
fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
return s.String()
}
func (fp *FuncProto) walk(tdq *typeDeque) {
tdq.push(&fp.Return)
for i := range fp.Params {
tdq.push(&fp.Params[i].Type)
}
}
func (fp *FuncProto) walk(cs *copyStack) { cs.push(&fp.Return) }
func (fp *FuncProto) copy() Type {
cpy := *fp
cpy.Params = make([]FuncParam, len(fp.Params))
copy(cpy.Params, fp.Params)
return &cpy
}
type FuncParam struct {
Name
Type Type
}
// Var is a global variable.
type Var struct {
TypeID
@ -249,7 +428,12 @@ type Var struct {
Type Type
}
func (v *Var) walk(cs *copyStack) { cs.push(&v.Type) }
func (v *Var) String() string {
// TODO: Linkage
return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name)
}
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Var) copy() Type {
cpy := *v
return &cpy
@ -263,11 +447,15 @@ type Datasec struct {
Vars []VarSecinfo
}
func (ds *Datasec) String() string {
return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
}
func (ds *Datasec) size() uint32 { return ds.Size }
func (ds *Datasec) walk(cs *copyStack) {
func (ds *Datasec) walk(tdq *typeDeque) {
for i := range ds.Vars {
cs.push(&ds.Vars[i].Type)
tdq.push(&ds.Vars[i].Type)
}
}
@ -279,6 +467,8 @@ func (ds *Datasec) copy() Type {
}
// VarSecinfo describes variable in a Datasec
//
// It is not a valid Type.
type VarSecinfo struct {
Type Type
Offset uint32
@ -298,6 +488,16 @@ var (
_ sizer = (*Datasec)(nil)
)
type qualifier interface {
qualify() Type
}
var (
_ qualifier = (*Const)(nil)
_ qualifier = (*Restrict)(nil)
_ qualifier = (*Volatile)(nil)
)
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
@ -326,14 +526,9 @@ func Sizeof(typ Type) (int, error) {
case *Typedef:
typ = v.Type
continue
case *Volatile:
typ = v.Type
continue
case *Const:
typ = v.Type
continue
case *Restrict:
typ = v.Type
case qualifier:
typ = v.qualify()
continue
default:
@ -361,7 +556,7 @@ func Sizeof(typ Type) (int, error) {
func copyType(typ Type) Type {
var (
copies = make(map[Type]Type)
work copyStack
work typeDeque
)
for t := &typ; t != nil; t = work.pop() {
@ -382,40 +577,83 @@ func copyType(typ Type) Type {
return typ
}
// copyStack keeps track of pointers to types which still
// need to be copied.
type copyStack []*Type
// push adds a type to the stack.
func (cs *copyStack) push(t *Type) {
*cs = append(*cs, t)
// typeDeque keeps track of pointers to types which still
// need to be visited.
type typeDeque struct {
types []*Type
read, write uint64
mask uint64
}
// pop returns the topmost Type, or nil.
func (cs *copyStack) pop() *Type {
n := len(*cs)
if n == 0 {
// push adds a type to the stack.
func (dq *typeDeque) push(t *Type) {
if dq.write-dq.read < uint64(len(dq.types)) {
dq.types[dq.write&dq.mask] = t
dq.write++
return
}
new := len(dq.types) * 2
if new == 0 {
new = 8
}
types := make([]*Type, new)
pivot := dq.read & dq.mask
n := copy(types, dq.types[pivot:])
n += copy(types[n:], dq.types[:pivot])
types[n] = t
dq.types = types
dq.mask = uint64(new) - 1
dq.read, dq.write = 0, uint64(n+1)
}
// shift returns the first element or null.
func (dq *typeDeque) shift() *Type {
if dq.read == dq.write {
return nil
}
t := (*cs)[n-1]
*cs = (*cs)[:n-1]
index := dq.read & dq.mask
t := dq.types[index]
dq.types[index] = nil
dq.read++
return t
}
type namer interface {
name() string
// pop returns the last element or null.
func (dq *typeDeque) pop() *Type {
if dq.read == dq.write {
return nil
}
dq.write--
index := dq.write & dq.mask
t := dq.types[index]
dq.types[index] = nil
return t
}
var _ namer = Name("")
// all returns all elements.
//
// The deque is empty after calling this method.
func (dq *typeDeque) all() []*Type {
length := dq.write - dq.read
types := make([]*Type, 0, length)
for t := dq.shift(); t != nil; t = dq.shift() {
types = append(types, t)
}
return types
}
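
// typeDeque replaces the old copyStack with a growable ring buffer that can
// be drained from either end. A small sketch of its behaviour, again only
// meaningful inside package btf:
var dq typeDeque
a, b := Type(&Void{}), Type(&Int{})
dq.push(&a)
dq.push(&b)
first := dq.shift() // &a: FIFO order, as used by the CO-RE compatibility walk
last := dq.pop()    // &b: LIFO order, as used by copyType
_, _ = first, last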
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
// it into a graph of Types connected via pointers.
//
// Returns a map of named types (so, where NameOff is non-zero). Since BTF ignores
// compilation units, multiple types may share the same name. A Type may form a
// cyclic graph by pointing at itself.
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map[string][]Type, err error) {
// Returns a map of named types (so, where NameOff is non-zero) and a slice of types
// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
// the same name. A Type may form a cyclic graph by pointing at itself.
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) {
type fixupDef struct {
id TypeID
expectedKind btfKind
@ -427,7 +665,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
fixups = append(fixups, fixupDef{id, expectedKind, typ})
}
convertMembers := func(raw []btfMember) ([]Member, error) {
convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
// NB: The fixup below relies on pre-allocating this array to
// work, since otherwise append might re-allocate members.
members := make([]Member, 0, len(raw))
@ -436,10 +674,15 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
if err != nil {
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
}
members = append(members, Member{
m := Member{
Name: name,
Offset: btfMember.Offset,
})
}
if kindFlag {
m.BitfieldSize = btfMember.Offset >> 24
m.Offset &= 0xffffff
}
members = append(members, m)
}
for i := range members {
fixup(raw[i].Type, kindUnknown, &members[i].Type)
@ -447,9 +690,9 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
return members, nil
}
types := make([]Type, 0, len(rawTypes))
types = make([]Type, 0, len(rawTypes))
types = append(types, (*Void)(nil))
namedTypes = make(map[string][]Type)
namedTypes = make(map[string][]namedType)
for i, raw := range rawTypes {
var (
@ -461,12 +704,13 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
name, err := rawStrings.LookupName(raw.NameOff)
if err != nil {
return nil, fmt.Errorf("can't get name for type id %d: %w", id, err)
return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
}
switch raw.Kind() {
case kindInt:
typ = &Int{id, name, raw.Size()}
encoding, offset, bits := intEncoding(*raw.data.(*uint32))
typ = &Int{id, name, raw.Size(), encoding, offset, bits}
case kindPointer:
ptr := &Pointer{id, nil}
@ -483,24 +727,40 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
typ = arr
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember))
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
if err != nil {
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{id, name, raw.Size(), members}
case kindUnion:
members, err := convertMembers(raw.data.([]btfMember))
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
if err != nil {
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
typ = &Union{id, name, raw.Size(), members}
case kindEnum:
typ = &Enum{id, name}
rawvals := raw.data.([]btfEnum)
vals := make([]EnumValue, 0, len(rawvals))
for i, btfVal := range rawvals {
name, err := rawStrings.LookupName(btfVal.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
}
vals = append(vals, EnumValue{
Name: name,
Value: btfVal.Val,
})
}
typ = &Enum{id, name, vals}
case kindForward:
typ = &Fwd{id, name}
if raw.KindFlag() {
typ = &Fwd{id, name, FwdUnion}
} else {
typ = &Fwd{id, name, FwdStruct}
}
case kindTypedef:
typedef := &Typedef{id, name, nil}
@ -528,7 +788,22 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
typ = fn
case kindFuncProto:
fp := &FuncProto{id, nil}
rawparams := raw.data.([]btfParam)
params := make([]FuncParam, 0, len(rawparams))
for i, param := range rawparams {
name, err := rawStrings.LookupName(param.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
}
params = append(params, FuncParam{
Name: name,
})
}
for i := range params {
fixup(rawparams[i].Type, kindUnknown, &params[i].Type)
}
fp := &FuncProto{id, nil, params}
fixup(raw.Type(), kindUnknown, &fp.Return)
typ = fp
@ -552,14 +827,14 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
typ = &Datasec{id, name, raw.SizeType, vars}
default:
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
types = append(types, typ)
if namer, ok := typ.(namer); ok {
if name := namer.name(); name != "" {
namedTypes[name] = append(namedTypes[name], typ)
if named, ok := typ.(namedType); ok {
if name := essentialName(named.name()); name != "" {
namedTypes[name] = append(namedTypes[name], named)
}
}
}
@ -567,7 +842,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
for _, fixup := range fixups {
i := int(fixup.id)
if i >= len(types) {
return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
// Default void (id 0) to unknown
@ -577,11 +852,20 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (namedTypes map
}
if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
return nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
}
*fixup.typ = types[i]
}
return namedTypes, nil
return types, namedTypes, nil
}
// essentialName returns name without a ___ suffix.
func essentialName(name string) string {
lastIdx := strings.LastIndex(name, "___")
if lastIdx > 0 {
return name[:lastIdx]
}
return name
}

52
vendor/github.com/cilium/ebpf/internal/elf.go generated vendored Normal file

@ -0,0 +1,52 @@
package internal
import (
"debug/elf"
"fmt"
"io"
)
type SafeELFFile struct {
*elf.File
}
// NewSafeELFFile reads an ELF safely.
//
// Any panic during parsing is turned into an error. This is necessary since
// there are a bunch of unfixed bugs in debug/elf.
//
// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
defer func() {
r := recover()
if r == nil {
return
}
safe = nil
err = fmt.Errorf("reading ELF file panicked: %s", r)
}()
file, err := elf.NewFile(r)
if err != nil {
return nil, err
}
return &SafeELFFile{file}, nil
}
// Symbols is the safe version of elf.File.Symbols.
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
defer func() {
r := recover()
if r == nil {
return
}
syms = nil
err = fmt.Errorf("reading ELF symbols panicked: %s", r)
}()
syms, err = se.File.Symbols()
return
}


@ -20,6 +20,9 @@ type UnsupportedFeatureError struct {
}
func (ufe *UnsupportedFeatureError) Error() string {
if ufe.MinimumVersion.Unspecified() {
return fmt.Sprintf("%s not supported", ufe.Name)
}
return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
}
@ -29,7 +32,7 @@ func (ufe *UnsupportedFeatureError) Is(target error) bool {
}
type featureTest struct {
sync.Mutex
sync.RWMutex
successful bool
result error
}
@ -39,10 +42,10 @@ type featureTest struct {
//
// The return values have the following semantics:
//
// err == ErrNotSupported: the feature is not available
// err == nil: the feature is available
// err != nil: the test couldn't be executed
// err == nil && available: the feature is available
// err == nil && !available: the feature isn't available
type FeatureTestFn func() (available bool, err error)
type FeatureTestFn func() error
// FeatureTest wraps a function so that it is run at most once.
//
@ -58,32 +61,40 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error {
ft := new(featureTest)
return func() error {
ft.RLock()
if ft.successful {
defer ft.RUnlock()
return ft.result
}
ft.RUnlock()
ft.Lock()
defer ft.Unlock()
// check one more time on the off
// chance that two go routines
// were able to call into the write
// lock
if ft.successful {
return ft.result
}
available, err := fn()
if errors.Is(err, ErrNotSupported) {
// The feature test aborted because a dependent feature
// is missing, which we should cache.
available = false
} else if err != nil {
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return fmt.Errorf("can't detect support for %s: %w", name, err)
}
ft.successful = true
if !available {
err := fn()
switch {
case errors.Is(err, ErrNotSupported):
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,
}
fallthrough
case err == nil:
ft.successful = true
default:
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return fmt.Errorf("detect support for %s: %w", name, err)
}
return ft.result
}
}
@ -120,3 +131,8 @@ func (v Version) Less(other Version) bool {
}
return false
}
// Unspecified returns true if the version is all zero.
func (v Version) Unspecified() bool {
return v[0] == 0 && v[1] == 0 && v[2] == 0
}
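
// The FeatureTestFn contract changed from (bool, error) to a plain error.
// The sketch below mirrors how haveBTF above uses it; the feature name and
// version are hypothetical.
var haveExample = internal.FeatureTest("example feature", "5.0", func() error {
	// Probe the kernel here. Return internal.ErrNotSupported if the probe
	// ran but the feature is missing, nil if it is present, or any other
	// error if the probe itself could not run; only the first two outcomes
	// are cached by FeatureTest.
	return internal.ErrNotSupported
})

// Callers just invoke the returned function:
// if err := haveExample(); err != nil { /* unavailable, or the probe failed */ }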


@ -91,6 +91,19 @@ func BPFProgDetach(attr *BPFProgDetachAttr) error {
return err
}
type BPFEnableStatsAttr struct {
StatsType uint32
}
func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) {
ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, fmt.Errorf("enable stats: %w", err)
}
return NewFD(uint32(ptr)), nil
}
type bpfObjAttr struct {
fileName Pointer
fd uint32
@ -137,3 +150,30 @@ func BPFObjGet(fileName string) (*FD, error) {
}
return NewFD(uint32(ptr)), nil
}
type bpfObjGetInfoByFDAttr struct {
fd uint32
infoLen uint32
info Pointer
}
// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD.
//
// Available from 4.13.
func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
value, err := fd.Value()
if err != nil {
return err
}
attr := bpfObjGetInfoByFDAttr{
fd: value,
infoLen: uint32(size),
info: NewPointer(info),
}
_, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return fmt.Errorf("fd %v: %w", fd, err)
}
return nil
}


@ -3,22 +3,29 @@
package unix
import (
"bytes"
"syscall"
linux "golang.org/x/sys/unix"
)
const (
ENOENT = linux.ENOENT
EEXIST = linux.EEXIST
EAGAIN = linux.EAGAIN
ENOSPC = linux.ENOSPC
EINVAL = linux.EINVAL
EPOLLIN = linux.EPOLLIN
EINTR = linux.EINTR
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
ENOENT = linux.ENOENT
EEXIST = linux.EEXIST
EAGAIN = linux.EAGAIN
ENOSPC = linux.ENOSPC
EINVAL = linux.EINVAL
EPOLLIN = linux.EPOLLIN
EINTR = linux.EINTR
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP
ENOTSUPP = syscall.Errno(0x20c)
EBADF = linux.EBADF
BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
@ -39,6 +46,7 @@ const (
PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
RLIM_INFINITY = linux.RLIM_INFINITY
RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
)
// Statfs_t is a wrapper
@ -148,3 +156,15 @@ func Gettid() int {
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return linux.Tgkill(tgid, tid, sig)
}
func KernelRelease() (string, error) {
var uname Utsname
err := Uname(&uname)
if err != nil {
return "", err
}
end := bytes.IndexByte(uname.Release[:], 0)
release := string(uname.Release[:end])
return release, nil
}
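The NUL trimming above matters because Utsname.Release is a fixed-size byte array; a tiny self-contained illustration of the same conversion (the buffer size and sample value are assumptions):

package unamedemo

import "bytes"

// trimRelease mimics KernelRelease above: cut the fixed-size buffer at the
// first NUL byte before converting it to a string.
func trimRelease(release [65]byte) string {
    end := bytes.IndexByte(release[:], 0)
    if end == -1 {
        end = len(release)
    }
    return string(release[:end]) // e.g. "5.10.0"
}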


@ -11,15 +11,21 @@ import (
var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
const (
ENOENT = syscall.ENOENT
EEXIST = syscall.EEXIST
EAGAIN = syscall.EAGAIN
ENOSPC = syscall.ENOSPC
EINVAL = syscall.EINVAL
EINTR = syscall.EINTR
EPERM = syscall.EPERM
ESRCH = syscall.ESRCH
ENODEV = syscall.ENODEV
ENOENT = syscall.ENOENT
EEXIST = syscall.EEXIST
EAGAIN = syscall.EAGAIN
ENOSPC = syscall.ENOSPC
EINVAL = syscall.EINVAL
EINTR = syscall.EINTR
EPERM = syscall.EPERM
ESRCH = syscall.ESRCH
ENODEV = syscall.ENODEV
EBADF = syscall.Errno(0)
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP
ENOTSUPP = syscall.Errno(0x20c)
BPF_F_NO_PREALLOC = 0
BPF_F_NUMA_NODE = 0
BPF_F_RDONLY_PROG = 0
BPF_F_WRONLY_PROG = 0
BPF_OBJ_NAME_LEN = 0x10
@ -41,6 +47,7 @@ const (
PERF_FLAG_FD_CLOEXEC = 0x8
RLIM_INFINITY = 0x7fffffffffffffff
RLIMIT_MEMLOCK = 8
BPF_STATS_RUN_TIME = 0
)
// Statfs_t is a wrapper
@ -215,3 +222,7 @@ func Gettid() int {
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return errNonLinux
}
func KernelRelease() (string, error) {
return "", errNonLinux
}

169
vendor/github.com/cilium/ebpf/link/cgroup.go generated vendored Normal file

@ -0,0 +1,169 @@
package link
import (
"errors"
"fmt"
"os"
"github.com/cilium/ebpf"
)
type cgroupAttachFlags uint32
// cgroup attach flags
const (
flagAllowOverride cgroupAttachFlags = 1 << iota
flagAllowMulti
flagReplace
)
type CgroupOptions struct {
// Path to a cgroupv2 folder.
Path string
// One of the AttachCgroup* constants
Attach ebpf.AttachType
// Program must be of type CGroup*, and the attach type must match Attach.
Program *ebpf.Program
}
// AttachCgroup links a BPF program to a cgroup.
func AttachCgroup(opts CgroupOptions) (Link, error) {
cgroup, err := os.Open(opts.Path)
if err != nil {
return nil, fmt.Errorf("can't open cgroup: %s", err)
}
clone, err := opts.Program.Clone()
if err != nil {
cgroup.Close()
return nil, err
}
var cg Link
cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
if errors.Is(err, ErrNotSupported) {
cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
}
if errors.Is(err, ErrNotSupported) {
cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
}
if err != nil {
cgroup.Close()
clone.Close()
return nil, err
}
return cg, nil
}
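A minimal usage sketch of the fallback chain above, assuming a *ebpf.Program of type CGroupSKB that has already been loaded and a cgroupv2 directory chosen by the caller (both are placeholders, not part of this change):

package cgroupattach

import (
    "fmt"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachIngress attaches prog to the cgroup at path. Closing the returned
// link detaches the program again.
func attachIngress(path string, prog *ebpf.Program) (link.Link, error) {
    l, err := link.AttachCgroup(link.CgroupOptions{
        Path:    path, // e.g. "/sys/fs/cgroup/unified/my.slice" (assumed)
        Attach:  ebpf.AttachCGroupInetIngress,
        Program: prog,
    })
    if err != nil {
        return nil, fmt.Errorf("attach cgroup: %w", err)
    }
    return l, nil
}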
// LoadPinnedCgroup loads a pinned cgroup from a bpffs.
func LoadPinnedCgroup(fileName string) (Link, error) {
link, err := LoadPinnedRawLink(fileName)
if err != nil {
return nil, err
}
return &linkCgroup{link}, nil
}
type progAttachCgroup struct {
cgroup *os.File
current *ebpf.Program
attachType ebpf.AttachType
flags cgroupAttachFlags
}
var _ Link = (*progAttachCgroup)(nil)
func (cg *progAttachCgroup) isLink() {}
func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
if flags&flagAllowMulti > 0 {
if err := haveProgAttachReplace(); err != nil {
return nil, fmt.Errorf("can't support multiple programs: %w", err)
}
}
err := RawAttachProgram(RawAttachProgramOptions{
Target: int(cgroup.Fd()),
Program: prog,
Flags: uint32(flags),
Attach: attach,
})
if err != nil {
return nil, fmt.Errorf("cgroup: %w", err)
}
return &progAttachCgroup{cgroup, prog, attach, flags}, nil
}
func (cg *progAttachCgroup) Close() error {
defer cg.cgroup.Close()
defer cg.current.Close()
err := RawDetachProgram(RawDetachProgramOptions{
Target: int(cg.cgroup.Fd()),
Program: cg.current,
Attach: cg.attachType,
})
if err != nil {
return fmt.Errorf("close cgroup: %s", err)
}
return nil
}
func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
new, err := prog.Clone()
if err != nil {
return err
}
args := RawAttachProgramOptions{
Target: int(cg.cgroup.Fd()),
Program: prog,
Attach: cg.attachType,
Flags: uint32(cg.flags),
}
if cg.flags&flagAllowMulti > 0 {
// Atomically replacing multiple programs requires at least
// 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
// program in MULTI mode")
args.Flags |= uint32(flagReplace)
args.Replace = cg.current
}
if err := RawAttachProgram(args); err != nil {
new.Close()
return fmt.Errorf("can't update cgroup: %s", err)
}
cg.current.Close()
cg.current = new
return nil
}
func (cg *progAttachCgroup) Pin(string) error {
return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
}
type linkCgroup struct {
*RawLink
}
var _ Link = (*linkCgroup)(nil)
func (cg *linkCgroup) isLink() {}
func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
link, err := AttachRawLink(RawLinkOptions{
Target: int(cgroup.Fd()),
Program: prog,
Attach: attach,
})
if err != nil {
return nil, err
}
return &linkCgroup{link}, err
}

2
vendor/github.com/cilium/ebpf/link/doc.go generated vendored Normal file

@ -0,0 +1,2 @@
// Package link allows attaching eBPF programs to various kernel hooks.
package link

91
vendor/github.com/cilium/ebpf/link/iter.go generated vendored Normal file

@ -0,0 +1,91 @@
package link
import (
"fmt"
"io"
"github.com/cilium/ebpf"
)
type IterOptions struct {
// Program must be of type Tracing with attach type
// AttachTraceIter. The kind of iterator to attach to is
// determined at load time via the AttachTo field.
//
// AttachTo requires the kernel to expose BTF for itself,
// and to have been compiled with a recent pahole (>= 1.16).
Program *ebpf.Program
}
// AttachIter attaches a BPF seq_file iterator.
func AttachIter(opts IterOptions) (*Iter, error) {
link, err := AttachRawLink(RawLinkOptions{
Program: opts.Program,
Attach: ebpf.AttachTraceIter,
})
if err != nil {
return nil, fmt.Errorf("can't link iterator: %w", err)
}
return &Iter{link}, err
}
// LoadPinnedIter loads a pinned iterator from a bpffs.
func LoadPinnedIter(fileName string) (*Iter, error) {
link, err := LoadPinnedRawLink(fileName)
if err != nil {
return nil, err
}
return &Iter{link}, err
}
// Iter represents an attached bpf_iter.
type Iter struct {
link *RawLink
}
var _ Link = (*Iter)(nil)
func (it *Iter) isLink() {}
// FD returns the underlying file descriptor.
func (it *Iter) FD() int {
return it.link.FD()
}
// Close implements Link.
func (it *Iter) Close() error {
return it.link.Close()
}
// Pin implements Link.
func (it *Iter) Pin(fileName string) error {
return it.link.Pin(fileName)
}
// Update implements Link.
func (it *Iter) Update(new *ebpf.Program) error {
return it.link.Update(new)
}
// Open creates a new instance of the iterator.
//
// Reading from the returned reader triggers the BPF program.
func (it *Iter) Open() (io.ReadCloser, error) {
linkFd, err := it.link.fd.Value()
if err != nil {
return nil, err
}
attr := &bpfIterCreateAttr{
linkFd: linkFd,
}
fd, err := bpfIterCreate(attr)
if err != nil {
return nil, fmt.Errorf("can't create iterator: %w", err)
}
return fd.File("bpf_iter"), nil
}
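For context, a hedged sketch of driving an iterator end to end; the program is assumed to be a loaded Tracing program with attach type AttachTraceIter:

package iterdump

import (
    "io"
    "os"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// dumpIter attaches prog as a seq_file iterator and copies its output to
// stdout. Every call to Open creates a fresh instance of the iterator.
func dumpIter(prog *ebpf.Program) error {
    it, err := link.AttachIter(link.IterOptions{Program: prog})
    if err != nil {
        return err
    }
    defer it.Close()

    rd, err := it.Open()
    if err != nil {
        return err
    }
    defer rd.Close()

    // Reading from rd is what triggers the BPF program.
    _, err = io.Copy(os.Stdout, rd)
    return err
}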

214
vendor/github.com/cilium/ebpf/link/link.go generated vendored Normal file

@ -0,0 +1,214 @@
package link
import (
"fmt"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
var ErrNotSupported = internal.ErrNotSupported
// Link represents a Program attached to a BPF hook.
type Link interface {
// Replace the current program with a new program.
//
// Passing a nil program is an error. May return an error wrapping ErrNotSupported.
Update(*ebpf.Program) error
// Persist a link by pinning it into a bpffs.
//
// May return an error wrapping ErrNotSupported.
Pin(string) error
// Close frees resources.
//
// The link will be broken unless it has been pinned. A link
// may continue past the lifetime of the process if Close is
// not called.
Close() error
// Prevent external users from implementing this interface.
isLink()
}
// ID uniquely identifies a BPF link.
type ID uint32
// RawLinkOptions control the creation of a raw link.
type RawLinkOptions struct {
// File descriptor to attach to. This differs for each attach type.
Target int
// Program to attach.
Program *ebpf.Program
// Attach must match the attach type of Program.
Attach ebpf.AttachType
}
// RawLinkInfo contains metadata on a link.
type RawLinkInfo struct {
Type Type
ID ID
Program ebpf.ProgramID
}
// RawLink is the low-level API to bpf_link.
//
// You should consider using the higher level interfaces in this
// package instead.
type RawLink struct {
fd *internal.FD
}
// AttachRawLink creates a raw link.
func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
if err := haveBPFLink(); err != nil {
return nil, err
}
if opts.Target < 0 {
return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd)
}
progFd := opts.Program.FD()
if progFd < 0 {
return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
}
attr := bpfLinkCreateAttr{
targetFd: uint32(opts.Target),
progFd: uint32(progFd),
attachType: opts.Attach,
}
fd, err := bpfLinkCreate(&attr)
if err != nil {
return nil, fmt.Errorf("can't create link: %s", err)
}
return &RawLink{fd}, nil
}
// LoadPinnedRawLink loads a persisted link from a bpffs.
func LoadPinnedRawLink(fileName string) (*RawLink, error) {
return loadPinnedRawLink(fileName, UnspecifiedType)
}
func loadPinnedRawLink(fileName string, typ Type) (*RawLink, error) {
fd, err := internal.BPFObjGet(fileName)
if err != nil {
return nil, fmt.Errorf("load pinned link: %s", err)
}
link := &RawLink{fd}
if typ == UnspecifiedType {
return link, nil
}
info, err := link.Info()
if err != nil {
link.Close()
return nil, fmt.Errorf("get pinned link info: %s", err)
}
if info.Type != typ {
link.Close()
return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, typ)
}
return link, nil
}
func (l *RawLink) isLink() {}
// FD returns the raw file descriptor.
func (l *RawLink) FD() int {
fd, err := l.fd.Value()
if err != nil {
return -1
}
return int(fd)
}
// Close breaks the link.
//
// Use Pin if you want to make the link persistent.
func (l *RawLink) Close() error {
return l.fd.Close()
}
// Pin persists a link past the lifetime of the process.
//
// Calling Close on a pinned Link will not break the link
// until the pin is removed.
func (l *RawLink) Pin(fileName string) error {
if err := internal.BPFObjPin(fileName, l.fd); err != nil {
return fmt.Errorf("can't pin link: %s", err)
}
return nil
}
// Update implements Link.
func (l *RawLink) Update(new *ebpf.Program) error {
return l.UpdateArgs(RawLinkUpdateOptions{
New: new,
})
}
// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
type RawLinkUpdateOptions struct {
New *ebpf.Program
Old *ebpf.Program
Flags uint32
}
// UpdateArgs updates a link based on args.
func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
newFd := opts.New.FD()
if newFd < 0 {
return fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
}
var oldFd int
if opts.Old != nil {
oldFd = opts.Old.FD()
if oldFd < 0 {
return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd)
}
}
linkFd, err := l.fd.Value()
if err != nil {
return fmt.Errorf("can't update link: %s", err)
}
attr := bpfLinkUpdateAttr{
linkFd: linkFd,
newProgFd: uint32(newFd),
oldProgFd: uint32(oldFd),
flags: opts.Flags,
}
return bpfLinkUpdate(&attr)
}
// struct bpf_link_info
type bpfLinkInfo struct {
typ uint32
id uint32
prog_id uint32
}
// Info returns metadata about the link.
func (l *RawLink) Info() (*RawLinkInfo, error) {
var info bpfLinkInfo
err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
if err != nil {
return nil, fmt.Errorf("link info: %s", err)
}
return &RawLinkInfo{
Type(info.typ),
ID(info.id),
ebpf.ProgramID(info.prog_id),
}, nil
}

60
vendor/github.com/cilium/ebpf/link/netns.go generated vendored Normal file

@ -0,0 +1,60 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
)
// NetNsInfo contains metadata about a network namespace link.
type NetNsInfo struct {
RawLinkInfo
}
// NetNsLink is a program attached to a network namespace.
type NetNsLink struct {
*RawLink
}
// AttachNetNs attaches a program to a network namespace.
func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
var attach ebpf.AttachType
switch t := prog.Type(); t {
case ebpf.FlowDissector:
attach = ebpf.AttachFlowDissector
case ebpf.SkLookup:
attach = ebpf.AttachSkLookup
default:
return nil, fmt.Errorf("can't attach %v to network namespace", t)
}
link, err := AttachRawLink(RawLinkOptions{
Target: ns,
Program: prog,
Attach: attach,
})
if err != nil {
return nil, err
}
return &NetNsLink{link}, nil
}
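A small usage sketch, assuming a loaded FlowDissector (or SkLookup) program; attaching to the caller's own network namespace via /proc/self/ns/net is just one possible choice:

package netnsattach

import (
    "os"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachToOwnNetNs attaches prog to the network namespace of the calling
// process. The kernel link keeps the attachment alive, so the namespace
// file descriptor can be closed afterwards.
func attachToOwnNetNs(prog *ebpf.Program) (*link.NetNsLink, error) {
    ns, err := os.Open("/proc/self/ns/net")
    if err != nil {
        return nil, err
    }
    defer ns.Close()

    return link.AttachNetNs(int(ns.Fd()), prog)
}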
// LoadPinnedNetNs loads a network namespace link from bpffs.
func LoadPinnedNetNs(fileName string) (*NetNsLink, error) {
link, err := loadPinnedRawLink(fileName, NetNsType)
if err != nil {
return nil, err
}
return &NetNsLink{link}, nil
}
// Info returns information about the link.
func (nns *NetNsLink) Info() (*NetNsInfo, error) {
info, err := nns.RawLink.Info()
if err != nil {
return nil, err
}
return &NetNsInfo{*info}, nil
}

76
vendor/github.com/cilium/ebpf/link/program.go generated vendored Normal file

@ -0,0 +1,76 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
type RawAttachProgramOptions struct {
// File descriptor to attach to. This differs for each attach type.
Target int
// Program to attach.
Program *ebpf.Program
// Program to replace (cgroups).
Replace *ebpf.Program
// Attach must match the attach type of Program (and Replace).
Attach ebpf.AttachType
// Flags control the attach behaviour. This differs for each attach type.
Flags uint32
}
// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawAttachProgram(opts RawAttachProgramOptions) error {
if err := haveProgAttach(); err != nil {
return err
}
var replaceFd uint32
if opts.Replace != nil {
replaceFd = uint32(opts.Replace.FD())
}
attr := internal.BPFProgAttachAttr{
TargetFd: uint32(opts.Target),
AttachBpfFd: uint32(opts.Program.FD()),
ReplaceBpfFd: replaceFd,
AttachType: uint32(opts.Attach),
AttachFlags: uint32(opts.Flags),
}
if err := internal.BPFProgAttach(&attr); err != nil {
return fmt.Errorf("can't attach program: %s", err)
}
return nil
}
type RawDetachProgramOptions struct {
Target int
Program *ebpf.Program
Attach ebpf.AttachType
}
// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawDetachProgram(opts RawDetachProgramOptions) error {
if err := haveProgAttach(); err != nil {
return err
}
attr := internal.BPFProgDetachAttr{
TargetFd: uint32(opts.Target),
AttachBpfFd: uint32(opts.Program.FD()),
AttachType: uint32(opts.Attach),
}
if err := internal.BPFProgDetach(&attr); err != nil {
return fmt.Errorf("can't detach program: %s", err)
}
return nil
}
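For orientation, a hedged sketch of the symmetric attach/detach pair against a cgroup directory file descriptor; the cgroup path, attach type and program are assumptions:

package rawattach

import (
    "os"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachThenDetach attaches prog to the cgroup at path and removes it again,
// mirroring the BPF_PROG_ATTACH/BPF_PROG_DETACH pair above.
func attachThenDetach(path string, prog *ebpf.Program) error {
    cg, err := os.Open(path) // e.g. "/sys/fs/cgroup/unified/my.slice" (assumed)
    if err != nil {
        return err
    }
    defer cg.Close()

    err = link.RawAttachProgram(link.RawAttachProgramOptions{
        Target:  int(cg.Fd()),
        Program: prog,
        Attach:  ebpf.AttachCGroupInetIngress,
    })
    if err != nil {
        return err
    }

    return link.RawDetachProgram(link.RawDetachProgramOptions{
        Target:  int(cg.Fd()),
        Program: prog,
        Attach:  ebpf.AttachCGroupInetIngress,
    })
}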

57
vendor/github.com/cilium/ebpf/link/raw_tracepoint.go generated vendored Normal file

@ -0,0 +1,57 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
type RawTracepointOptions struct {
// Tracepoint name.
Name string
// Program must be of type RawTracepoint*
Program *ebpf.Program
}
// AttachRawTracepoint links a BPF program to a raw_tracepoint.
//
// Requires at least Linux 4.17.
func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable {
return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
}
if opts.Program.FD() < 0 {
return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
}
fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{
name: internal.NewStringPointer(opts.Name),
fd: uint32(opts.Program.FD()),
})
if err != nil {
return nil, err
}
return &progAttachRawTracepoint{fd: fd}, nil
}
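Usage sketch; the tracepoint name is an assumption and the program must have been loaded as a RawTracepoint type:

package rawtp

import (
    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// traceSysEnter attaches prog to the sys_enter raw tracepoint. Closing the
// returned link detaches the program.
func traceSysEnter(prog *ebpf.Program) (link.Link, error) {
    return link.AttachRawTracepoint(link.RawTracepointOptions{
        Name:    "sys_enter", // assumed tracepoint name
        Program: prog,
    })
}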
type progAttachRawTracepoint struct {
fd *internal.FD
}
var _ Link = (*progAttachRawTracepoint)(nil)
func (rt *progAttachRawTracepoint) isLink() {}
func (rt *progAttachRawTracepoint) Close() error {
return rt.fd.Close()
}
func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error {
return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported)
}
func (rt *progAttachRawTracepoint) Pin(_ string) error {
return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported)
}

173
vendor/github.com/cilium/ebpf/link/syscalls.go generated vendored Normal file

@ -0,0 +1,173 @@
package link
import (
"errors"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/unix"
)
// Type is the kind of link.
type Type uint32
// Valid link types.
//
// Equivalent to enum bpf_link_type.
const (
UnspecifiedType Type = iota
RawTracepointType
TracingType
CgroupType
IterType
NetNsType
XDPType
)
var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Type: ebpf.CGroupSKB,
AttachType: ebpf.AttachCGroupInetIngress,
License: "MIT",
Instructions: asm.Instructions{
asm.Mov.Imm(asm.R0, 0),
asm.Return(),
},
})
if err != nil {
return internal.ErrNotSupported
}
// BPF_PROG_ATTACH was introduced at the same time as CGroupSKB,
// so being able to load the program is enough to infer that we
// have the syscall.
prog.Close()
return nil
})
var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
if err := haveProgAttach(); err != nil {
return err
}
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Type: ebpf.CGroupSKB,
AttachType: ebpf.AttachCGroupInetIngress,
License: "MIT",
Instructions: asm.Instructions{
asm.Mov.Imm(asm.R0, 0),
asm.Return(),
},
})
if err != nil {
return internal.ErrNotSupported
}
defer prog.Close()
// We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
// If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
// present.
attr := internal.BPFProgAttachAttr{
// We rely on this being checked after attachFlags.
TargetFd: ^uint32(0),
AttachBpfFd: uint32(prog.FD()),
AttachType: uint32(ebpf.AttachCGroupInetIngress),
AttachFlags: uint32(flagReplace),
}
err = internal.BPFProgAttach(&attr)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
if errors.Is(err, unix.EBADF) {
return nil
}
return err
})
type bpfLinkCreateAttr struct {
progFd uint32
targetFd uint32
attachType ebpf.AttachType
flags uint32
}
func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) {
ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return internal.NewFD(uint32(ptr)), nil
}
type bpfLinkUpdateAttr struct {
linkFd uint32
newProgFd uint32
flags uint32
oldProgFd uint32
}
func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error {
_, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}
var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Type: ebpf.CGroupSKB,
AttachType: ebpf.AttachCGroupInetIngress,
License: "MIT",
Instructions: asm.Instructions{
asm.Mov.Imm(asm.R0, 0),
asm.Return(),
},
})
if err != nil {
return internal.ErrNotSupported
}
defer prog.Close()
attr := bpfLinkCreateAttr{
// This is a hopefully invalid file descriptor, which triggers EBADF.
targetFd: ^uint32(0),
progFd: uint32(prog.FD()),
attachType: ebpf.AttachCGroupInetIngress,
}
_, err = bpfLinkCreate(&attr)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
if errors.Is(err, unix.EBADF) {
return nil
}
return err
})
type bpfIterCreateAttr struct {
linkFd uint32
flags uint32
}
func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) {
ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err == nil {
return internal.NewFD(uint32(ptr)), nil
}
return nil, err
}
type bpfRawTracepointOpenAttr struct {
name internal.Pointer
fd uint32
_ uint32
}
func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) {
ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err == nil {
return internal.NewFD(uint32(ptr)), nil
}
return nil, err
}


@ -84,3 +84,50 @@ func needSection(insns, section asm.Instructions) (bool, error) {
// None of the functions in the section are called.
return false, nil
}
func fixupJumpsAndCalls(insns asm.Instructions) error {
symbolOffsets := make(map[string]asm.RawInstructionOffset)
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
if ins.Symbol == "" {
continue
}
if _, ok := symbolOffsets[ins.Symbol]; ok {
return fmt.Errorf("duplicate symbol %s", ins.Symbol)
}
symbolOffsets[ins.Symbol] = iter.Offset
}
iter = insns.Iterate()
for iter.Next() {
i := iter.Index
offset := iter.Offset
ins := iter.Ins
switch {
case ins.IsFunctionCall() && ins.Constant == -1:
// Rewrite bpf to bpf call
callOffset, ok := symbolOffsets[ins.Reference]
if !ok {
return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
}
ins.Constant = int64(callOffset - offset - 1)
case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1:
// Rewrite jump to label
jumpOffset, ok := symbolOffsets[ins.Reference]
if !ok {
return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
}
ins.Offset = int16(jumpOffset - offset - 1)
}
}
return nil
}
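A short worked example of the offset arithmetic above: if a bpf-to-bpf call sits at raw instruction offset 10 and the callee's symbol starts at offset 14, the rewritten constant is 14 - 10 - 1 = 3, because the kernel resolves both call constants and jump offsets relative to the instruction that follows the call or jump.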

716
vendor/github.com/cilium/ebpf/map.go generated vendored

File diff suppressed because it is too large


@ -13,14 +13,12 @@ import (
"github.com/cilium/ebpf/internal"
)
// marshalPtr converts an arbitrary value into a pointer suitable
// to be passed to the kernel.
//
// As an optimization, it returns the original value if it is an
// unsafe.Pointer.
func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
if data == nil {
if length == 0 {
return internal.NewPointer(nil), nil
}
return internal.Pointer{}, errors.New("can't use nil as key of map")
}
if ptr, ok := data.(unsafe.Pointer); ok {
return internal.NewPointer(ptr), nil
}
@ -33,6 +31,13 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
return internal.NewSlicePointer(buf), nil
}
// marshalBytes converts an arbitrary value into a byte buffer.
//
// Prefer using Map.marshalKey and Map.marshalValue if possible, since
// those have special cases that allow more types to be encoded.
//
// Returns an error if the given value isn't representable in exactly
// length bytes.
func marshalBytes(data interface{}, length int) (buf []byte, err error) {
switch value := data.(type) {
case encoding.BinaryMarshaler:
@ -43,6 +48,8 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
buf = value
case unsafe.Pointer:
err = errors.New("can't marshal from unsafe.Pointer")
case Map, *Map, Program, *Program:
err = fmt.Errorf("can't marshal %T", value)
default:
var wr bytes.Buffer
err = binary.Write(&wr, internal.NativeEndian, value)
@ -70,6 +77,10 @@ func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) {
return internal.NewSlicePointer(buf), buf
}
// unmarshalBytes converts a byte buffer into an arbitrary value.
//
// Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since
// those have special cases that allow more types to be encoded.
func unmarshalBytes(data interface{}, buf []byte) error {
switch value := data.(type) {
case unsafe.Pointer:
@ -83,6 +94,8 @@ func unmarshalBytes(data interface{}, buf []byte) error {
copy(dst, buf)
runtime.KeepAlive(value)
return nil
case Map, *Map, Program, *Program:
return fmt.Errorf("can't unmarshal into %T", value)
case encoding.BinaryUnmarshaler:
return value.UnmarshalBinary(buf)
case *string:

42
vendor/github.com/cilium/ebpf/pinning.go generated vendored Normal file

@ -0,0 +1,42 @@
package ebpf
import (
"errors"
"fmt"
"os"
"github.com/cilium/ebpf/internal"
)
func pin(currentPath, newPath string, fd *internal.FD) error {
if newPath == "" {
return errors.New("given pinning path cannot be empty")
}
if currentPath == "" {
return internal.BPFObjPin(newPath, fd)
}
if currentPath == newPath {
return nil
}
var err error
// Object is now moved to the new pinning path.
if err = os.Rename(currentPath, newPath); err == nil {
return nil
}
if !os.IsNotExist(err) {
return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
}
// Internal state not in sync with the file system so let's fix it.
return internal.BPFObjPin(newPath, fd)
}
func unpin(pinnedPath string) error {
if pinnedPath == "" {
return nil
}
err := os.Remove(pinnedPath)
if err == nil || os.IsNotExist(err) {
return nil
}
return err
}

374
vendor/github.com/cilium/ebpf/prog.go generated vendored

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"math"
"path/filepath"
"strings"
"time"
@ -18,7 +19,7 @@ import (
// ErrNotSupported is returned whenever the kernel doesn't support a feature.
var ErrNotSupported = internal.ErrNotSupported
// ProgramID represents the unique ID of an eBPF program
// ProgramID represents the unique ID of an eBPF program.
type ProgramID uint32
const (
@ -42,7 +43,7 @@ type ProgramOptions struct {
LogSize int
}
// ProgramSpec defines a Program
// ProgramSpec defines a Program.
type ProgramSpec struct {
// Name is passed to the kernel as a debug aid. Must only contain
// alpha numeric and '_' characters.
@ -87,6 +88,13 @@ func (ps *ProgramSpec) Copy() *ProgramSpec {
return &cpy
}
// Tag calculates the kernel tag for a series of instructions.
//
// Use asm.Instructions.Tag if you need to calculate for non-native endianness.
func (ps *ProgramSpec) Tag() (string, error) {
return ps.Instructions.Tag(internal.NativeEndian)
}
// Program represents BPF program loaded into the kernel.
//
// It is not safe to close a Program which is used by other goroutines.
@ -97,8 +105,8 @@ type Program struct {
fd *internal.FD
name string
abi ProgramABI
attachType AttachType
pinnedPath string
typ ProgramType
}
// NewProgram creates a new Program.
@ -114,88 +122,13 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
if spec.BTF == nil {
return newProgramWithBTF(spec, nil, opts)
}
btfs := make(btfHandleCache)
defer btfs.close()
handle, err := btf.NewHandle(btf.ProgramSpec(spec.BTF))
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("can't load BTF: %w", err)
}
return newProgramWithBTF(spec, handle, opts)
return newProgramWithOptions(spec, opts, btfs)
}
func newProgramWithBTF(spec *ProgramSpec, btf *btf.Handle, opts ProgramOptions) (*Program, error) {
attr, err := convertProgramSpec(spec, btf)
if err != nil {
return nil, err
}
logSize := DefaultVerifierLogSize
if opts.LogSize > 0 {
logSize = opts.LogSize
}
var logBuf []byte
if opts.LogLevel > 0 {
logBuf = make([]byte, logSize)
attr.logLevel = opts.LogLevel
attr.logSize = uint32(len(logBuf))
attr.logBuf = internal.NewSlicePointer(logBuf)
}
fd, err := bpfProgLoad(attr)
if err == nil {
prog := newProgram(fd, spec.Name, &ProgramABI{spec.Type})
prog.VerifierLog = internal.CString(logBuf)
return prog, nil
}
logErr := err
if opts.LogLevel == 0 {
// Re-run with the verifier enabled to get better error messages.
logBuf = make([]byte, logSize)
attr.logLevel = 1
attr.logSize = uint32(len(logBuf))
attr.logBuf = internal.NewSlicePointer(logBuf)
_, logErr = bpfProgLoad(attr)
}
err = internal.ErrorWithLog(err, logBuf, logErr)
return nil, fmt.Errorf("can't load program: %w", err)
}
// NewProgramFromFD creates a program from a raw fd.
//
// You should not use fd after calling this function.
//
// Requires at least Linux 4.11.
func NewProgramFromFD(fd int) (*Program, error) {
if fd < 0 {
return nil, errors.New("invalid fd")
}
bpfFd := internal.NewFD(uint32(fd))
name, abi, err := newProgramABIFromFd(bpfFd)
if err != nil {
bpfFd.Forget()
return nil, err
}
return newProgram(bpfFd, name, abi), nil
}
func newProgram(fd *internal.FD, name string, abi *ProgramABI) *Program {
return &Program{
name: name,
fd: fd,
abi: *abi,
}
}
func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr, error) {
func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandleCache) (*Program, error) {
if len(spec.Instructions) == 0 {
return nil, errors.New("Instructions cannot be empty")
}
@ -208,8 +141,15 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
}
insns := make(asm.Instructions, len(spec.Instructions))
copy(insns, spec.Instructions)
if err := fixupJumpsAndCalls(insns); err != nil {
return nil, err
}
buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
err := spec.Instructions.Marshal(buf, internal.NativeEndian)
err := insns.Marshal(buf, internal.NativeEndian)
if err != nil {
return nil, err
}
@ -229,24 +169,39 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
attr.progName = newBPFObjName(spec.Name)
}
if handle != nil && spec.BTF != nil {
attr.progBTFFd = uint32(handle.FD())
recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
if err != nil {
return nil, fmt.Errorf("can't get BTF line infos: %w", err)
var btfDisabled bool
if spec.BTF != nil {
if relos, err := btf.ProgramRelocations(spec.BTF, nil); err != nil {
return nil, fmt.Errorf("CO-RE relocations: %s", err)
} else if len(relos) > 0 {
return nil, fmt.Errorf("applying CO-RE relocations: %w", ErrNotSupported)
}
attr.lineInfoRecSize = recSize
attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
attr.lineInfo = internal.NewSlicePointer(bytes)
recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
if err != nil {
return nil, fmt.Errorf("can't get BTF function infos: %w", err)
handle, err := btfs.load(btf.ProgramSpec(spec.BTF))
btfDisabled = errors.Is(err, btf.ErrNotSupported)
if err != nil && !btfDisabled {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
attr.progBTFFd = uint32(handle.FD())
recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
if err != nil {
return nil, fmt.Errorf("get BTF line infos: %w", err)
}
attr.lineInfoRecSize = recSize
attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
attr.lineInfo = internal.NewSlicePointer(bytes)
recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
if err != nil {
return nil, fmt.Errorf("get BTF function infos: %w", err)
}
attr.funcInfoRecSize = recSize
attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
attr.funcInfo = internal.NewSlicePointer(bytes)
}
attr.funcInfoRecSize = recSize
attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
attr.funcInfo = internal.NewSlicePointer(bytes)
}
if spec.AttachTo != "" {
@ -259,19 +214,100 @@ func convertProgramSpec(spec *ProgramSpec, handle *btf.Handle) (*bpfProgLoadAttr
}
}
return attr, nil
logSize := DefaultVerifierLogSize
if opts.LogSize > 0 {
logSize = opts.LogSize
}
var logBuf []byte
if opts.LogLevel > 0 {
logBuf = make([]byte, logSize)
attr.logLevel = opts.LogLevel
attr.logSize = uint32(len(logBuf))
attr.logBuf = internal.NewSlicePointer(logBuf)
}
fd, err := bpfProgLoad(attr)
if err == nil {
return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil
}
logErr := err
if opts.LogLevel == 0 {
// Re-run with the verifier enabled to get better error messages.
logBuf = make([]byte, logSize)
attr.logLevel = 1
attr.logSize = uint32(len(logBuf))
attr.logBuf = internal.NewSlicePointer(logBuf)
_, logErr = bpfProgLoad(attr)
}
if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 {
// EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
// check that the log is empty to reduce false positives.
return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr)
}
err = internal.ErrorWithLog(err, logBuf, logErr)
if btfDisabled {
return nil, fmt.Errorf("load program without BTF: %w", err)
}
return nil, fmt.Errorf("load program: %w", err)
}
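Since the EPERM hint above points at RLIMIT_MEMLOCK, callers on kernels without memcg-based accounting typically raise the limit before loading programs or maps. A hedged sketch using golang.org/x/sys/unix (lifting the limit to infinity is a blunt choice for illustration, not a recommendation):

package memlock

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// removeMemlockLimit lifts RLIMIT_MEMLOCK so that BPF map and program
// memory can be locked. Requires CAP_SYS_RESOURCE (or root).
func removeMemlockLimit() error {
    rl := unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY}
    if err := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &rl); err != nil {
        return fmt.Errorf("raise RLIMIT_MEMLOCK: %w", err)
    }
    return nil
}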
// NewProgramFromFD creates a program from a raw fd.
//
// You should not use fd after calling this function.
//
// Requires at least Linux 4.10.
func NewProgramFromFD(fd int) (*Program, error) {
if fd < 0 {
return nil, errors.New("invalid fd")
}
return newProgramFromFD(internal.NewFD(uint32(fd)))
}
// NewProgramFromID returns the program for a given id.
//
// Returns ErrNotExist, if there is no eBPF program with the given id.
func NewProgramFromID(id ProgramID) (*Program, error) {
fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id))
if err != nil {
return nil, fmt.Errorf("get program by id: %w", err)
}
return newProgramFromFD(fd)
}
func newProgramFromFD(fd *internal.FD) (*Program, error) {
info, err := newProgramInfoFromFd(fd)
if err != nil {
fd.Close()
return nil, fmt.Errorf("discover program type: %w", err)
}
return &Program{"", fd, "", "", info.Type}, nil
}
func (p *Program) String() string {
if p.name != "" {
return fmt.Sprintf("%s(%s)#%v", p.abi.Type, p.name, p.fd)
return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd)
}
return fmt.Sprintf("%s#%v", p.abi.Type, p.fd)
return fmt.Sprintf("%s(%v)", p.typ, p.fd)
}
// ABI gets the ABI of the Program
func (p *Program) ABI() ProgramABI {
return p.abi
// Type returns the underlying type of the program.
func (p *Program) Type() ProgramType {
return p.typ
}
// Info returns metadata about the program.
//
// Requires at least 4.10.
func (p *Program) Info() (*ProgramInfo, error) {
return newProgramInfoFromFd(p.fd)
}
// FD gets the file descriptor of the Program.
@ -303,19 +339,42 @@ func (p *Program) Clone() (*Program, error) {
return nil, fmt.Errorf("can't clone program: %w", err)
}
return newProgram(dup, p.name, &p.abi), nil
return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil
}
// Pin persists the Program past the lifetime of the process that created it
// Pin persists the Program on the BPF virtual file system past the lifetime of
// the process that created it
//
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
func (p *Program) Pin(fileName string) error {
if err := internal.BPFObjPin(fileName, p.fd); err != nil {
return fmt.Errorf("can't pin program: %w", err)
if err := pin(p.pinnedPath, fileName, p.fd); err != nil {
return err
}
p.pinnedPath = fileName
return nil
}
// Unpin removes the persisted state for the Program from the BPF virtual filesystem.
//
// Failed calls to Unpin will not alter the state returned by IsPinned.
//
// Unpinning an unpinned Program returns nil.
func (p *Program) Unpin() error {
if err := unpin(p.pinnedPath); err != nil {
return err
}
p.pinnedPath = ""
return nil
}
// IsPinned returns true if the Program has a non-empty pinned path.
func (p *Program) IsPinned() bool {
if p.pinnedPath == "" {
return false
}
return true
}
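Usage sketch for the pinning lifecycle above; the path is an assumption and must point into a mounted bpffs:

package pinning

import "github.com/cilium/ebpf"

// pinWhile pins prog, runs fn while the pin exists, and removes the pin
// again. The program stays loaded for as long as prog itself is open.
func pinWhile(prog *ebpf.Program, fn func() error) error {
    const path = "/sys/fs/bpf/my_prog" // assumed bpffs location

    if err := prog.Pin(path); err != nil {
        return err
    }
    defer prog.Unpin() // unpinning an already-unpinned program returns nil

    return fn()
}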
// Close unloads the program from the kernel.
func (p *Program) Close() error {
if p == nil {
@ -359,7 +418,7 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D
return ret, total, nil
}
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() (bool, error) {
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
prog, err := NewProgram(&ProgramSpec{
Type: SocketFilter,
Instructions: asm.Instructions{
@ -370,7 +429,7 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() (
})
if err != nil {
// This may be because we lack sufficient permissions, etc.
return false, err
return err
}
defer prog.Close()
@ -383,10 +442,16 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() (
}
err = bpfProgTestRun(&attr)
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return !errors.Is(err, unix.EINVAL), nil
if errors.Is(err, unix.EINVAL) {
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return internal.ErrNotSupported
}
if errors.Is(err, unix.EINTR) {
// We know that PROG_TEST_RUN is supported if we get EINTR.
return nil
}
return err
})
func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) {
@ -465,8 +530,11 @@ func unmarshalProgram(buf []byte) (*Program, error) {
return NewProgramFromID(ProgramID(id))
}
// MarshalBinary implements BinaryMarshaler.
func (p *Program) MarshalBinary() ([]byte, error) {
func marshalProgram(p *Program, length int) ([]byte, error) {
if length != 4 {
return nil, fmt.Errorf("can't marshal program to %d bytes", length)
}
value, err := p.fd.Value()
if err != nil {
return nil, err
@ -535,22 +603,22 @@ func LoadPinnedProgram(fileName string) (*Program, error) {
return nil, err
}
name, abi, err := newProgramABIFromFd(fd)
info, err := newProgramInfoFromFd(fd)
if err != nil {
_ = fd.Close()
return nil, fmt.Errorf("can't get ABI for %s: %w", fileName, err)
return nil, fmt.Errorf("info for %s: %w", fileName, err)
}
return newProgram(fd, name, abi), nil
return &Program{"", fd, filepath.Base(fileName), "", info.Type}, nil
}
// SanitizeName replaces all invalid characters in name.
// SanitizeName replaces all invalid characters in name with replacement.
// Passing a negative value for replacement will delete characters instead
// of replacing them. Use this to automatically generate valid names for maps
// and programs at runtime.
//
// Use this to automatically generate valid names for maps and
// programs at run time.
//
// Passing a negative value for replacement will delete characters
// instead of replacing them.
// The set of allowed characters depends on the running kernel version.
// Dots are only allowed as of kernel 5.2.
func SanitizeName(name string, replacement rune) string {
return strings.Map(func(char rune) rune {
if invalidBPFObjNameChar(char) {
@ -568,25 +636,9 @@ func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
return ProgramID(id), err
}
// NewProgramFromID returns the program for a given id.
//
// Returns ErrNotExist, if there is no eBPF program with the given id.
func NewProgramFromID(id ProgramID) (*Program, error) {
fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id))
if err != nil {
return nil, err
}
name, abi, err := newProgramABIFromFd(fd)
if err != nil {
_ = fd.Close()
return nil, err
}
return newProgram(fd, name, abi), nil
}
// ID returns the systemwide unique ID of the program.
//
// Deprecated: use ProgramInfo.ID() instead.
func (p *Program) ID() (ProgramID, error) {
info, err := bpfGetProgInfoByFD(p.fd)
if err != nil {
@ -595,12 +647,16 @@ func (p *Program) ID() (ProgramID, error) {
return ProgramID(info.id), nil
}
func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
func findKernelType(name string, typ btf.Type) error {
kernel, err := btf.LoadKernelSpec()
if err != nil {
return nil, fmt.Errorf("can't resolve BTF type %s: %w", name, err)
return fmt.Errorf("can't load kernel spec: %w", err)
}
return kernel.FindType(name, typ)
}
func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
type match struct {
p ProgramType
a AttachType
@ -608,10 +664,30 @@ func resolveBTFType(name string, progType ProgramType, attachType AttachType) (b
target := match{progType, attachType}
switch target {
case match{LSM, AttachLSMMac}:
var target btf.Func
err := findKernelType("bpf_lsm_"+name, &target)
if errors.Is(err, btf.ErrNotFound) {
return nil, &internal.UnsupportedFeatureError{
Name: name + " LSM hook",
}
}
if err != nil {
return nil, fmt.Errorf("resolve BTF for LSM hook %s: %w", name, err)
}
return &target, nil
case match{Tracing, AttachTraceIter}:
var target btf.Func
if err := kernel.FindType("bpf_iter_"+name, &target); err != nil {
return nil, fmt.Errorf("can't resolve BTF for iterator %s: %w", name, err)
err := findKernelType("bpf_iter_"+name, &target)
if errors.Is(err, btf.ErrNotFound) {
return nil, &internal.UnsupportedFeatureError{
Name: name + " iterator",
}
}
if err != nil {
return nil, fmt.Errorf("resolve BTF for iterator %s: %w", name, err)
}
return &target, nil


@ -1,25 +0,0 @@
eBPF
-------
[![](https://godoc.org/github.com/cilium/ebpf?status.svg)](https://godoc.org/github.com/cilium/ebpf)
eBPF is a pure Go library that provides utilities for loading, compiling, and debugging eBPF programs. It has minimal external dependencies and is intended to be used in long running processes.
[ebpf/asm](https://godoc.org/github.com/cilium/ebpf/asm) contains a basic assembler.
The library is maintained by [Cloudflare](https://www.cloudflare.com) and [Cilium](https://www.cilium.io). Feel free to [join](https://cilium.herokuapp.com/) the [libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack.
## Current status
The package is production ready, but **the API is explicitly unstable
right now**. Expect to update your code if you want to follow along.
## Requirements
* A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy)
* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested)
## Useful resources
* [Cilium eBPF documentation](https://cilium.readthedocs.io/en/latest/bpf/#bpf-guide) (recommended)
* [Linux documentation on BPF](http://elixir.free-electrons.com/linux/latest/source/Documentation/networking/filter.txt)
* [eBPF features by Linux version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md)


@ -3,7 +3,6 @@ package ebpf
import (
"errors"
"fmt"
"os"
"unsafe"
"github.com/cilium/ebpf/internal"
@ -12,9 +11,7 @@ import (
)
// Generic errors returned by BPF syscalls.
var (
ErrNotExist = errors.New("requested object does not exist")
)
var ErrNotExist = errors.New("requested object does not exist")
// bpfObjName is a null-terminated string made up of
// 'A-Za-z0-9_' characters.
@ -27,18 +24,20 @@ func newBPFObjName(name string) bpfObjName {
return result
}
// invalidBPFObjNameChar returns true if char may not appear in
// a BPF object name.
func invalidBPFObjNameChar(char rune) bool {
dotAllowed := objNameAllowsDot() == nil
switch {
case char >= 'A' && char <= 'Z':
fallthrough
return false
case char >= 'a' && char <= 'z':
fallthrough
return false
case char >= '0' && char <= '9':
fallthrough
return false
case dotAllowed && char == '.':
fallthrough
return false
case char == '_':
return false
default:
@ -69,14 +68,32 @@ type bpfMapOpAttr struct {
flags uint64
}
type bpfBatchMapOpAttr struct {
inBatch internal.Pointer
outBatch internal.Pointer
keys internal.Pointer
values internal.Pointer
count uint32
mapFd uint32
elemFlags uint64
flags uint64
}
type bpfMapInfo struct {
mapType uint32
id uint32
keySize uint32
valueSize uint32
maxEntries uint32
flags uint32
mapName bpfObjName // since 4.15 ad5b177bd73f
map_type uint32 // since 4.12 1e2709769086
id uint32
key_size uint32
value_size uint32
max_entries uint32
map_flags uint32
name bpfObjName // since 4.15 ad5b177bd73f
ifindex uint32 // since 4.16 52775b33bb50
btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6
netns_dev uint64 // since 4.16 52775b33bb50
netns_ino uint64
btf_id uint32 // since 4.18 78958fca7ead
btf_key_type_id uint32 // since 4.18 9b2cf328b2ec
btf_value_type_id uint32
}
type bpfProgLoadAttr struct {
@ -104,18 +121,40 @@ type bpfProgLoadAttr struct {
}
type bpfProgInfo struct {
progType uint32
id uint32
tag [unix.BPF_TAG_SIZE]byte
jitedLen uint32
xlatedLen uint32
jited internal.Pointer
xlated internal.Pointer
loadTime uint64 // since 4.15 cb4d2b3f03d8
createdByUID uint32
nrMapIDs uint32
mapIds internal.Pointer
name bpfObjName
prog_type uint32
id uint32
tag [unix.BPF_TAG_SIZE]byte
jited_prog_len uint32
xlated_prog_len uint32
jited_prog_insns internal.Pointer
xlated_prog_insns internal.Pointer
load_time uint64 // since 4.15 cb4d2b3f03d8
created_by_uid uint32
nr_map_ids uint32
map_ids internal.Pointer
name bpfObjName // since 4.15 067cae47771c
ifindex uint32
gpl_compatible uint32
netns_dev uint64
netns_ino uint64
nr_jited_ksyms uint32
nr_jited_func_lens uint32
jited_ksyms internal.Pointer
jited_func_lens internal.Pointer
btf_id uint32
func_info_rec_size uint32
func_info internal.Pointer
nr_func_info uint32
nr_line_info uint32
line_info internal.Pointer
jited_line_info internal.Pointer
nr_jited_line_info uint32
line_info_rec_size uint32
jited_line_info_rec_size uint32
nr_prog_tags uint32
prog_tags internal.Pointer
run_time_ns uint64
run_cnt uint64
}
type bpfProgTestRunAttr struct {
@ -129,12 +168,6 @@ type bpfProgTestRunAttr struct {
duration uint32
}
type bpfObjGetInfoByFDAttr struct {
fd uint32
infoLen uint32
info internal.Pointer // May be either bpfMapInfo or bpfProgInfo
}
type bpfGetFDByIDAttr struct {
id uint32
next uint32
@ -174,10 +207,6 @@ func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if errors.Is(err, os.ErrPermission) {
return nil, errors.New("permission denied or insufficient rlimit to lock memory for map")
}
if err != nil {
return nil, err
}
@ -185,35 +214,25 @@ func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
return internal.NewFD(uint32(fd)), nil
}
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() (bool, error) {
inner, err := bpfMapCreate(&bpfMapCreateAttr{
mapType: Array,
keySize: 4,
valueSize: 4,
maxEntries: 1,
})
if err != nil {
return false, err
}
defer inner.Close()
innerFd, _ := inner.Value()
nested, err := bpfMapCreate(&bpfMapCreateAttr{
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
_, err := bpfMapCreate(&bpfMapCreateAttr{
mapType: ArrayOfMaps,
keySize: 4,
valueSize: 4,
maxEntries: 1,
innerMapFd: innerFd,
// Invalid file descriptor.
innerMapFd: ^uint32(0),
})
if err != nil {
return false, nil
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
_ = nested.Close()
return true, nil
if errors.Is(err, unix.EBADF) {
return nil
}
return err
})
var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() (bool, error) {
var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
// BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
m, err := bpfMapCreate(&bpfMapCreateAttr{
@ -224,10 +243,10 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps
flags: unix.BPF_F_RDONLY_PROG,
})
if err != nil {
return false, nil
return internal.ErrNotSupported
}
_ = m.Close()
return true, nil
return nil
})
func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
@ -313,6 +332,29 @@ func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) {
return attr.nextID, wrapObjError(err)
}
func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) {
fd, err := m.Value()
if err != nil {
return 0, err
}
attr := bpfBatchMapOpAttr{
inBatch: inBatch,
outBatch: outBatch,
keys: keys,
values: values,
count: count,
mapFd: fd,
}
if opts != nil {
attr.elemFlags = opts.ElemFlags
attr.flags = opts.Flags
}
_, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
// Always return the count, even on an error, since operations such as update may be partially fulfilled.
return attr.count, wrapMapError(err)
}
func wrapObjError(err error) error {
if err == nil {
return nil
@ -337,6 +379,10 @@ func wrapMapError(err error) error {
return ErrKeyExist
}
if errors.Is(err, unix.ENOTSUPP) {
return ErrNotSupported
}
return errors.New(err.Error())
}
@ -353,28 +399,9 @@ func bpfMapFreeze(m *internal.FD) error {
return err
}
func bpfGetObjectInfoByFD(fd *internal.FD, info unsafe.Pointer, size uintptr) error {
value, err := fd.Value()
if err != nil {
return err
}
// available from 4.13
attr := bpfObjGetInfoByFDAttr{
fd: value,
infoLen: uint32(size),
info: internal.NewPointer(info),
}
_, err = internal.BPF(internal.BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return fmt.Errorf("fd %d: %w", fd, err)
}
return nil
}
func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
var info bpfProgInfo
if err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
return nil, fmt.Errorf("can't get program info: %w", err)
}
return &info, nil
@ -382,14 +409,14 @@ func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
var info bpfMapInfo
err := bpfGetObjectInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
if err != nil {
return nil, fmt.Errorf("can't get map info: %w", err)
}
return &info, nil
}
var haveObjName = internal.FeatureTest("object names", "4.15", func() (bool, error) {
var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
attr := bpfMapCreateAttr{
mapType: Array,
keySize: 4,
@ -400,16 +427,16 @@ var haveObjName = internal.FeatureTest("object names", "4.15", func() (bool, err
fd, err := bpfMapCreate(&attr)
if err != nil {
return false, nil
return internal.ErrNotSupported
}
_ = fd.Close()
return true, nil
return nil
})
var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() (bool, error) {
var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error {
if err := haveObjName(); err != nil {
return false, err
return err
}
attr := bpfMapCreateAttr{
@ -422,11 +449,37 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
fd, err := bpfMapCreate(&attr)
if err != nil {
return false, nil
return internal.ErrNotSupported
}
_ = fd.Close()
return true, nil
return nil
})
var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
var maxEntries uint32 = 2
attr := bpfMapCreateAttr{
mapType: Hash,
keySize: 4,
valueSize: 4,
maxEntries: maxEntries,
}
fd, err := bpfMapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
defer fd.Close()
keys := []uint32{1, 2}
values := []uint32{3, 4}
kp, _ := marshalPtr(keys, 8)
vp, _ := marshalPtr(values, 8)
nilPtr := internal.NewPointer(nil)
_, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil)
if err != nil {
return internal.ErrNotSupported
}
return nil
})
func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) {


@ -1,6 +1,6 @@
package ebpf
//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType
//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType
// MapType indicates the type of map structure
// that will be initialized in the kernel.
@ -85,10 +85,19 @@ const (
// hasPerCPUValue returns true if the Map stores a value per CPU.
func (mt MapType) hasPerCPUValue() bool {
if mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash {
return true
}
return false
return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash
}
// canStoreMap returns true if the map type accepts a map fd
// for update and returns a map id for lookup.
func (mt MapType) canStoreMap() bool {
return mt == ArrayOfMaps || mt == HashOfMaps
}
// canStoreProgram returns true if the map type accepts a program fd
// for update and returns a program id for lookup.
func (mt MapType) canStoreProgram() bool {
return mt == ProgramArray
}
// ProgramType of the eBPF program
@ -96,60 +105,37 @@ type ProgramType uint32
// eBPF program types
const (
// Unrecognized program type
UnspecifiedProgram ProgramType = iota
// SocketFilter socket or seccomp filter
SocketFilter
// Kprobe program
Kprobe
// SchedCLS traffic control shaper
SchedCLS
// SchedACT routing control shaper
SchedACT
// TracePoint program
TracePoint
// XDP program
XDP
// PerfEvent program
PerfEvent
// CGroupSKB program
CGroupSKB
// CGroupSock program
CGroupSock
// LWTIn program
LWTIn
// LWTOut program
LWTOut
// LWTXmit program
LWTXmit
// SockOps program
SockOps
// SkSKB program
SkSKB
// CGroupDevice program
CGroupDevice
// SkMsg program
SkMsg
// RawTracepoint program
RawTracepoint
// CGroupSockAddr program
CGroupSockAddr
// LWTSeg6Local program
LWTSeg6Local
// LircMode2 program
LircMode2
// SkReuseport program
SkReuseport
// FlowDissector program
FlowDissector
// CGroupSysctl program
CGroupSysctl
// RawTracepointWritable program
RawTracepointWritable
// CGroupSockopt program
CGroupSockopt
// Tracing program
Tracing
StructOps
Extension
LSM
SkLookup
)
// AttachType of the eBPF program, needed to differentiate allowed context accesses in
@ -157,7 +143,7 @@ const (
// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
type AttachType uint32
// AttachNone is an alias for AttachCGroupInetIngress for readability reasons
// AttachNone is an alias for AttachCGroupInetIngress for readability reasons.
const AttachNone AttachType = 0
const (
@ -190,7 +176,38 @@ const (
AttachModifyReturn
AttachLSMMac
AttachTraceIter
AttachCgroupInet4GetPeername
AttachCgroupInet6GetPeername
AttachCgroupInet4GetSockname
AttachCgroupInet6GetSockname
AttachXDPDevMap
AttachCgroupInetSockRelease
AttachXDPCPUMap
AttachSkLookup
AttachXDP
)
// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
type AttachFlags uint32
// PinType determines whether a map is pinned into a BPFFS.
type PinType int
// Valid pin types.
//
// Mirrors enum libbpf_pin_type.
const (
PinNone PinType = iota
// Pin an object by using its name as the filename.
PinByName
)
// BatchOptions control batch map operations.
//
// Mirrors libbpf struct bpf_map_batch_opts.
// Currently BPF_F_LOCK is the only supported
// flag (for ElemFlags).
type BatchOptions struct {
ElemFlags uint64
Flags uint64
}
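For orientation, a hedged sketch of how these options are consumed. The Map batch methods themselves live in map.go, whose diff is suppressed above, so the BatchUpdate signature used here is an assumption based on this version of the library:

package batch

import "github.com/cilium/ebpf"

// updateBatch writes several key/value pairs in a single
// BPF_MAP_UPDATE_BATCH syscall. The returned count can be smaller than
// len(keys) if the operation was only partially fulfilled.
func updateBatch(m *ebpf.Map, keys, values []uint32) (int, error) {
    return m.BatchUpdate(keys, values, &ebpf.BatchOptions{
        ElemFlags: 0, // BPF_F_LOCK would go here for spin-lock protected values
    })
}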


@ -1,4 +1,4 @@
// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType"; DO NOT EDIT.
// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT.
package ebpf
@ -77,11 +77,15 @@ func _() {
_ = x[RawTracepointWritable-24]
_ = x[CGroupSockopt-25]
_ = x[Tracing-26]
_ = x[StructOps-27]
_ = x[Extension-28]
_ = x[LSM-29]
_ = x[SkLookup-30]
}
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracing"
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup"
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265}
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294}
func (i ProgramType) String() string {
if i >= ProgramType(len(_ProgramType_index)-1) {
@@ -123,11 +127,20 @@ func _() {
_ = x[AttachModifyReturn-26]
_ = x[AttachLSMMac-27]
_ = x[AttachTraceIter-28]
_ = x[AttachCgroupInet4GetPeername-29]
_ = x[AttachCgroupInet6GetPeername-30]
_ = x[AttachCgroupInet4GetSockname-31]
_ = x[AttachCgroupInet6GetSockname-32]
_ = x[AttachXDPDevMap-33]
_ = x[AttachCgroupInetSockRelease-34]
_ = x[AttachXDPCPUMap-35]
_ = x[AttachSkLookup-36]
_ = x[AttachXDP-37]
}
const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIter"
const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP"
var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582}
var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774}
func (i AttachType) String() string {
if i >= AttachType(len(_AttachType_index)-1) {
@@ -135,3 +148,21 @@ func (i AttachType) String() string {
}
return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[PinNone-0]
_ = x[PinByName-1]
}
const _PinType_name = "PinNonePinByName"
var _PinType_index = [...]uint8{0, 7, 16}
func (i PinType) String() string {
if i < 0 || i >= PinType(len(_PinType_index)-1) {
return "PinType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _PinType_name[_PinType_index[i]:_PinType_index[i+1]]
}

View file

@@ -3,16 +3,16 @@ module github.com/containerd/cgroups
go 1.13
require (
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775
github.com/coreos/go-systemd/v22 v22.0.0
github.com/cilium/ebpf v0.4.0
github.com/coreos/go-systemd/v22 v22.1.0
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/docker/go-units v0.4.0
github.com/godbus/dbus/v5 v5.0.3
github.com/gogo/protobuf v1.3.1
github.com/gogo/protobuf v1.3.2
github.com/opencontainers/runtime-spec v1.0.2
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.2.2
github.com/sirupsen/logrus v1.7.0
github.com/stretchr/testify v1.6.1
github.com/urfave/cli v1.22.2
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)

View file

@@ -55,3 +55,7 @@ func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
}
return nil
}
func (n *netclsController) Update(path string, resources *specs.LinuxResources) error {
return n.Create(path, resources)
}
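With this addition, Update on the net_cls controller simply re-runs Create with the new resources. A hedged usage sketch against the containerd/cgroups v1 API; the path and identifiers are illustrative.

package example

import (
	"github.com/containerd/cgroups"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// setClassID updates the net_cls class ID on an existing v1 cgroup.
func setClassID(path string, classID uint32) error {
	control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(path))
	if err != nil {
		return err
	}
	return control.Update(&specs.LinuxResources{
		Network: &specs.LinuxNetwork{ClassID: &classID},
	})
}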

File diff suppressed because it is too large

View file

@@ -18,6 +18,7 @@ package cgroups
import (
"fmt"
"os"
v1 "github.com/containerd/cgroups/stats/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -46,7 +47,6 @@ const (
// available on most linux systems
func Subsystems() []Name {
n := []Name{
Hugetlb,
Freezer,
Pids,
NetCLS,
@@ -59,9 +59,12 @@ func Subsystems() []Name {
Blkio,
Rdma,
}
if !isUserNS {
if !RunningInUserNS() {
n = append(n, Devices)
}
if _, err := os.Stat("/sys/kernel/mm/hugepages"); err == nil {
n = append(n, Hugetlb)
}
return n
}
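A short usage sketch: with this change, callers of Subsystems() only see Devices outside of a user namespace and only see Hugetlb when /sys/kernel/mm/hugepages exists.

package example

import (
	"fmt"

	"github.com/containerd/cgroups"
)

// listSubsystems prints the v1 subsystems expected on this host, mirroring
// the filtering done in Subsystems() above.
func listSubsystems() {
	for _, name := range cgroups.Subsystems() {
		fmt.Println(name)
	}
}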

View file

@@ -36,7 +36,8 @@ import (
)
var (
isUserNS = runningInUserNS()
nsOnce sync.Once
inUserNS bool
checkMode sync.Once
cgMode CGMode
)
@@ -81,33 +82,37 @@ func Mode() CGMode {
return cgMode
}
// runningInUserNS detects whether we are currently running in a user namespace.
// RunningInUserNS detects whether we are currently running in a user namespace.
// Copied from github.com/lxc/lxd/shared/util.go
func runningInUserNS() bool {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return false
}
defer file.Close()
func RunningInUserNS() bool {
nsOnce.Do(func() {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return
}
defer file.Close()
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return false
}
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return
}
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
return false
}
return true
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
return
}
inUserNS = true
})
return inUserNS
}
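RunningInUserNS is now exported and caches its answer with sync.Once, so it is cheap to call from multiple places. A small sketch of the calling pattern used elsewhere in the package:

package example

import (
	"fmt"

	"github.com/containerd/cgroups"
)

// reportNamespace mirrors how defaults() uses the check: device cgroup
// modifications are skipped inside a user namespace because the kernel
// rejects them there.
func reportNamespace() {
	if cgroups.RunningInUserNS() {
		fmt.Println("user namespace: skipping the devices controller")
	} else {
		fmt.Println("initial namespace: devices controller available")
	}
}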
// defaults returns all known groups
@@ -132,7 +137,7 @@ func defaults(root string) ([]Subsystem, error) {
}
// only add the devices cgroup if we are not in a user namespace
// because modifications are not allowed
if !isUserNS {
if !RunningInUserNS() {
s = append(s, NewDevices(root))
}
// add the hugetlb cgroup if error wasn't due to missing hugetlb

View file

@@ -19,6 +19,7 @@ package v2
import (
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/link"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
@@ -42,12 +43,23 @@ func LoadAttachCgroupDeviceFilter(insts asm.Instructions, license string, dirFD
if err != nil {
return nilCloser, err
}
if err := prog.Attach(dirFD, ebpf.AttachCGroupDevice, unix.BPF_F_ALLOW_MULTI); err != nil {
err = link.RawAttachProgram(link.RawAttachProgramOptions{
Target: dirFD,
Program: prog,
Attach: ebpf.AttachCGroupDevice,
Flags: unix.BPF_F_ALLOW_MULTI,
})
if err != nil {
return nilCloser, errors.Wrap(err, "failed to call BPF_PROG_ATTACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI)")
}
closer := func() error {
if err := prog.Detach(dirFD, ebpf.AttachCGroupDevice, unix.BPF_F_ALLOW_MULTI); err != nil {
return errors.Wrap(err, "failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI)")
err = link.RawDetachProgram(link.RawDetachProgramOptions{
Target: dirFD,
Program: prog,
Attach: ebpf.AttachCGroupDevice,
})
if err != nil {
return errors.Wrap(err, "failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE)")
}
return nil
}
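For context, a hedged sketch of the caller side of LoadAttachCgroupDeviceFilter. The DeviceFilter helper (compiling OCI device rules to asm.Instructions plus a license string) lives in the same v2 package in this release, but its exact name and signature here are assumptions to check against the vendored sources.

package example

import (
	v2 "github.com/containerd/cgroups/v2"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
)

// attachDenyAll compiles a deny-all device rule, attaches it to the given
// cgroup v2 directory and returns a closer that detaches the filter.
func attachDenyAll(cgroupDir string) (func() error, error) {
	insts, license, err := v2.DeviceFilter([]specs.LinuxDeviceCgroup{
		{Allow: false, Access: "rwm"}, // catch-all deny rule
	})
	if err != nil {
		return nil, err
	}
	dirFD, err := unix.Open(cgroupDir, unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	closer, err := v2.LoadAttachCgroupDeviceFilter(insts, license, dirFD)
	if err != nil {
		unix.Close(dirFD)
		return nil, err
	}
	// The detach path needs the directory fd again, so close it only after
	// the filter has been detached.
	return func() error {
		defer unix.Close(dirFD)
		return closer()
	}, nil
}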

File diff suppressed because it is too large