// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 */
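
/* Illustrative userspace sketch (an assumption, not part of this file; the
 * attr fields follow the bpf(2) UAPI in <linux/bpf.h>, the path and prog_fd
 * are examples): pinning a loaded program is a BPF_OBJ_PIN command against
 * a bpffs path, handled kernel-side by bpf_obj_pin_user() below.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.bpf_fd   = prog_fd;	// fd from a prior BPF_PROG_LOAD
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 */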

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kstrtox.h>
#include "preload/bpf_preload.h"

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops  = { };

struct inode *bpf_get_inode(struct super_block *sb,
			    const struct inode *dir,
			    umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	simple_inode_init_ts(inode);

	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);

	return inode;
}

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}

static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map.  The purpose is to
 * provide a simple, intuitive way for users to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};
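
/* Illustrative usage (an assumption, not from this file; the map id and
 * path are examples): once a map whose type supports map_seq_show_elem is
 * pinned, its contents can be read with plain cat, e.g.:
 *
 *	# bpftool map pin id 42 /sys/fs/bpf/my_map
 *	# cat /sys/fs/bpf/my_map
 *
 * The two WARNING lines from map_seq_show() precede the dumped elements.
 */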

static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}

static int bpf_obj_do_pin(int path_fd, const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(path_fd, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(path_fd, pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

static void *bpf_obj_do_get(int path_fd, const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(path_fd, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = path_permission(&path, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(path_fd, pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}
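
/* Illustrative userspace counterpart (an assumption; the path and flags are
 * examples): retrieving a pinned object is the BPF_OBJ_GET command, which
 * lands in bpf_obj_get_user() above and returns a fresh fd.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.pathname   = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.file_flags = 0;	// or BPF_F_RDONLY / BPF_F_WRONLY for maps
 *	fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */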

static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
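
/* Illustrative in-kernel usage of the export above (hypothetical caller,
 * path, and prog type): a subsystem can resolve a pinned program by bpffs
 * path and take a reference on it:
 *
 *	struct bpf_prog *prog;
 *
 *	prog = bpf_prog_get_type_path("/sys/fs/bpf/my_prog",
 *				      BPF_PROG_TYPE_SOCKET_FILTER);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	...
 *	bpf_prog_put(prog);	// drop the reference when done
 */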

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	struct bpf_mount_opts *opts = root->d_sb->s_fs_info;
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
	u64 mask;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);

	mask = (1ULL << __MAX_BPF_CMD) - 1;
	if ((opts->delegate_cmds & mask) == mask)
		seq_printf(m, ",delegate_cmds=any");
	else if (opts->delegate_cmds)
		seq_printf(m, ",delegate_cmds=0x%llx", opts->delegate_cmds);

	mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
	if ((opts->delegate_maps & mask) == mask)
		seq_printf(m, ",delegate_maps=any");
	else if (opts->delegate_maps)
		seq_printf(m, ",delegate_maps=0x%llx", opts->delegate_maps);

	mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1;
	if ((opts->delegate_progs & mask) == mask)
		seq_printf(m, ",delegate_progs=any");
	else if (opts->delegate_progs)
		seq_printf(m, ",delegate_progs=0x%llx", opts->delegate_progs);

	mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1;
	if ((opts->delegate_attachs & mask) == mask)
		seq_printf(m, ",delegate_attachs=any");
	else if (opts->delegate_attachs)
		seq_printf(m, ",delegate_attachs=0x%llx", opts->delegate_attachs);
	return 0;
}
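
/* Illustrative /proc/mounts line (the values are an example) produced by
 * bpf_show_options() for an instance mounted with a restricted mode and
 * full command delegation:
 *
 *	none /sys/fs/bpf bpf rw,relatime,mode=700,delegate_cmds=any 0 0
 */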

static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};

enum {
	OPT_MODE,
	OPT_DELEGATE_CMDS,
	OPT_DELEGATE_MAPS,
	OPT_DELEGATE_PROGS,
	OPT_DELEGATE_ATTACHS,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",			OPT_MODE),
	fsparam_string	("delegate_cmds",		OPT_DELEGATE_CMDS),
	fsparam_string	("delegate_maps",		OPT_DELEGATE_MAPS),
	fsparam_string	("delegate_progs",		OPT_DELEGATE_PROGS),
	fsparam_string	("delegate_attachs",		OPT_DELEGATE_ATTACHS),
	{}
};

static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->s_fs_info;
	struct fs_parse_result result;
	int opt, err;
	u64 msk;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0) {
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		if (opt == -ENOPARAM) {
			opt = vfs_parse_fs_param_source(fc, param);
			if (opt != -ENOPARAM)
				return opt;

			return 0;
		}

		if (opt < 0)
			return opt;
	}

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	case OPT_DELEGATE_CMDS:
	case OPT_DELEGATE_MAPS:
	case OPT_DELEGATE_PROGS:
	case OPT_DELEGATE_ATTACHS:
		if (strcmp(param->string, "any") == 0) {
			msk = ~0ULL;
		} else {
			err = kstrtou64(param->string, 0, &msk);
			if (err)
				return err;
		}
		/* Setting delegation mount options requires privileges */
		if (msk && !capable(CAP_SYS_ADMIN))
			return -EPERM;
		switch (opt) {
		case OPT_DELEGATE_CMDS: opts->delegate_cmds |= msk; break;
		case OPT_DELEGATE_MAPS: opts->delegate_maps |= msk; break;
		case OPT_DELEGATE_PROGS: opts->delegate_progs |= msk; break;
		case OPT_DELEGATE_ATTACHS: opts->delegate_attachs |= msk; break;
		default: return -EINVAL;
		}
		break;
	}

	return 0;
}
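
/* Illustrative mount invocations (examples, not from this file): delegation
 * options accept "any" or a numeric bitmask and require CAP_SYS_ADMIN:
 *
 *	# mount -t bpf -o mode=0700 none /sys/fs/bpf
 *	# mount -t bpf -o delegate_cmds=any,delegate_maps=0x3 none /jail/bpf
 */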

struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
{
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will populate it.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
			return false;
	}
	/* And grab the reference, so the module doesn't disappear while the
	 * kernel is interacting with it and its user-mode driver (UMD).
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");
		return false;
	}
	return true;
}

static void bpf_preload_mod_put(void)
{
	if (bpf_preload_ops)
		/* now user can "rmmod bpf_preload" if necessary */
		module_put(bpf_preload_ops->owner);
}

static DEFINE_MUTEX(bpf_preload_lock);

static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	err = bpf_preload_ops->preload(objs);
	if (err)
		goto out_put;
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		bpf_link_inc(objs[i].link);
		err = bpf_iter_link_pin_kernel(parent,
					       objs[i].link_name, objs[i].link);
		if (err) {
			bpf_link_put(objs[i].link);
			goto out_put;
		}
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	return err;
}

static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = sb->s_fs_info;
	struct inode *inode;
	int ret;

	/* Mounting an instance of BPF FS requires privileges */
	if (fc->user_ns != &init_user_ns && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}

static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}

static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	/* start out with no BPF token delegation enabled */
	opts->delegate_cmds = 0;
	opts->delegate_maps = 0;
	opts->delegate_progs = 0;
	opts->delegate_attachs = 0;

	fc->s_fs_info = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}

static void bpf_kill_super(struct super_block *sb)
{
	struct bpf_mount_opts *opts = sb->s_fs_info;

	kill_litter_super(sb);
	kfree(opts);
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= bpf_kill_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);