xfrm_policy.c 25.2 KB
Newer Older
1 2
/* 
 * xfrm_policy.c
3
 *
4 5 6 7 8 9 10 11
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro
 * 		IPv6 support
 * 	Kazunori MIYAZAWA @USAGI
 * 	YOSHIFUJI Hideaki
 * 		Split up af-specific portion
12
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
13 14 15
 * 	
 */

16
#include <linux/config.h>
17 18 19
#include <net/xfrm.h>
#include <net/ip.h>

20 21
DECLARE_MUTEX(xfrm_cfg_sem);

22 23
static rwlock_t xfrm_policy_lock = RW_LOCK_UNLOCKED;

24
struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
25

26 27
static rwlock_t xfrm_policy_afinfo_lock = RW_LOCK_UNLOCKED;
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
28

29
kmem_cache_t *xfrm_dst_cache;
30

31
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
32
{
33 34
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
35 36
	int err = 0;

37 38 39 40 41 42 43
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
44 45
	else
		err = -EEXIST;
46 47
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
48 49 50
	return err;
}

51
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
52
{
53 54
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
55 56
	int err = 0;

57 58 59 60 61 62
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
63 64
		err = -ENOENT;
	else
65 66 67
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
68 69 70
	return err;
}

71
/* Look up the transform type registered for @proto in @family and pin
 * its owning module.  Returns NULL when no type is registered or the
 * module is being unloaded.  Release with xfrm_put_type().
 */
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *tmap;
	struct xfrm_type *found;

	if (unlikely(afinfo == NULL))
		return NULL;
	tmap = afinfo->type_map;

	read_lock(&tmap->lock);
	found = tmap->map[proto];
	/* A module that refuses try_module_get() is mid-unload: treat
	 * the type as absent. */
	if (unlikely(found && !try_module_get(found->owner)))
		found = NULL;
	read_unlock(&tmap->lock);

	xfrm_policy_put_afinfo(afinfo);
	return found;
}

90 91
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, 
		    unsigned short family)
92
{
93
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
94 95
	int err = 0;

96 97
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
98

99 100
	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
101 102
	else
		err = -EINVAL;
103
	xfrm_policy_put_afinfo(afinfo);
104 105 106
	return err;
}

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
107 108
void xfrm_put_type(struct xfrm_type *type)
{
109
	module_put(type->owner);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
110 111
}

112 113 114 115 116 117 118 119 120 121 122 123 124
/* Convert a relative timeout in seconds to jiffies, capped below
 * MAX_SCHEDULE_TIMEOUT so the result is always a valid timer delta.
 */
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
125
	u32 index;
126 127 128 129 130 131 132 133 134 135 136 137 138 139

	if (xp->dead)
		goto out;

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
James Morris's avatar
James Morris committed
140
		xfrm_pol_hold(xp);
141 142 143 144 145 146

out:
	xfrm_pol_put(xp);
	return;

expired:
147
	index = xp->index;
148 149 150
	xfrm_pol_put(xp);

	/* Not 100% correct. id can be recycled in theory */
151
	xp = xfrm_policy_byid(0, index, 1);
152 153 154 155 156 157
	if (xp) {
		xfrm_policy_kill(xp);
		xfrm_pol_put(xp);
	}
}

158 159 160 161 162

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
163
struct xfrm_policy *xfrm_policy_alloc(int gfp)
164 165 166
{
	struct xfrm_policy *policy;

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
167
	policy = kmalloc(sizeof(struct xfrm_policy), gfp);
168 169 170 171 172

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		policy->lock = RW_LOCK_UNLOCKED;
173 174 175
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
176 177 178 179 180 181 182 183 184 185 186 187 188 189
	}
	return policy;
}

/* Destroy xfrm_policy: descendant resources must be released to this moment. */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	/* Freeing a live policy, one still holding bundles, or one with
	 * a pending timer indicates a refcounting bug — trap loudly. */
	if (!policy->dead)
		BUG();

	if (policy->bundles)
		BUG();

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
void xfrm_policy_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	write_lock_bh(&policy->lock);
	if (policy->dead)
		goto out;

	policy->dead = 1;

	/* Release every cached bundle hanging off this policy. */
	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	/* A pending timer owns a reference; cancelling it drops it. */
	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

out:
	write_unlock_bh(&policy->lock);
}

222 223 224 225 226 227
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute inpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
228
	static u32 idx_generator;
229 230

	for (;;) {
231 232 233 234
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
235 236 237 238 239 240 241 242 243
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
244 245 246
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
247 248
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
249 250 251

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
252
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
253 254 255 256
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
257 258 259 260 261 262 263 264 265
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority)
			continue;
		if (!newpos)
			newpos = p;
		if (delpol)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
266 267
			break;
	}
268 269
	if (newpos)
		p = newpos;
James Morris's avatar
James Morris committed
270
	xfrm_pol_hold(policy);
271
	policy->next = *p;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
272
	*p = policy;
273
	atomic_inc(&flow_cache_genid);
274
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
275 276
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
277 278
	if (policy->lft.hard_add_expires_seconds &&
	    !mod_timer(&policy->timer, jiffies + HZ))
James Morris's avatar
James Morris committed
279
		xfrm_pol_hold(policy);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
280 281
	write_unlock_bh(&xfrm_policy_lock);

282 283 284 285
	if (delpol) {
		atomic_dec(&delpol->refcnt);
		xfrm_policy_kill(delpol);
		xfrm_pol_put(delpol);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
	}
	return 0;
}

struct xfrm_policy *xfrm_policy_delete(int dir, struct xfrm_selector *sel)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			*p = pol->next;
			break;
		}
	}
	if (pol)
302
		atomic_inc(&flow_cache_genid);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
303 304 305 306 307 308 309 310 311
	write_unlock_bh(&xfrm_policy_lock);
	return pol;
}

/* Find a policy by its index.  With @delete it is unlinked and the
 * caller inherits the list's reference; otherwise a new reference is
 * taken.  Note: the list is chosen from the low bits of @id, so the
 * @dir argument is not consulted here.
 */
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **link;

	write_lock_bh(&xfrm_policy_lock);
	for (link = &xfrm_policy_list[id & 7]; (pol = *link) != NULL;
	     link = &pol->next) {
		if (pol->index == id) {
			if (delete)
				*link = pol->next;
			break;
		}
	}
	if (pol) {
		if (delete)
			atomic_inc(&flow_cache_genid);
		else
			xfrm_pol_hold(pol);
	}
	write_unlock_bh(&xfrm_policy_lock);
	return pol;
}

329
void xfrm_policy_flush(void)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);
			xfrm_pol_put(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
346
	atomic_inc(&flow_cache_genid);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
347 348 349 350 351 352 353 354 355 356 357
	write_unlock_bh(&xfrm_policy_lock);
}

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

358
	read_lock_bh(&xfrm_policy_lock);
359
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
360 361 362 363 364 365 366 367 368
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

369
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
370
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
371
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
372 373 374 375 376 377
			if (error)
				goto out;
		}
	}

out:
378
	read_unlock_bh(&xfrm_policy_lock);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
379
	return error;
380 381 382 383 384
}


/* Find policy to apply to this flow. */

385 386
void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			void **objp, atomic_t **obj_refp)
387 388 389
{
	struct xfrm_policy *pol;

390
	read_lock_bh(&xfrm_policy_lock);
391 392
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
393 394 395 396
		int match;

		if (pol->family != family)
			continue;
397

398
		match = xfrm_selector_match(sel, fl, family);
399
		if (match) {
James Morris's avatar
James Morris committed
400
			xfrm_pol_hold(pol);
401 402 403
			break;
		}
	}
404
	read_unlock_bh(&xfrm_policy_lock);
405 406
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
407 408
}

409 410 411 412
struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

413
	read_lock_bh(&xfrm_policy_lock);
414
	if ((pol = sk->policy[dir]) != NULL) {
415 416
		int match;

417
		match = xfrm_selector_match(&pol->selector, fl, sk->family);
418
		if (match)
James Morris's avatar
James Morris committed
419
			xfrm_pol_hold(pol);
420 421
		else
			pol = NULL;
422
	}
423
	read_unlock_bh(&xfrm_policy_lock);
424 425 426
	return pol;
}

427 428 429 430
/* Push @pol onto the per-socket list for @dir and take the list's
 * reference.  Caller holds xfrm_policy_lock.
 */
void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[XFRM_POLICY_MAX+dir];
	xfrm_policy_list[XFRM_POLICY_MAX+dir] = pol;
	xfrm_pol_hold(pol);
}

/* Remove @pol from the per-socket list for @dir and drop the list's
 * reference.  Silently does nothing if the policy is not listed.
 * Caller holds xfrm_policy_lock.
 */
void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	struct xfrm_policy **link;

	for (link = &xfrm_policy_list[XFRM_POLICY_MAX+dir];
	     *link != NULL; link = &(*link)->next) {
		if (*link == pol) {
			*link = pol->next;
			atomic_dec(&pol->refcnt);
			return;
		}
	}
}

448 449 450 451 452 453 454
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->policy[dir];
	sk->policy[dir] = pol;
455 456 457 458 459 460 461
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		xfrm_sk_policy_link(pol, dir);
	}
	if (old_pol)
		xfrm_sk_policy_unlink(old_pol, dir);
462 463 464 465 466 467 468 469 470
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
		xfrm_pol_put(old_pol);
	}
	return 0;
}

471
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
472 473 474 475 476 477 478 479 480 481
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
482
		newp->index = old->index;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
483 484
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
485 486 487
		write_lock_bh(&xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		write_unlock_bh(&xfrm_policy_lock);
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
488 489 490 491 492 493 494 495 496 497 498
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0, *p1;
	p0 = sk->policy[0];
	p1 = sk->policy[1];
	sk->policy[0] = NULL;
	sk->policy[1] = NULL;
499
	if (p0 && (sk->policy[0] = clone_policy(p0, 0)) == NULL)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
500
		return -ENOMEM;
501
	if (p1 && (sk->policy[1] = clone_policy(p1, 1)) == NULL)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
502 503 504 505
		return -ENOMEM;
	return 0;
}

506
void __xfrm_sk_free_policy(struct xfrm_policy *pol, int dir)
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
507
{
508 509 510 511
	write_lock_bh(&xfrm_policy_lock);
	xfrm_sk_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
512 513 514 515
	xfrm_policy_kill(pol);
	xfrm_pol_put(pol);
}

516 517 518
/* Resolve list of templates for the flow, given policy. */

static int
519 520 521
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
522
{
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
523
	int nx;
524
	int i, error;
525 526
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
527

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
528 529
	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
530 531
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
532 533 534
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
535 536
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
537 538
		}

539
		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

564 565 566 567
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

568 569
static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct rtable *rt, struct xfrm_policy *policy, unsigned short family)
570
{
571 572 573 574 575 576 577
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, rt, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
578 579 580 581 582 583
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

584
static int
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
585
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
586 587
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
588 589
{
	int err;
590 591 592 593 594
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
595 596 597
	return err;
}

598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
/* Map an XFRM_POLICY_* direction onto the flow cache's FLOW_DIR_*.
 * When the two enumerations coincide (the usual case, checked with
 * compile-time constants) this collapses to the identity.
 * Unrecognised values map to FLOW_DIR_IN, matching the original
 * switch whose default label fell through to the IN case.
 */
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;

	switch (dir) {
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	case XFRM_POLICY_IN:
	default:
		return FLOW_DIR_IN;
	}
}

615 616 617 618 619 620 621 622 623 624 625 626
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct rtable *rt = (struct rtable*)*dst_p;
	struct dst_entry *dst;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
627
	int nx = 0;
628 629
	int err;
	u32 genid;
630
	u16 family = (*dst_p)->ops->family;
631

632 633 634 635 636 637 638 639 640
	switch (family) {
	case AF_INET:
		if (!fl->fl4_src)
			fl->fl4_src = rt->rt_src;
		if (!fl->fl4_dst)
			fl->fl4_dst = rt->rt_dst;
	case AF_INET6:
		/* Still not clear... */
	default:
641
		/* nothing */;
642 643
	}

644
restart:
645
	genid = atomic_read(&flow_cache_genid);
646 647 648 649 650 651 652 653 654
	policy = NULL;
	if (sk && sk->policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit...  */
		if ((rt->u.dst.flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

655 656 657
		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
658
	}
659

660 661 662
	if (!policy)
		return 0;

663
	policy->curlft.use_time = (unsigned long)xtime.tv_sec;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
664

665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682
	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		xfrm_pol_put(policy);
		return -EPERM;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
683 684 685 686
		dst = xfrm_find_bundle(fl, rt, policy, family);
		if (IS_ERR(dst)) {
			xfrm_pol_put(policy);
			return PTR_ERR(dst);
687 688 689 690 691
		}

		if (dst)
			break;

692 693
		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
694 695
		if (unlikely(nx<0)) {
			err = nx;
696 697 698 699 700 701 702
			if (err == -EAGAIN) {
				struct task_struct *tsk = current;
				DECLARE_WAITQUEUE(wait, tsk);
				if (!flags)
					goto error;

				__set_task_state(tsk, TASK_INTERRUPTIBLE);
703
				add_wait_queue(&km_waitq, &wait);
704
				err = xfrm_tmpl_resolve(policy, fl, xfrm, family);
705 706 707
				if (err == -EAGAIN)
					schedule();
				__set_task_state(tsk, TASK_RUNNING);
708
				remove_wait_queue(&km_waitq, &wait);
709 710 711 712 713 714

				if (err == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (err == -EAGAIN ||
715
				    genid != atomic_read(&flow_cache_genid))
716 717 718 719
					goto restart;
			}
			if (err)
				goto error;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
720 721 722 723
		} else if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
724 725 726
		}

		dst = &rt->u.dst;
727 728
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

729 730
		if (unlikely(err)) {
			int i;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
731
			for (i=0; i<nx; i++)
732 733 734 735 736 737 738 739 740 741 742 743 744
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead)) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 */
			write_unlock_bh(&policy->lock);

			xfrm_pol_put(policy);
745
			if (dst)
746 747 748 749 750
				dst_free(dst);
			goto restart;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
751
		dst_hold(dst);
752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	ip_rt_put(rt);
	xfrm_pol_put(policy);
	return 0;

error:
	ip_rt_put(rt);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
773 774
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x, 
	      unsigned short family)
775 776 777 778 779
{
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
780
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
781 782 783
}

static inline int
784 785
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int idx,
	       unsigned short family)
786 787
{
	for (; idx < sp->len; idx++) {
788
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
789 790 791 792 793
			return ++idx;
	}
	return -1;
}

794 795
static int
_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
796
{
797
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
798

799 800
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
801

802 803 804
	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
805 806 807 808
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 
			unsigned short family)
809 810 811 812
{
	struct xfrm_policy *pol;
	struct flowi fl;

813
	if (_decode_session(skb, &fl, family) < 0)
814
		return 0;
815 816 817 818

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;
819

820
		for (i=skb->sp->len-1; i>=0; i--) {
821 822 823 824 825 826 827 828 829
		  struct sec_decap_state *xvec = &(skb->sp->x[i]);
			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
				return 0;

			/* If there is a post_input processor, try running it */
			if (xvec->xvec->type->post_input &&
			    (xvec->xvec->type->post_input)(xvec->xvec,
							   &(xvec->decap),
							   skb) != 0)
830 831 832 833
				return 0;
		}
	}

834 835
	pol = NULL;
	if (sk && sk->policy[dir])
836
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
837 838

	if (!pol)
839 840 841
		pol = flow_cache_lookup(&fl, family,
					policy_to_flow_dir(dir),
					xfrm_policy_lookup);
842 843 844 845

	if (!pol)
		return 1;

846
	pol->curlft.use_time = (unsigned long)xtime.tv_sec;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
847

848 849 850
	if (pol->action == XFRM_POLICY_ALLOW) {
		if (pol->xfrm_nr != 0) {
			struct sec_path *sp;
851
			static struct sec_path dummy;
852 853 854
			int i, k;

			if ((sp = skb->sp) == NULL)
855
				sp = &dummy;
856 857 858 859 860 861 862

			/* For each tmpl search corresponding xfrm.
			 * Order is _important_. Later we will implement
			 * some barriers, but at the moment barriers
			 * are implied between each two transformations.
			 */
			for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
863 864
				if (pol->xfrm_vec[i].optional)
					continue;
865
				k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
866 867 868 869 870 871 872 873 874 875 876 877 878
				if (k < 0)
					goto reject;
			}
		}
		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}

879
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
880 881 882
{
	struct flowi fl;

883
	if (_decode_session(skb, &fl, family) < 0)
884
		return 0;
885 886 887 888

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
889 890
/* Optimize later using cookies and generation ids. */

891
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
892
{
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
893 894 895 896 897 898 899 900
	struct dst_entry *child = dst;

	while (child) {
		if (child->obsolete > 0 ||
		    (child->xfrm && child->xfrm->km.state != XFRM_STATE_VALID)) {
			dst_release(dst);
			return NULL;
		}
901
		child = child->child;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
902 903 904
	}

	return dst;
905 906
}

907
static void xfrm_dst_destroy(struct dst_entry *dst)
908 909 910 911 912
{
	xfrm_state_put(dst->xfrm);
	dst->xfrm = NULL;
}

913
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}

919
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
920 921 922 923 924 925 926 927 928 929
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

930
static void __xfrm_garbage_collect(void)
931 932 933 934 935 936
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
937
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (atomic_read(&dst->__refcnt) == 0) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
958
		dst_free(dst);
959
	}
960
}
961

Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
/* Does any dst in @dst's child chain reference state @x? */
static int bundle_depends_on(struct dst_entry *dst, struct xfrm_state *x)
{
	for (; dst; dst = dst->child) {
		if (dst->xfrm == x)
			return 1;
	}
	return 0;
}

int xfrm_flush_bundles(struct xfrm_state *x)
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
978
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
Alexey Kuznetsov's avatar
Alexey Kuznetsov committed
979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (bundle_depends_on(dst, x)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}

1005 1006 1007 1008 1009 1010 1011 1012 1013
/* Well... that's _TASK_. We need to scan through transformation
 * list and figure out what mss tcp should generate in order to
 * final datagram fit to mtu. Mama mia... :-)
 *
 * Apparently, some easy way exists, but we used to choose the most
 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
 *
 * Consider this function as something like dark humour. :-)
 */
1014
static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
1015 1016 1017 1018 1019 1020 1021 1022 1023 1024
{
	int res = mtu - dst->header_len;

	for (;;) {
		struct dst_entry *d = dst;
		int m = res;

		do {
			struct xfrm_state *x = d->xfrm;
			if (x) {
1025 1026 1027
				spin_lock_bh(&x->lock);
				if (x->km.state == XFRM_STATE_VALID &&
				    x->type && x->type->get_max_size)
1028 1029 1030
					m = x->type->get_max_size(d->xfrm, m);
				else
					m += x->props.header_len;
1031
				spin_unlock_bh(&x->lock);
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044
			}
		} while ((d = d->child) != NULL);

		if (m <= mtu)
			break;
		res -= (m - mtu);
		if (res < 88)
			return mtu;
	}

	return res + dst->header_len;
}

1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075
/* Register per-family policy glue, filling in any dst_ops / afinfo
 * hooks the caller left NULL with the generic xfrm implementations.
 * -ENOBUFS if the family's slot is already occupied.
 */
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int ret = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		ret = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->destroy == NULL))
			dst_ops->destroy = xfrm_dst_destroy;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->get_mss == NULL))
			dst_ops->get_mss = xfrm_get_mss;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return ret;
}
1076

1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102
/* Unregister per-family policy glue, clearing every hook that
 * registration installed.  -EINVAL if a different afinfo is bound to
 * the family.
 */
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int ret = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			ret = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;

			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->destroy = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			dst_ops->get_mss = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return ret;
}
1103

1104
struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
1105
{
1106 1107 1108 1109 1110 1111 1112 1113 1114 1115
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}
1116

1117 1118 1119 1120 1121 1122
/* Release the read reference taken by xfrm_policy_get_afinfo(). */
void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
1123

1124 1125 1126 1127 1128 1129 1130 1131 1132
/* Create the slab cache backing xfrm_dst allocations; fatal if it
 * cannot be created at boot.
 */
void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");
}
1133

1134 1135
/* Boot-time initialisation of the xfrm subsystem. */
void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
}
1139