Commit ecb0c4f5 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents ec1a424c d9e74368
How to use the Linux packet generator module.
1. Enable CONFIG_NET_PKTGEN to compile and build pktgen.o, and install it
where modprobe/insmod can find it.
2. Cut out the script "ipg" (see below).
3. Edit the script to set the preferred device and destination IP address.
3a. Create more scripts for different interfaces. Up to thirty-two
pktgen processes can be configured and run at once by using the
32 /proc/net/pktgen/pg* files.
4. Run in shell: ". ipg"
5. After this, two commands are defined:
A. "pg" to start the generator and to get results.
B. "pgset" to change generator parameters. For example:
pgset "clone_skb 100" sets the number of copies of the same packet
that will be sent before a new packet is allocated
pgset "clone_skb 0" use multiple SKBs for packet generation
pgset "pkt_size 9014" sets packet size to 9014
pgset "frags 5" packet will consist of 5 fragments
pgset "count 200000" sets number of packets to send, set to zero
for continuous sends until explicitly
stopped.
pgset "ipg 5000" sets artificial gap inserted between packets
to 5000 nanoseconds
pgset "dst 10.0.0.1" sets IP destination address
(BEWARE! This generator is very aggressive!)
pgset "dst_min 10.0.0.1" Same as dst
pgset "dst_max 10.0.0.254" Set the maximum destination IP.
pgset "src_min 10.0.0.1" Set the minimum (or only) source IP.
pgset "src_max 10.0.0.254" Set the maximum source IP.
pgset "dstmac 00:00:00:00:00:00" sets MAC destination address
pgset "srcmac 00:00:00:00:00:00" sets MAC source address
pgset "src_mac_count 1" Sets the number of MACs we'll range through. The
'minimum' MAC is what you set with srcmac.
pgset "dst_mac_count 1" Sets the number of MACs we'll range through. The
'minimum' MAC is what you set with dstmac.
pgset "flag [name]" Set a flag to determine behaviour. Current flags
are: IPSRC_RND #IP Source is random (between min/max),
IPDST_RND, UDPSRC_RND,
UDPDST_RND, MACSRC_RND, MACDST_RND
pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then
cycle through the port range.
pgset "udp_src_max 9" set UDP source port max.
pgset "udp_dst_min 9" set UDP destination port min, If < udp_dst_max, then
cycle through the port range.
pgset "udp_dst_max 9" set UDP destination port max.
pgset stop aborts injection
Also, ^C aborts generator.
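For example, after sourcing the script a short session could look like the
following (the interface name, addresses and counts below are only
placeholders; adjust them to your setup):

pgset "odev eth1"
pgset "dst 10.0.0.2"
pgset "dstmac 00:11:22:33:44:55"
pgset "count 100000"
pgset "pkt_size 60"
pg

To drive several interfaces at once (step 3a above), copy the script and
point PGDEV at another control file, for instance /proc/net/pktgen/pg1.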
---- cut here
#! /bin/sh
modprobe pktgen
PGDEV=/proc/net/pktgen/pg0
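# pgset: send one command to pktgen; print the Result line if it is not OK.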
pgset() {
    local result

    echo "$1" > $PGDEV
    result=`cat $PGDEV | fgrep "Result: OK:"`
    if [ "$result" = "" ]; then
        cat $PGDEV | fgrep Result:
    fi
}
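# pg: start the generator and print the results.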
pg() {
    echo inject > $PGDEV
    cat $PGDEV
}
pgset "odev eth0"
pgset "dst 0.0.0.0"
---- cut here
......@@ -9,24 +9,30 @@ config CRYPTO
help
This option provides the core Cryptographic API.
config CRYPTO_HMAC
bool "HMAC support"
depends on CRYPTO
help
HMAC: Keyed-Hashing for Message Authentication (RFC2104).
This is required for IPSec.
config CRYPTO_MD4
tristate "MD4 digest algorithm"
depends on CRYPTO
help
MD4 message digest algorithm (RFC1320), including HMAC (RFC2104).
MD4 message digest algorithm (RFC1320).
config CRYPTO_MD5
tristate "MD5 digest algorithm"
depends on CRYPTO
help
MD5 message digest algorithm (RFC1321), including HMAC (RFC2104, RFC2403).
MD5 message digest algorithm (RFC1321).
config CRYPTO_SHA1
tristate "SHA-1 digest algorithm"
depends on CRYPTO
help
SHA-1 secure hash standard (FIPS 180-1), including HMAC (RFC2104, RFC2404).
SHA-1 secure hash standard (FIPS 180-1).
config CRYPTO_DES
tristate "DES and Triple DES EDE cipher algorithms"
......
......@@ -2,11 +2,12 @@
# Cryptographic API
#
export-objs := api.o
export-objs := api.o hmac.o
obj-$(CONFIG_CRYPTO) += api.o cipher.o digest.o compress.o
obj-$(CONFIG_KMOD) += autoload.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1.o
......
......@@ -67,9 +67,10 @@ static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
return crypto_init_compress_flags(tfm, flags);
default:
BUG();
break;
}
BUG();
return -EINVAL;
}
......@@ -99,13 +100,7 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
struct crypto_tfm *tfm = NULL;
struct crypto_alg *alg;
alg = crypto_alg_lookup(name);
#ifdef CONFIG_KMOD
if (alg == NULL) {
crypto_alg_autoload(name);
alg = crypto_alg_lookup(name);
}
#endif
alg = crypto_alg_mod_lookup(name);
if (alg == NULL)
goto out;
......@@ -113,6 +108,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
if (tfm == NULL)
goto out_put;
memset(tfm, 0, sizeof(*tfm));
if (alg->cra_ctxsize) {
tfm->crt_ctx = kmalloc(alg->cra_ctxsize, GFP_KERNEL);
if (tfm->crt_ctx == NULL)
......@@ -121,13 +118,24 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
tfm->__crt_alg = alg;
if (alg->cra_blocksize) {
tfm->crt_work_block = kmalloc(alg->cra_blocksize + 1,
GFP_KERNEL);
if (tfm->crt_work_block == NULL)
goto out_free_ctx;
}
if (crypto_init_flags(tfm, flags))
goto out_free_ctx;
goto out_free_work_block;
crypto_init_ops(tfm);
goto out;
out_free_work_block:
if (tfm->__crt_alg->cra_blocksize)
kfree(tfm->crt_work_block);
out_free_ctx:
if (tfm->__crt_alg->cra_ctxsize)
kfree(tfm->crt_ctx);
......@@ -145,6 +153,9 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
if (tfm->__crt_alg->cra_ctxsize)
kfree(tfm->crt_ctx);
if (tfm->__crt_alg->cra_blocksize)
kfree(tfm->crt_work_block);
if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_CIPHER)
if (tfm->crt_cipher.cit_iv)
kfree(tfm->crt_cipher.cit_iv);
......@@ -207,6 +218,19 @@ int crypto_unregister_alg(struct crypto_alg *alg)
return ret;
}
int crypto_alg_available(const char *name, u32 flags)
{
int ret = 0;
struct crypto_alg *alg = crypto_alg_mod_lookup(name);
if (alg) {
crypto_alg_put(alg);
ret = 1;
}
return ret;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
struct list_head *v;
......@@ -296,3 +320,4 @@ EXPORT_SYMBOL_GPL(crypto_register_alg);
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
EXPORT_SYMBOL_GPL(crypto_alg_available);
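As a usage sketch (not part of this patch): the new query interface lets a
caller probe for an algorithm before allocating a transform, and with
CONFIG_KMOD the lookup will also try to autoload the corresponding module.
The helper name get_sha1_tfm() and the choice of "sha1" are illustrative only.

#include <linux/crypto.h>

static struct crypto_tfm *get_sha1_tfm(void)
{
	/* Probe: returns 1 if the "sha1" algorithm is usable (with
	 * CONFIG_KMOD this may trigger module autoloading). */
	if (!crypto_alg_available("sha1", 0))
		return NULL;

	/* Allocation can still fail (e.g. out of memory). */
	return crypto_alloc_tfm("sha1", 0);
}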
......@@ -25,3 +25,13 @@ void crypto_alg_autoload(const char *name)
{
request_module(name);
}
struct crypto_alg *crypto_alg_mod_lookup(const char *name)
{
struct crypto_alg *alg = crypto_alg_lookup(name);
if (alg == NULL) {
crypto_alg_autoload(name);
alg = crypto_alg_lookup(name);
}
return alg;
}
......@@ -5,9 +5,6 @@
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* The HMAC implementation is derived from USAGI.
* Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
......@@ -61,56 +58,6 @@ static void digest(struct crypto_tfm *tfm,
crypto_digest_final(tfm, out);
}
static void hmac(struct crypto_tfm *tfm, u8 *key, unsigned int keylen,
struct scatterlist *sg, unsigned int nsg, u8 *out)
{
unsigned int i;
struct scatterlist tmp;
char ipad[crypto_tfm_alg_blocksize(tfm) + 1];
char opad[crypto_tfm_alg_blocksize(tfm) + 1];
if (keylen > crypto_tfm_alg_blocksize(tfm)) {
tmp.page = virt_to_page(key);
tmp.offset = ((long)key & ~PAGE_MASK);
tmp.length = keylen;
crypto_digest_digest(tfm, &tmp, 1, key);
keylen = crypto_tfm_alg_digestsize(tfm);
}
memset(ipad, 0, sizeof(ipad));
memset(opad, 0, sizeof(opad));
memcpy(ipad, key, keylen);
memcpy(opad, key, keylen);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) {
ipad[i] ^= 0x36;
opad[i] ^= 0x5c;
}
tmp.page = virt_to_page(ipad);
tmp.offset = ((long)ipad & ~PAGE_MASK);
tmp.length = crypto_tfm_alg_blocksize(tfm);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
crypto_digest_update(tfm, sg, nsg);
crypto_digest_final(tfm, out);
tmp.page = virt_to_page(opad);
tmp.offset = ((long)opad & ~PAGE_MASK);
tmp.length = crypto_tfm_alg_blocksize(tfm);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
tmp.page = virt_to_page(out);
tmp.offset = ((long)out & ~PAGE_MASK);
tmp.length = crypto_tfm_alg_digestsize(tfm);
crypto_digest_update(tfm, &tmp, 1);
crypto_digest_final(tfm, out);
}
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
{
return crypto_cipher_flags(flags) ? -EINVAL : 0;
......@@ -120,9 +67,8 @@ void crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct digest_tfm *ops = &tfm->crt_digest;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
ops->dit_hmac = hmac;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
}
/*
* Cryptographic API.
*
* HMAC: Keyed-Hashing for Message Authentication (RFC2104).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* The HMAC implementation is derived from USAGI.
* Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen)
{
struct scatterlist tmp;
tmp.page = virt_to_page(key);
tmp.offset = ((long)key & ~PAGE_MASK);
tmp.length = keylen;
crypto_digest_digest(tfm, &tmp, 1, key);
}
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen)
{
unsigned int i;
struct scatterlist tmp;
char *ipad = tfm->crt_work_block;
if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
hash_key(tfm, key, *keylen);
*keylen = crypto_tfm_alg_digestsize(tfm);
}
memset(ipad, 0, crypto_tfm_alg_blocksize(tfm) + 1);
memcpy(ipad, key, *keylen);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
ipad[i] ^= 0x36;
tmp.page = virt_to_page(ipad);
tmp.offset = ((long)ipad & ~PAGE_MASK);
tmp.length = crypto_tfm_alg_blocksize(tfm);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
}
void crypto_hmac_update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
{
crypto_digest_update(tfm, sg, nsg);
}
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
unsigned int *keylen, u8 *out)
{
unsigned int i;
struct scatterlist tmp;
char *opad = tfm->crt_work_block;
if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
hash_key(tfm, key, *keylen);
*keylen = crypto_tfm_alg_digestsize(tfm);
}
crypto_digest_final(tfm, out);
memset(opad, 0, crypto_tfm_alg_blocksize(tfm) + 1);
memcpy(opad, key, *keylen);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
opad[i] ^= 0x5c;
tmp.page = virt_to_page(opad);
tmp.offset = ((long)opad & ~PAGE_MASK);
tmp.length = crypto_tfm_alg_blocksize(tfm);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
tmp.page = virt_to_page(out);
tmp.offset = ((long)out & ~PAGE_MASK);
tmp.length = crypto_tfm_alg_digestsize(tfm);
crypto_digest_update(tfm, &tmp, 1);
crypto_digest_final(tfm, out);
}
void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
struct scatterlist *sg, unsigned int nsg, u8 *out)
{
crypto_hmac_init(tfm, key, keylen);
crypto_hmac_update(tfm, sg, nsg);
crypto_hmac_final(tfm, key, keylen, out);
}
EXPORT_SYMBOL_GPL(crypto_hmac_init);
EXPORT_SYMBOL_GPL(crypto_hmac_update);
EXPORT_SYMBOL_GPL(crypto_hmac_final);
EXPORT_SYMBOL_GPL(crypto_hmac);
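To make the new three-call HMAC interface concrete, here is a minimal sketch
of a caller computing an HMAC over a single flat (lowmem) buffer. It is not
part of this patch and assumes CONFIG_CRYPTO_HMAC is enabled; "sha1" and the
helper name are illustrative. Note that the key length is passed by reference
because crypto_hmac_init() hashes an over-long key in place and updates
*keylen.

#include <linux/crypto.h>
#include <linux/mm.h>
#include <asm/scatterlist.h>

static int hmac_sha1_example(u8 *key, unsigned int keylen,
			     void *data, unsigned int len, u8 *out)
{
	struct scatterlist sg;
	struct crypto_tfm *tfm = crypto_alloc_tfm("sha1", 0);

	if (tfm == NULL)
		return -ENOMEM;

	/* Describe the buffer as a single scatterlist entry. */
	sg.page   = virt_to_page(data);
	sg.offset = ((long) data & ~PAGE_MASK);
	sg.length = len;

	/* Equivalent to crypto_hmac_init() + crypto_hmac_update() +
	 * crypto_hmac_final(); keylen may be rewritten if the key was
	 * longer than the algorithm block size. */
	crypto_hmac(tfm, key, &keylen, &sg, 1, out);

	crypto_free_tfm(tfm);
	return 0;
}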
......@@ -35,13 +35,21 @@ static inline void crypto_yield(struct crypto_tfm *tfm)
cond_resched();
}
static inline int crypto_cipher_flags(u32 flags)
static inline u32 crypto_cipher_flags(u32 flags)
{
return flags & (CRYPTO_TFM_MODE_MASK|CRYPTO_TFM_REQ_WEAK_KEY);
}
struct crypto_alg *crypto_alg_lookup(const char *name);
#ifdef CONFIG_KMOD
void crypto_alg_autoload(const char *name);
struct crypto_alg *crypto_alg_mod_lookup(const char *name);
#else
static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
{
return crypto_alg_lookup(name);
}
#endif
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
......
......@@ -46,6 +46,8 @@ static int mode = 0;
static char *xbuf;
static char *tvmem;
static char *check[] = { "des", "md5", "des3_ede", "rot13", "sha1", NULL };
static void
hexdump(unsigned char *buf, unsigned int len)
{
......@@ -64,7 +66,6 @@ test_md5(void)
char result[128];
struct crypto_tfm *tfm;
struct md5_testvec *md5_tv;
struct hmac_md5_testvec *hmac_md5_tv;
unsigned int tsize;
printk("\ntesting md5\n");
......@@ -129,14 +130,34 @@ test_md5(void)
printk("%s\n",
memcmp(result, md5_tv[4].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" : "pass");
crypto_free_tfm(tfm);
}
printk("\ntesting hmac_md5\n");
#ifdef CONFIG_CRYPTO_HMAC
static void
test_hmac_md5(void)
{
char *p;
unsigned int i, klen;
struct scatterlist sg[2];
char result[128];
struct crypto_tfm *tfm;
struct hmac_md5_testvec *hmac_md5_tv;
unsigned int tsize;
tfm = crypto_alloc_tfm("md5", 0);
if (tfm == NULL) {
printk("failed to load transform for md5\n");
return;
}
printk("\ntesting hmac_md5\n");
tsize = sizeof (hmac_md5_tv_template);
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize,
TVMEMSIZE);
return;
goto out;
}
memcpy(tvmem, hmac_md5_tv_template, tsize);
......@@ -151,8 +172,8 @@ test_md5(void)
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = strlen(hmac_md5_tv[i].plaintext);
crypto_digest_hmac(tfm, hmac_md5_tv[i].key,
strlen(hmac_md5_tv[i].key), sg, 1, result);
klen = strlen(hmac_md5_tv[i].key);
crypto_hmac(tfm, hmac_md5_tv[i].key, &klen, sg, 1, result);
hexdump(result, crypto_tfm_alg_digestsize(tfm));
printk("%s\n",
......@@ -179,16 +200,96 @@ test_md5(void)
sg[1].length = 12;
memset(result, 0, sizeof (result));
crypto_digest_hmac(tfm, hmac_md5_tv[1].key, strlen(hmac_md5_tv[1].key),
sg, 2, result);
klen = strlen(hmac_md5_tv[7].key);
crypto_hmac(tfm, hmac_md5_tv[7].key, &klen, sg, 2, result);
hexdump(result, crypto_tfm_alg_digestsize(tfm));
printk("%s\n",
memcmp(result, hmac_md5_tv[1].digest,
memcmp(result, hmac_md5_tv[7].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" : "pass");
out:
crypto_free_tfm(tfm);
}
static void
test_hmac_sha1(void)
{
char *p;
unsigned int i, klen;
struct crypto_tfm *tfm;
struct hmac_sha1_testvec *hmac_sha1_tv;
struct scatterlist sg[2];
unsigned int tsize;
char result[SHA1_DIGEST_SIZE];
tfm = crypto_alloc_tfm("sha1", 0);
if (tfm == NULL) {
printk("failed to load transform for sha1\n");
return;
}
printk("\ntesting hmac_sha1\n");
tsize = sizeof (hmac_sha1_tv_template);
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize,
TVMEMSIZE);
goto out;
}
memcpy(tvmem, hmac_sha1_tv_template, tsize);
hmac_sha1_tv = (void *) tvmem;
for (i = 0; i < HMAC_SHA1_TEST_VECTORS; i++) {
printk("test %u:\n", i + 1);
memset(result, 0, sizeof (result));
p = hmac_sha1_tv[i].plaintext;
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = strlen(hmac_sha1_tv[i].plaintext);
klen = strlen(hmac_sha1_tv[i].key);
crypto_hmac(tfm, hmac_sha1_tv[i].key, &klen, sg, 1, result);
hexdump(result, sizeof (result));
printk("%s\n",
memcmp(result, hmac_sha1_tv[i].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" :
"pass");
}
printk("\ntesting hmac_sha1 across pages\n");
/* setup the dummy buffer first */
memset(xbuf, 0, sizeof (xbuf));
memcpy(&xbuf[IDX1], "what do ya want ", 16);
memcpy(&xbuf[IDX2], "for nothing?", 12);
p = &xbuf[IDX1];
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = 16;
p = &xbuf[IDX2];
sg[1].page = virt_to_page(p);
sg[1].offset = ((long) p & ~PAGE_MASK);
sg[1].length = 12;
memset(result, 0, sizeof (result));
klen = strlen(hmac_sha1_tv[7].key);
crypto_hmac(tfm, hmac_sha1_tv[7].key, &klen, sg, 2, result);
hexdump(result, crypto_tfm_alg_digestsize(tfm));
printk("%s\n",
memcmp(result, hmac_sha1_tv[7].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" : "pass");
out:
crypto_free_tfm(tfm);
}
#endif /* CONFIG_CRYPTO_HMAC */
static void
test_md4(void)
......@@ -247,7 +348,6 @@ test_sha1(void)
unsigned int i;
struct crypto_tfm *tfm;
struct sha1_testvec *sha1_tv;
struct hmac_sha1_testvec *hmac_sha1_tv;
struct scatterlist sg[2];
unsigned int tsize;
char result[SHA1_DIGEST_SIZE];
......@@ -313,64 +413,6 @@ test_sha1(void)
printk("%s\n",
memcmp(result, sha1_tv[1].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" : "pass");
printk("\ntesting hmac_sha1\n");
tsize = sizeof (hmac_sha1_tv_template);
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize,
TVMEMSIZE);
return;
}
memcpy(tvmem, hmac_sha1_tv_template, tsize);
hmac_sha1_tv = (void *) tvmem;
for (i = 0; i < HMAC_SHA1_TEST_VECTORS; i++) {
printk("test %u:\n", i + 1);
memset(result, 0, sizeof (result));
p = hmac_sha1_tv[i].plaintext;
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = strlen(hmac_sha1_tv[i].plaintext);
crypto_digest_hmac(tfm, hmac_sha1_tv[i].key,
strlen(hmac_sha1_tv[i].key), sg, 1, result);
hexdump(result, sizeof (result));
printk("%s\n",
memcmp(result, hmac_sha1_tv[i].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" :
"pass");
}
printk("\ntesting hmac_sha1 across pages\n");
/* setup the dummy buffer first */
memset(xbuf, 0, sizeof (xbuf));
memcpy(&xbuf[IDX1], "what do ya want ", 16);
memcpy(&xbuf[IDX2], "for nothing?", 12);
p = &xbuf[IDX1];
sg[0].page = virt_to_page(p);
sg[0].offset = ((long) p & ~PAGE_MASK);
sg[0].length = 16;
p = &xbuf[IDX2];
sg[1].page = virt_to_page(p);
sg[1].offset = ((long) p & ~PAGE_MASK);
sg[1].length = 12;
memset(result, 0, sizeof (result));
crypto_digest_hmac(tfm, hmac_sha1_tv[1].key,
strlen(hmac_sha1_tv[1].key), sg, 2, result);
hexdump(result, crypto_tfm_alg_digestsize(tfm));
printk("%s\n",
memcmp(result, hmac_sha1_tv[1].digest,
crypto_tfm_alg_digestsize(tfm)) ? "fail" : "pass");
crypto_free_tfm(tfm);
}
......@@ -1299,6 +1341,19 @@ test_des3_ede(void)
crypto_free_tfm(tfm);
}
static void
test_available(void)
{
char **name = check;
while (*name) {
printk("alg %s ", *name);
printk((crypto_alg_available(*name, 0)) ?
"found\n" : "not found\n");
name++;
}
}
static void
do_test(void)
{
......@@ -1310,6 +1365,10 @@ do_test(void)
test_des();
test_des3_ede();
test_md4();
#ifdef CONFIG_CRYPTO_HMAC
test_hmac_md5();
test_hmac_sha1();
#endif
break;
case 1:
......@@ -1332,6 +1391,21 @@ do_test(void)
test_md4();
break;
#ifdef CONFIG_CRYPTO_HMAC
case 100:
test_hmac_md5();
break;
case 101:
test_hmac_sha1();
break;
#endif
case 1000:
test_available();
break;
default:
/* useful for debugging */
printk("not testing anything\n");
......
......@@ -105,6 +105,7 @@ struct md5_testvec {
0xac, 0x49, 0xda, 0x2e, 0x21, 0x07, 0xb6, 0x7a } }
};
#ifdef CONFIG_CRYPTO_HMAC
/*
* HMAC-MD5 test vectors from RFC2202
* (These need to be fixed to not use strlen).
......@@ -214,7 +215,19 @@ struct hmac_md5_testvec hmac_md5_tv_template[] =
{ 0x6f, 0x63, 0x0f, 0xad, 0x67, 0xcd, 0xa0, 0xee,
0x1f, 0xb1, 0xf5, 0x62, 0xdb, 0x3a, 0xa5, 0x3e }
}
},
/* cross page test, need to retain key */
{
{ 'J', 'e', 'f', 'e', 0 },
"what do ya want for nothing?",
{ 0x75, 0x0c, 0x78, 0x3e, 0x6a, 0xb0, 0xb5, 0x03,
0xea, 0xa8, 0x6e, 0x31, 0x0a, 0x5d, 0xb7, 0x38 }
},
};
......@@ -334,9 +347,22 @@ struct hmac_sha1_testvec {
{ 0xe8, 0xe9, 0x9d, 0x0f, 0x45, 0x23, 0x7d, 0x78, 0x6d, 0x6b,
0xba, 0xa7, 0x96, 0x5c, 0x78, 0x08, 0xbb, 0xff, 0x1a, 0x91 }
}
},
/* cross page test */
{
{ 'J', 'e', 'f', 'e', 0 },
"what do ya want for nothing?",
{ 0xef, 0xfc, 0xdf, 0x6a, 0xe5, 0xeb, 0x2f, 0xa2, 0xd2, 0x74,
0x16, 0xd5, 0xf1, 0x84, 0xdf, 0x9c, 0x25, 0x9a, 0x7c, 0x79 }
},
};
#endif /* CONFIG_CRYPTO_HMAC */
/*
* SHA1 test vectors from from FIPS PUB 180-1
......
......@@ -17,9 +17,11 @@
#define _LINUX_CRYPTO_H
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
#include <asm/page.h>
/*
* Algorithm masks and types.
......@@ -111,6 +113,11 @@ struct crypto_alg {
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
/*
* Algorithm query interface.
*/
int crypto_alg_available(const char *name, u32 flags);
/*
* Transforms: user-instantiated objects which encapsulate algorithms
* and core processing logic. Managed via crypto_alloc_tfm() and
......@@ -136,9 +143,6 @@ struct digest_tfm {
void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
unsigned int nsg, u8 *out);
void (*dit_hmac)(struct crypto_tfm *tfm, u8 *key,
unsigned int keylen, struct scatterlist *sg,
unsigned int nsg, u8 *out);
};
struct compress_tfm {
......@@ -153,6 +157,7 @@ struct compress_tfm {
struct crypto_tfm {
void *crt_ctx;
void *crt_work_block;
u32 crt_flags;
union {
......@@ -254,16 +259,6 @@ static inline void crypto_digest_digest(struct crypto_tfm *tfm,
tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
}
static inline void crypto_digest_hmac(struct crypto_tfm *tfm,
u8 *key, unsigned int keylen,
struct scatterlist *sg,
unsigned int nsg, u8 *out)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_hmac(tfm, key, keylen, sg, nsg, out);
}
static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
......@@ -313,4 +308,18 @@ static inline void crypto_comp_decompress(struct crypto_tfm *tfm)
tfm->crt_compress.cot_decompress(tfm);
}
/*
* HMAC support.
*/
#ifdef CONFIG_CRYPTO_HMAC
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
void crypto_hmac_update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg);
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
unsigned int *keylen, u8 *out);
void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
struct scatterlist *sg, unsigned int nsg, u8 *out);
#endif /* CONFIG_CRYPTO_HMAC */
#endif /* _LINUX_CRYPTO_H */
......@@ -69,6 +69,7 @@ struct in_addr {
#define IP_RECVTOS 13
#define IP_MTU 14
#define IP_FREEBIND 15
#define IP_IPSEC_POLICY 16
/* BSD compatibility */
#define IP_RECVRETOPTS IP_RETOPTS
......
/*
* Definitions for the SECurity layer
*
* Author:
* Robert Muchsel <muchsel@acm.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IPSEC_H
#define _LINUX_IPSEC_H
#include <linux/config.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <linux/skbuff.h>
/* The definitions, required to talk to KAME racoon IKE. */
/* Values for the set/getsockopt calls */
#include <linux/pfkeyv2.h>
/* These defines are compatible with NRL IPv6, however their semantics
is different */
#define IPSEC_PORT_ANY 0
#define IPSEC_ULPROTO_ANY 255
#define IPSEC_PROTO_ANY 255
#define IPSEC_LEVEL_NONE -1 /* send plaintext, accept any */
#define IPSEC_LEVEL_DEFAULT 0 /* encrypt/authenticate if possible */
/* the default MUST be 0, because a */
/* socket is initialized with 0's */
#define IPSEC_LEVEL_USE 1 /* use outbound, don't require inbound */
#define IPSEC_LEVEL_REQUIRE 2 /* require both directions */
#define IPSEC_LEVEL_UNIQUE 2 /* for compatibility only */
enum {
IPSEC_MODE_ANY = 0, /* We do not support this for SA */
IPSEC_MODE_TRANSPORT = 1,
IPSEC_MODE_TUNNEL = 2
};
#ifdef __KERNEL__
enum {
IPSEC_DIR_ANY = 0,
IPSEC_DIR_INBOUND = 1,
IPSEC_DIR_OUTBOUND = 2,
IPSEC_DIR_FWD = 3, /* It is our own */
IPSEC_DIR_MAX = 4,
IPSEC_DIR_INVALID = 5
};
/* skb bit flags set on packet input processing */
enum {
IPSEC_POLICY_DISCARD = 0,
IPSEC_POLICY_NONE = 1,
IPSEC_POLICY_IPSEC = 2,
IPSEC_POLICY_ENTRUST = 3,
IPSEC_POLICY_BYPASS = 4
};
#define RCV_SEC 0x0f /* options on receive */
#define RCV_AUTH 0x01 /* was authenticated */
#define RCV_CRYPT 0x02 /* was encrypted */
#define RCV_TUNNEL 0x04 /* was tunneled */
#define SND_SEC 0xf0 /* options on send, these are */
#define SND_AUTH 0x10 /* currently unused */
#define SND_CRYPT 0x20
#define SND_TUNNEL 0x40
enum {
IPSEC_LEVEL_DEFAULT = 0,
IPSEC_LEVEL_USE = 1,
IPSEC_LEVEL_REQUIRE = 2,
IPSEC_LEVEL_UNIQUE = 3
};
/*
* FIXME: ignores network encryption for now..
*/
#ifdef CONFIG_NET_SECURITY
static __inline__ int ipsec_sk_policy(struct sock *sk, struct sk_buff *skb)
{
return ((sk->authentication < IPSEC_LEVEL_REQUIRE) ||
(skb->security & RCV_AUTH)) &&
((sk->encryption < IPSEC_LEVEL_REQUIRE) ||
(skb->security & RCV_CRYPT));
}
#define IPSEC_MANUAL_REQID_MAX 0x3fff
#else
#define IPSEC_REPLAYWSIZE 32
static __inline__ int ipsec_sk_policy(struct sock *sk, struct sk_buff *skb)
{
return 1;
}
#endif /* CONFIG */
#endif /* __KERNEL__ */
#endif /* _LINUX_IPSEC_H */
......@@ -187,7 +187,7 @@ struct sadb_x_policy {
struct sadb_x_ipsecrequest {
uint16_t sadb_x_ipsecrequest_len;
uint16_t sadb_x_ipsecrequest_exttype;
uint16_t sadb_x_ipsecrequest_proto;
uint8_t sadb_x_ipsecrequest_mode;
uint8_t sadb_x_ipsecrequest_level;
uint16_t sadb_x_ipsecrequest_reqid;
......@@ -249,8 +249,8 @@ struct sadb_x_ipsecrequest {
/* Encryption algorithms */
#define SADB_EALG_NONE 0
#define SADB_EALG_DESCBC 2
#define SADB_EALG_3DESCBC 3
#define SADB_EALG_DESCBC 1
#define SADB_EALG_3DESCBC 2
#define SADB_EALG_NULL 11
#define SADB_EALG_MAX 11
......
......@@ -44,6 +44,7 @@ struct dst_entry
#define DST_HOST 1
#define DST_NOXFRM 2
#define DST_NOPOLICY 4
#define DST_NOHASH 8
unsigned long lastuse;
unsigned long expires;
......@@ -138,8 +139,15 @@ struct dst_entry * dst_clone(struct dst_entry * dst)
static inline
void dst_release(struct dst_entry * dst)
{
if (dst)
if (dst) {
if (atomic_read(&dst->__refcnt) < 1) {
__label__ __lbl;
printk("BUG: dst underflow %d: %p\n",
atomic_read(&dst->__refcnt), &&__lbl);
__lbl: ;
}
atomic_dec(&dst->__refcnt);
}
}
/* Children define the path of the packet through the
......@@ -237,7 +245,7 @@ extern void dst_init(void);
struct flowi;
extern int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags);
extern void xfrm_init(void);
#endif
......
......@@ -38,6 +38,7 @@ struct inet_protocol
{
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
int no_policy;
};
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
......
......@@ -114,6 +114,7 @@ extern void ip_rt_advice(struct rtable **rp, int advice);
extern void rt_cache_flush(int how);
extern int __ip_route_output_key(struct rtable **, const struct flowi *flp);
extern int ip_route_output_key(struct rtable **, struct flowi *flp);
extern int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
extern int ip_route_input(struct sk_buff*, u32 dst, u32 src, u8 tos, struct net_device *devin);
extern unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu);
extern void ip_rt_send_redirect(struct sk_buff *skb);
......
......@@ -107,6 +107,7 @@ struct sock {
wait_queue_head_t *sleep; /* Sock wait queue */
struct dst_entry *dst_cache; /* Destination cache */
rwlock_t dst_lock;
struct xfrm_policy *policy[2];
atomic_t rmem_alloc; /* Receive queue bytes committed */
struct sk_buff_head receive_queue; /* Incoming packets */
atomic_t wmem_alloc; /* Transmit queue bytes committed */
......
......@@ -3,6 +3,7 @@
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/crypto.h>
#include <net/dst.h>
#include <net/route.h>
......@@ -113,6 +114,38 @@ struct xfrm_selector
void *owner;
};
struct xfrm_lifetime_cfg
{
u64 soft_byte_limit;
u64 hard_byte_limit;
u64 soft_packet_limit;
u64 hard_packet_limit;
u64 soft_add_expires_seconds;
u64 hard_add_expires_seconds;
u64 soft_use_expires_seconds;
u64 hard_use_expires_seconds;
};
struct xfrm_lifetime_cur
{
u64 bytes;
u64 packets;
u64 add_time;
u64 use_time;
};
struct xfrm_replay_state
{
u32 oseq;
u32 seq;
u32 bitmap;
};
struct xfrm_algo {
char alg_name[CRYPTO_MAX_ALG_NAME];
int alg_key_len; /* in bits */
char alg_key[0];
};
/* Full description of state of transformer. */
struct xfrm_state
......@@ -130,40 +163,39 @@ struct xfrm_state
struct {
int state;
u32 seq;
u32 warn_bytes;
u64 warn_bytes;
} km;
/* Parameters of this state. */
struct {
u8 mode;
u8 algo;
u8 replay_window;
u8 aalgo, ealgo, calgo;
u16 reqid;
xfrm_address_t saddr;
int header_len;
int trailer_len;
u32 hard_byte_limit;
u32 soft_byte_limit;
u32 replay_window;
/* More... */
} props;
struct xfrm_lifetime_cfg lft;
/* Data for transformer */
struct xfrm_algo *aalg;
struct xfrm_algo *ealg;
struct xfrm_algo *calg;
/* State for replay detection */
struct {
u32 oseq;
u32 seq;
u32 bitmap;
} replay;
struct xfrm_replay_state replay;
/* Statistics */
struct {
unsigned long lastuse;
unsigned long expires;
u32 bytes;
u32 replay_window;
u32 replay;
u32 integrity_failed;
/* More... */
} stats;
struct xfrm_lifetime_cur curlft;
/* Reference to data common to all the instances of this
* transformer. */
struct xfrm_type *type;
......@@ -182,14 +214,12 @@ enum {
XFRM_STATE_DEAD
};
#define XFRM_DST_HSIZE 1024
struct xfrm_type
{
char *description;
atomic_t refcnt;
struct module *owner;
__u8 proto;
__u8 algo;
int (*init_state)(struct xfrm_state *x, void *args);
void (*destructor)(struct xfrm_state *);
......@@ -199,6 +229,11 @@ struct xfrm_type
u32 (*get_max_size)(struct xfrm_state *, int size);
};
extern int xfrm_register_type(struct xfrm_type *type);
extern int xfrm_unregister_type(struct xfrm_type *type);
extern struct xfrm_type *xfrm_get_type(u8 proto);
extern void xfrm_put_type(struct xfrm_type *type);
struct xfrm_tmpl
{
/* id in template is interpreted as:
......@@ -212,6 +247,8 @@ struct xfrm_tmpl
/* Source address of tunnel. Ignored, if it is not a tunnel. */
xfrm_address_t saddr;
__u16 reqid;
/* Mode: transport/tunnel */
__u8 mode;
......@@ -219,7 +256,9 @@ struct xfrm_tmpl
__u8 share;
/* Bit mask of algos allowed for acquisition */
__u32 algos;
__u32 aalgos;
__u32 ealgos;
__u32 calgos;
/* If template statically resolved, hold ref here */
struct xfrm_state *resolved;
......@@ -238,8 +277,8 @@ enum
enum
{
XFRM_POLICY_IN = 0,
XFRM_POLICY_FWD = 1,
XFRM_POLICY_OUT = 2,
XFRM_POLICY_OUT = 1,
XFRM_POLICY_FWD = 2,
XFRM_POLICY_MAX = 3
};
......@@ -254,8 +293,8 @@ struct xfrm_policy
u32 priority;
u32 index;
struct xfrm_selector selector;
unsigned long expires;
unsigned long lastuse;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
struct dst_entry *bundles;
__u8 action;
#define XFRM_POLICY_ALLOW 0
......@@ -267,6 +306,19 @@ struct xfrm_policy
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
};
struct xfrm_mgr
{
struct list_head list;
char *id;
int (*notify)(struct xfrm_state *x, int event);
int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
struct xfrm_policy *(*compile_policy)(int opt, u8 *data, int len, int *dir);
};
extern int xfrm_register_km(struct xfrm_mgr *km);
extern int xfrm_unregister_km(struct xfrm_mgr *km);
extern struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX];
static inline void xfrm_pol_hold(struct xfrm_policy *policy)
......@@ -346,13 +398,16 @@ secpath_put(struct sec_path *sp)
__secpath_destroy(sp);
}
extern int __xfrm_policy_check(int dir, struct sk_buff *skb);
extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb);
static inline int xfrm_policy_check(int dir, struct sk_buff *skb)
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
if (sk && sk->policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, dir, skb);
return !xfrm_policy_list[dir] ||
(skb->dst->flags & DST_NOPOLICY) ||
__xfrm_policy_check(dir, skb);
__xfrm_policy_check(sk, dir, skb);
}
extern int __xfrm_route_forward(struct sk_buff *skb);
......@@ -366,21 +421,37 @@ static inline int xfrm_route_forward(struct sk_buff *skb)
extern void xfrm_state_init(void);
extern void xfrm_input_init(void);
extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
extern struct xfrm_state *xfrm_state_alloc(void);
extern struct xfrm_state *xfrm_state_find(u32 daddr, struct flowi *fl, struct xfrm_tmpl *tmpl);
extern struct xfrm_state *xfrm_state_find(u32 daddr, struct flowi *fl, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol);
extern int xfrm_state_check_expire(struct xfrm_state *x);
extern void xfrm_state_insert(struct xfrm_state *x);
extern int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb);
extern struct xfrm_state * xfrm_state_lookup(u32 daddr, u32 spi, u8 proto);
extern struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl);
extern struct xfrm_state *xfrm_state_lookup(u32 daddr, u32 spi, u8 proto);
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
extern void xfrm_state_delete(struct xfrm_state *x);
extern void xfrm_state_flush(u8 proto);
extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
extern int xfrm_check_selectors(struct xfrm_state **x, int n, struct flowi *fl);
extern int xfrm4_rcv(struct sk_buff *skb);
extern int xfrm_user_policy(struct sock *sk, int optname, u8 *optval, int optlen);
struct xfrm_policy *xfrm_policy_alloc(void);
extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_delete(int dir, struct xfrm_selector *sel);
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete);
void xfrm_policy_flush(void);
void xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state * xfrm_find_acq(u8 mode, u16 reqid, u8 proto, u32 daddr, u32 saddr);
extern void xfrm_policy_flush(void);
extern void xfrm_policy_kill(struct xfrm_policy *);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
extern struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl);
extern wait_queue_head_t *km_waitq;
extern void km_notify(struct xfrm_state *x, int event);
extern int km_query(struct xfrm_state *x);
extern int ah4_init(void);
extern void km_warn_expired(struct xfrm_state *x);
extern void km_expired(struct xfrm_state *x);
extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *pol);
......@@ -139,6 +139,15 @@ config UNIX
Say Y unless you know what you are doing.
config NET_KEY
tristate "PF_KEY sockets"
---help---
PF_KEYv2 socket family, compatible with the KAME implementation.
It is required if you are going to use IPsec tools ported
from KAME.
Say Y unless you know what you are doing.
config INET
bool "TCP/IP networking"
---help---
......@@ -621,5 +630,25 @@ source "net/sched/Kconfig"
#bool 'Network code profiler' CONFIG_NET_PROFILE
endmenu
menu "Network testing"
config NET_PKTGEN
tristate "Packet Generator (USE WITH CAUTION)"
---help---
This module will inject preconfigured packets, at a configurable
rate, out of a given interface. It is used for network interface
stress testing and performance analysis. If you don't understand
what was just said, you don't need it: say N.
Documentation on how to use the packet generator can be found
at <file:Documentation/networking/pktgen.txt>.
This code is also available as a module called pktgen.o ( = code
which can be inserted in and removed from the running kernel
whenever you want). If you want to compile it as a module, say M
here and read <file:Documentation/modules.txt>.
endmenu
endmenu
......@@ -16,6 +16,7 @@ obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_UNIX) += unix/
obj-$(CONFIG_IPV6) += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_NET_SCHED) += sched/
obj-$(CONFIG_BRIDGE) += bridge/
obj-$(CONFIG_IPX) += ipx/
......
......@@ -57,7 +57,8 @@ static struct rtable __fake_rtable = {
dst: {
__refcnt: ATOMIC_INIT(1),
dev: &__fake_net_device,
pmtu: 1500
path: &__fake_rtable.u.dst,
metrics: {[RTAX_MTU] 1500},
}
},
......@@ -109,8 +110,8 @@ static void __br_dnat_complain(void)
* Let us now consider the case that ip_route_input() fails:
*
* After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
* will fail, while ip_route_output() will return success. The source
* address for ip_route_output() is set to zero, so ip_route_output()
* will fail, while __ip_route_output_key() will return success. The source
* address for __ip_route_output_key() is set to zero, so __ip_route_output_key
* thinks we're handling a locally generated packet and won't care
if IP forwarding is allowed. We send a warning message to the user's
* log telling her to put IP forwarding on.
......@@ -158,8 +159,11 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
dev)) {
struct rtable *rt;
struct flowi fl = { .nl_u =
{ .ip4_u = { .daddr = iph->daddr, .saddr = 0 ,
.tos = iph->tos} }, .proto = 0};
if (!ip_route_output(&rt, iph->daddr, 0, iph->tos, 0)) {
if (!ip_route_output_key(&rt, &fl)) {
/* Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding.
*/
......
......@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += dev.o dev_mcast.o dst.o neighbour.o rtnetlink.o utils.o
obj-$(CONFIG_NETFILTER) += netfilter.o
obj-$(CONFIG_NET_DIVERT) += dv.o
obj-$(CONFIG_NET_PROFILE) += profile.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
obj-$(CONFIG_NET_RADIO) += wireless.o
# Ugly. I wish all wireless drivers were moved in drivers/net/wireless
obj-$(CONFIG_NET_PCMCIA_RADIO) += wireless.o
......
......@@ -36,6 +36,7 @@ static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;
static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);
static void ___dst_free(struct dst_entry * dst);
static struct timer_list dst_gc_timer =
{ data: DST_GC_MIN, function: dst_run_gc };
......@@ -59,12 +60,26 @@ static void dst_run_gc(unsigned long dummy)
delayed++;
continue;
}
if (dst->child) {
dst->child->next = dst->next;
*dstp = dst->child;
} else
*dstp = dst->next;
dst_destroy(dst);
*dstp = dst->next;
dst = dst_destroy(dst);
if (dst) {
/* NOHASH and still referenced. Unless it is already
* on gc list, invalidate it and add to gc list.
*
* Note: this is temporary. Actually, NOHASH dst's
* must be obsoleted when parent is obsoleted.
* But we do not have state "obsoleted, but
* referenced by parent", so it is right.
*/
if (dst->obsolete > 1)
continue;
___dst_free(dst);
dst->next = *dstp;
*dstp = dst;
dstp = &dst->next;
}
}
if (!dst_garbage_list) {
dst_gc_timer_inc = DST_GC_MAX;
......@@ -120,10 +135,8 @@ void * dst_alloc(struct dst_ops * ops)
return dst;
}
void __dst_free(struct dst_entry * dst)
static void ___dst_free(struct dst_entry * dst)
{
spin_lock_bh(&dst_lock);
/* The first case (dev==NULL) is required, when
protocol module is unloaded.
*/
......@@ -132,6 +145,12 @@ void __dst_free(struct dst_entry * dst)
dst->output = dst_blackhole;
}
dst->obsolete = 2;
}
void __dst_free(struct dst_entry * dst)
{
spin_lock_bh(&dst_lock);
___dst_free(dst);
dst->next = dst_garbage_list;
dst_garbage_list = dst;
if (dst_gc_timer_inc > DST_GC_INC) {
......@@ -141,7 +160,6 @@ void __dst_free(struct dst_entry * dst)
dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
add_timer(&dst_gc_timer);
}
spin_unlock_bh(&dst_lock);
}
......@@ -177,10 +195,19 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
kmem_cache_free(dst->ops->kmem_cachep, dst);
dst = child;
if (dst && !atomic_read(&dst->__refcnt))
goto again;
return dst;
if (dst) {
if (atomic_dec_and_test(&dst->__refcnt)) {
/* We were real parent of this dst, so kill child. */
if (dst->flags&DST_NOHASH)
goto again;
} else {
/* Child is still referenced, return it for freeing. */
if (dst->flags&DST_NOHASH)
return dst;
/* Child is still in his hash table */
}
}
return NULL;
}
static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
......
......@@ -348,5 +348,19 @@ config SYN_COOKIES
If unsure, say N.
config INET_AH
tristate "IP: AH transformation"
---help---
Support for IPsec AH.
If unsure, say Y.
config INET_ESP
tristate "IP: ESP transformation"
---help---
Support for IPsec ESP.
If unsure, say Y.
source "net/ipv4/netfilter/Kconfig"
......@@ -16,9 +16,11 @@ obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah.o
obj-$(CONFIG_INET_ESP) += esp.o
obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter/
obj-y += xfrm_policy.o xfrm_state.o xfrm_input.o ah.o
obj-y += xfrm_policy.o xfrm_state.o xfrm_input.o
include $(TOPDIR)/Rules.make
......@@ -1038,11 +1038,13 @@ static struct inet_protocol igmp_protocol = {
static struct inet_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.no_policy = 1,
};
static struct inet_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.no_policy = 1,
};
static struct inet_protocol icmp_protocol = {
......
#include <linux/config.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <net/icmp.h>
#include <asm/scatterlist.h>
struct ah_data
{
u8 *key;
int key_len;
u8 *work_digest;
int digest_len;
void (*digest)(struct xfrm_state*,
void (*digest)(struct ah_data*,
struct sk_buff *skb,
u8 *digest);
......@@ -67,12 +72,19 @@ void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
int len = skb->len;
int start = skb->len - skb->data_len;
int i, copy = start - offset;
struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx, skb->data+offset, copy);
sg.page = virt_to_page(skb->data + offset);
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
crypto_hmac_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
offset += copy;
......@@ -85,14 +97,17 @@ void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
u8 *vaddr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
vaddr = kmap_skb_frag(frag);
tfm->__crt_alg->cra_digest.dia_update(tfm->crt_ctx, vaddr+frag->page_offset+offset-start, copy);
kunmap_skb_frag(vaddr);
sg.page = frag->page;
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
crypto_hmac_update(tfm, &sg, 1);
if (!(len -= copy))
return;
offset += copy;
......@@ -124,6 +139,18 @@ void skb_ah_walk(const struct sk_buff *skb, struct crypto_tfm *tfm)
BUG();
}
static void
ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
{
struct crypto_tfm *tfm = ahp->tfm;
memset(auth_data, 0, ahp->digest_len);
crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
skb_ah_walk(skb, tfm);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_digest);
memcpy(auth_data, ahp->work_digest, ahp->digest_len);
}
int ah_output(struct sk_buff *skb)
{
int err;
......@@ -149,12 +176,13 @@ int ah_output(struct sk_buff *skb)
iph = skb->nh.iph;
if (x->props.mode) {
top_iph = (struct iphdr*)skb_push(skb, x->props.header_len);
top_iph->ihl = 4;
top_iph->version = 5;
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->tos = 0;
top_iph->tot_len = htons(skb->len);
top_iph->id = inet_getid(((struct rtable*)dst)->peer, 0);
top_iph->frag_off = 0;
if (!(iph->frag_off&htons(IP_DF)))
__ip_select_ident(top_iph, dst, 0);
top_iph->ttl = 0;
top_iph->protocol = IPPROTO_AH;
top_iph->check = 0;
......@@ -186,7 +214,7 @@ int ah_output(struct sk_buff *skb)
ah->reserved = 0;
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
ahp->digest(x, skb, ah->auth_data);
ahp->digest(ahp, skb, ah->auth_data);
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
if (x->props.mode) {
......@@ -202,7 +230,7 @@ int ah_output(struct sk_buff *skb)
skb->nh.raw = skb->data;
x->stats.bytes += skb->len;
x->curlft.bytes += skb->len;
spin_unlock_bh(&x->lock);
if ((skb->dst = dst_pop(dst)) == NULL)
goto error;
......@@ -258,7 +286,7 @@ int ah_input(struct xfrm_state *x, struct sk_buff *skb)
u8 auth_data[ahp->digest_len];
memcpy(auth_data, ah->auth_data, ahp->digest_len);
skb_push(skb, skb->data - skb->nh.raw);
ahp->digest(x, skb, ah->auth_data);
ahp->digest(ahp, skb, ah->auth_data);
if (memcmp(ah->auth_data, auth_data, ahp->digest_len)) {
x->stats.integrity_failed++;
goto out;
......@@ -295,16 +323,90 @@ void ah4_err(struct sk_buff *skb, u32 info)
xfrm_state_put(x);
}
int ah_init_state(struct xfrm_state *x, void *args)
{
struct ah_data *ahp = NULL;
if (x->aalg == NULL || x->aalg->alg_key_len == 0 ||
x->aalg->alg_key_len > 512)
goto error;
ahp = kmalloc(sizeof(*ahp), GFP_KERNEL);
if (ahp == NULL)
return -ENOMEM;
memset(ahp, 0, sizeof(*ahp));
ahp->key = x->aalg->alg_key;
ahp->key_len = (x->aalg->alg_key_len+7)/8;
ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (!ahp->tfm)
goto error;
ahp->digest = ah_hmac_digest;
ahp->digest_len = 12;
ahp->work_digest = kmalloc(crypto_tfm_alg_digestsize(ahp->tfm),
GFP_KERNEL);
if (!ahp->work_digest)
goto error;
x->props.header_len = (12 + ahp->digest_len + 7)&~7;
if (x->props.mode)
x->props.header_len += 20;
x->data = ahp;
return 0;
error:
if (ahp) {
if (ahp->work_digest)
kfree(ahp->work_digest);
if (ahp->tfm)
crypto_free_tfm(ahp->tfm);
kfree(ahp);
}
return -EINVAL;
}
void ah_destroy(struct xfrm_state *x)
{
struct ah_data *ahp = x->data;
if (ahp->work_digest) {
kfree(ahp->work_digest);
ahp->work_digest = NULL;
}
if (ahp->tfm) {
crypto_free_tfm(ahp->tfm);
ahp->tfm = NULL;
}
}
static struct xfrm_type ah_type =
{
.description = "AH4",
.proto = IPPROTO_AH,
.init_state = ah_init_state,
.destructor = ah_destroy,
.input = ah_input,
.output = ah_output
};
static struct inet_protocol ah4_protocol = {
.handler = xfrm4_rcv,
.err_handler = ah4_err,
.no_policy = 1,
};
int __init ah4_init(void)
{
SET_MODULE_OWNER(&ah_type);
if (xfrm_register_type(&ah_type) < 0) {
printk(KERN_INFO "ip ah init: can't add xfrm type\n");
return -EAGAIN;
}
if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
printk(KERN_INFO "ip ah init: can't add protocol\n");
xfrm_unregister_type(&ah_type);
return -EAGAIN;
}
return 0;
......@@ -314,4 +416,10 @@ static void __exit ah4_fini(void)
{
if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
printk(KERN_INFO "ip ah close: can't remove protocol\n");
if (xfrm_unregister_type(&ah_type) < 0)
printk(KERN_INFO "ip ah close: can't remove xfrm type\n");
}
module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
......@@ -1026,7 +1026,8 @@ static int fib_seq_show(struct seq_file *seq, void *v)
"%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
fi->fib_dev ? fi->fib_dev->name : "*", prefix,
fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
mask, fi->fib_advmss + 40, fi->fib_window,
mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
fi->fib_window,
fi->fib_rtt >> 3);
else
snprintf(bf, sizeof(bf),
......
......@@ -60,7 +60,7 @@ int ip_forward(struct sk_buff *skb)
struct rtable *rt; /* Route we use */
struct ip_options * opt = &(IPCB(skb)->opt);
if (!xfrm_policy_check(XFRM_POLICY_FWD, skb))
if (!xfrm_policy_check(NULL, XFRM_POLICY_FWD, skb))
goto drop;
if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
......
......@@ -523,11 +523,11 @@ void ipgre_err(struct sk_buff *skb, u32 info)
/* change mtu on this route */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
if (rel_info > skb2->dst->pmtu) {
if (rel_info > dst_pmtu(skb2->dst)) {
kfree_skb(skb2);
return;
}
skb2->dst->pmtu = rel_info;
skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
rel_info = htonl(rel_info);
} else if (type == ICMP_TIME_EXCEEDED) {
struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
......
......@@ -223,15 +223,6 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
struct inet_protocol *ipprot;
resubmit:
/* Fuck... This IS ugly. */
if (protocol != IPPROTO_AH &&
protocol != IPPROTO_ESP &&
!xfrm_policy_check(XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return 0;
}
hash = protocol & (MAX_INET_PROTOS - 1);
raw_sk = raw_v4_htable[hash];
......@@ -242,7 +233,14 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
raw_v4_input(skb, skb->nh.iph, hash);
if ((ipprot = inet_protos[hash]) != NULL) {
int ret = ipprot->handler(skb);
int ret;
if (!ipprot->no_policy &&
!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return 0;
}
ret = ipprot->handler(skb);
if (ret < 0) {
protocol = -ret;
goto resubmit;
......@@ -250,9 +248,11 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
IP_INC_STATS_BH(IpInDelivers);
} else {
if (!raw_sk) {
IP_INC_STATS_BH(IpInUnknownProtos);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
if (xfrm_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP_INC_STATS_BH(IpInUnknownProtos);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
IP_INC_STATS_BH(IpInDelivers);
kfree_skb(skb);
......
......@@ -321,7 +321,7 @@ int ip_queue_xmit(struct sk_buff *skb)
* keep trying until route appears or the connection times
* itself out.
*/
if (ip_route_output_key(&rt, &fl))
if (ip_route_output_flow(&rt, &fl, sk, 0))
goto no_route;
}
__sk_dst_set(sk, &rt->u.dst);
......@@ -1220,6 +1220,10 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
{ .daddr = daddr,
.saddr = rt->rt_spec_dst,
.tos = RT_TOS(skb->nh.iph->tos) } },
/* Not quite clean, but right. */
.uli_u = { .ports =
{ .sport = skb->h.th->dest,
.dport = skb->h.th->source } },
.proto = sk->protocol };
if (ip_route_output_key(&rt, &fl))
return;
......
......@@ -36,6 +36,7 @@
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/route.h>
#include <net/xfrm.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/transp_v6.h>
#endif
......@@ -624,6 +625,10 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
inet->freebind = !!val;
break;
case IP_IPSEC_POLICY:
err = xfrm_user_policy(sk, optname, optval, optlen);
break;
default:
#ifdef CONFIG_NETFILTER
err = nf_setsockopt(sk, PF_INET, optname, optval,
......
......@@ -452,11 +452,11 @@ void ipip_err(struct sk_buff *skb, u32 info)
/* change mtu on this route */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
if (rel_info > skb2->dst->pmtu) {
if (rel_info > dst_pmtu(skb2->dst)) {
kfree_skb(skb2);
return;
}
skb2->dst->pmtu = rel_info;
skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
rel_info = htonl(rel_info);
} else if (type == ICMP_TIME_EXCEEDED) {
struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
......
......@@ -1111,7 +1111,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
if (skb->len <= dst->pmtu)
if (skb->len <= dst_pmtu(dst))
return dst->output(skb);
else
return ip_fragment(skb, dst->output);
......@@ -1167,7 +1167,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
dev = rt->u.dst.dev;
if (skb->len+encap > rt->u.dst.pmtu && (ntohs(iph->frag_off) & IP_DF)) {
if (skb->len+encap > dst_pmtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
allow to send ICMP, so that packets will disappear
to blackhole.
......
......@@ -85,14 +85,14 @@ ipt_tcpmss_target(struct sk_buff **pskb,
return NF_DROP; /* or IPT_CONTINUE ?? */
}
if((*pskb)->dst->pmtu <= (sizeof(struct iphdr) + sizeof(struct tcphdr))) {
if(dst_pmtu((*pskb)->dst) <= (sizeof(struct iphdr) + sizeof(struct tcphdr))) {
if (net_ratelimit())
printk(KERN_ERR
"ipt_tcpmss_target: unknown or invalid path-MTU (%d)\n", (*pskb)->dst->pmtu);
"ipt_tcpmss_target: unknown or invalid path-MTU (%d)\n", dst_pmtu((*pskb)->dst));
return NF_DROP; /* or IPT_CONTINUE ?? */
}
newmss = (*pskb)->dst->pmtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
newmss = dst_pmtu((*pskb)->dst) - sizeof(struct iphdr) - sizeof(struct tcphdr);
} else
newmss = tcpmssinfo->mss;
......
......@@ -38,8 +38,8 @@ match(const struct sk_buff *skb,
& ((const unsigned long *)info->in_mask)[i];
}
if ((ret != 0) ^ !(info->invert & IPT_PHYSDEV_OP_MATCH_IN))
return 1;
if ((ret == 0) ^ !(info->invert & IPT_PHYSDEV_OP_MATCH_IN))
return 0;
for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
ret |= (((const unsigned long *)outdev)[i]
......
......@@ -64,6 +64,7 @@
#include <net/raw.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/netfilter_ipv4.h>
struct sock *raw_v4_htable[RAWV4_HTABLE_SIZE];
......@@ -237,6 +238,11 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb_push(skb, skb->data - skb->nh.raw);
raw_rcv_skb(sk, skb);
......@@ -425,7 +431,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
.saddr = saddr,
.tos = tos } },
.proto = IPPROTO_RAW };
err = ip_route_output_key(&rt, &fl);
err = ip_route_output_flow(&rt, &fl, sk, msg->msg_flags&MSG_DONTWAIT);
}
if (err)
goto done;
......
......@@ -2034,6 +2034,15 @@ int ip_route_output_key(struct rtable **rp, struct flowi *flp)
return flp->proto ? xfrm_lookup((struct dst_entry**)rp, flp, NULL, 0) : 0;
}
int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
{
int err;
if ((err = __ip_route_output_key(rp, flp)) != 0)
return err;
return flp->proto ? xfrm_lookup((struct dst_entry**)rp, flp, sk, flags) : 0;
}
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
int nowait)
{
......
......@@ -3454,10 +3454,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
__set_current_state(TASK_RUNNING);
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
NET_INC_STATS_BH(TCPHPHitsToUser);
eaten = 1;
/* Predicted packet is in window by definition.
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
......@@ -3467,6 +3463,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
TCPOLEN_TSTAMP_ALIGNED) &&
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
NET_INC_STATS_BH(TCPHPHitsToUser);
eaten = 1;
}
}
if (!eaten) {
......
......@@ -64,11 +64,11 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/ipsec.h>
extern int sysctl_ip_dynaddr;
extern int sysctl_ip_default_ttl;
......@@ -1299,7 +1299,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
{ .sport = inet_sk(sk)->sport,
.dport = req->rmt_port } } };
if (ip_route_output_key(&rt, &fl)) {
if (ip_route_output_flow(&rt, &fl, sk, 0)) {
IP_INC_STATS_BH(IpOutNoRoutes);
return NULL;
}
......@@ -1796,12 +1796,12 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto no_tcp_socket;
process:
if (!ipsec_sk_policy(sk, skb))
goto discard_and_relse;
if (sk->state == TCP_TIME_WAIT)
goto do_time_wait;
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
skb->dev = NULL;
bh_lock_sock(sk);
......@@ -1818,6 +1818,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
return ret;
no_tcp_socket:
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
TCP_INC_STATS_BH(TcpInErrs);
......@@ -1835,6 +1838,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto discard_it;
do_time_wait:
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TcpInErrs);
goto discard_and_relse;
......@@ -1950,7 +1956,7 @@ int tcp_v4_rebuild_header(struct sock *sk)
{ .sport = inet->sport,
.dport = inet->dport } } };
err = ip_route_output_key(&rt, &fl);
err = ip_route_output_flow(&rt, &fl, sk, 0);
}
if (!err) {
__sk_dst_set(sk, &rt->u.dst);
......
......@@ -104,6 +104,7 @@
#include <net/route.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
/*
* Snmp MIB for the UDP layer
......@@ -600,7 +601,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
.uli_u = { .ports =
{ .sport = inet->sport,
.dport = dport } } };
err = ip_route_output_key(&rt, &fl);
err = ip_route_output_flow(&rt, &fl, sk, msg->msg_flags&MSG_DONTWAIT);
if (err)
goto out;
......@@ -943,6 +944,10 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
/*
* Charge it to the socket, dropping if the queue is full.
*/
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return -1;
}
#if defined(CONFIG_FILTER)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
......@@ -1070,6 +1075,9 @@ int udp_rcv(struct sk_buff *skb)
return 0;
}
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
/* No socket. Drop packet silently, if checksum is wrong */
if (udp_checksum_complete(skb))
goto csum_error;
......@@ -1110,6 +1118,7 @@ int udp_rcv(struct sk_buff *skb)
NIPQUAD(daddr),
ntohs(uh->dest),
ulen));
drop:
UDP_INC_STATS_BH(UdpInErrors);
kfree_skb(skb);
return(0);
......
......@@ -123,8 +123,10 @@ int xfrm4_rcv(struct sk_buff *skb)
skb->sp->len += xfrm_nr;
if (decaps) {
dst_release(skb->dst);
skb->dst = NULL;
if (!(skb->dev->flags&IFF_LOOPBACK)) {
dst_release(skb->dst);
skb->dst = NULL;
}
netif_rx(skb);
return 0;
} else {
......
......@@ -107,7 +107,8 @@ struct rt6_info ip6_null_entry = {
.error = -ENETUNREACH,
.input = ip6_pkt_discard,
.output = ip6_pkt_discard,
.ops = &ip6_dst_ops
.ops = &ip6_dst_ops,
.path = (struct dst_entry*)&ip6_null_entry,
}
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
......@@ -1252,6 +1253,9 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
*/
idev = __in6_dev_get(arg->dev);
if (idev == NULL)
return 0;
/* For administrative MTU increase, there is no way to discover
IPv6 PMTU increase, so PMTU increase should be updated here.
Since RFC 1981 doesn't include administrative MTU increase
......