Commit 3a747d70 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/net-2.6.11-work

into nuts.davemloft.net:/disk1/BK/net-2.6.11
parents 37e19a48 dcc994b3
......@@ -411,7 +411,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
# CONFIG_IP_NF_TARGET_REDIRECT is not set
# CONFIG_IP_NF_TARGET_NETMAP is not set
# CONFIG_IP_NF_TARGET_SAME is not set
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -239,7 +239,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_SAME=y
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_NAT_IRC=y
CONFIG_IP_NF_NAT_FTP=y
......
......@@ -399,7 +399,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_TARGET_NETMAP is not set
# CONFIG_IP_NF_TARGET_SAME is not set
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -534,7 +534,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_SAME=y
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_TARGET_TOS=y
......
......@@ -375,7 +375,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -301,7 +301,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -330,7 +330,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -301,7 +301,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -302,7 +302,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -337,7 +337,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -302,7 +302,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -301,7 +301,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -355,7 +355,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -290,7 +290,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -301,7 +301,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -342,7 +342,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -437,7 +437,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -342,7 +342,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -338,7 +338,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -396,7 +396,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -331,7 +331,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -302,7 +302,6 @@ CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -354,7 +354,6 @@ CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -457,7 +457,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -367,7 +367,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -319,7 +319,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_TARGET_NETMAP is not set
# CONFIG_IP_NF_TARGET_SAME is not set
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_NAT_FTP=m
# CONFIG_IP_NF_MANGLE is not set
......
......@@ -249,7 +249,6 @@ CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -332,7 +332,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_TARGET_NETMAP is not set
# CONFIG_IP_NF_TARGET_SAME is not set
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -479,7 +479,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -343,7 +343,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
# CONFIG_IP_NF_TARGET_NETMAP is not set
# CONFIG_IP_NF_TARGET_SAME is not set
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_NAT_FTP=m
# CONFIG_IP_NF_MANGLE is not set
......
......@@ -464,7 +464,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -440,7 +440,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_SAME=y
# CONFIG_IP_NF_NAT_LOCAL is not set
# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_TARGET_TOS=y
......
......@@ -316,7 +316,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -408,7 +408,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -374,7 +374,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
# CONFIG_IP_NF_NAT_LOCAL is not set
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -601,7 +601,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_SAME=m
CONFIG_IP_NF_NAT_LOCAL=y
CONFIG_IP_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_NAT_IRC=m
CONFIG_IP_NF_NAT_FTP=m
......
......@@ -274,5 +274,6 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
source "drivers/crypto/Kconfig"
endmenu
......@@ -48,7 +48,7 @@ static inline u64 RORu64(u64 x, u64 y)
return (x >> y) | (x << (64 - y));
}
const u64 sha512_K[80] = {
static const u64 sha512_K[80] = {
0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
......
......@@ -255,7 +255,7 @@ test_hmac(char *algo, struct hmac_testvec * template, unsigned int tcount)
#endif /* CONFIG_CRYPTO_HMAC */
void
static void
test_cipher(char * algo, int mode, int enc, struct cipher_testvec * template, unsigned int tcount)
{
unsigned int ret, i, j, k, temp;
......
......@@ -63,7 +63,7 @@ struct cipher_testvec {
*/
#define MD4_TEST_VECTORS 7
struct hash_testvec md4_tv_template [] = {
static struct hash_testvec md4_tv_template [] = {
{
.plaintext = "",
.digest = { 0x31, 0xd6, 0xcf, 0xe0, 0xd1, 0x6a, 0xe9, 0x31,
......@@ -109,7 +109,7 @@ struct hash_testvec md4_tv_template [] = {
*/
#define MD5_TEST_VECTORS 7
struct hash_testvec md5_tv_template[] = {
static struct hash_testvec md5_tv_template[] = {
{
.digest = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e },
......@@ -154,7 +154,7 @@ struct hash_testvec md5_tv_template[] = {
*/
#define SHA1_TEST_VECTORS 2
struct hash_testvec sha1_tv_template[] = {
static struct hash_testvec sha1_tv_template[] = {
{
.plaintext = "abc",
.psize = 3,
......@@ -175,7 +175,7 @@ struct hash_testvec sha1_tv_template[] = {
*/
#define SHA256_TEST_VECTORS 2
struct hash_testvec sha256_tv_template[] = {
static struct hash_testvec sha256_tv_template[] = {
{
.plaintext = "abc",
.psize = 3,
......@@ -200,7 +200,7 @@ struct hash_testvec sha256_tv_template[] = {
*/
#define SHA384_TEST_VECTORS 4
struct hash_testvec sha384_tv_template[] = {
static struct hash_testvec sha384_tv_template[] = {
{
.plaintext= "abc",
.psize = 3,
......@@ -249,7 +249,7 @@ struct hash_testvec sha384_tv_template[] = {
*/
#define SHA512_TEST_VECTORS 4
struct hash_testvec sha512_tv_template[] = {
static struct hash_testvec sha512_tv_template[] = {
{
.plaintext = "abc",
.psize = 3,
......@@ -309,7 +309,7 @@ struct hash_testvec sha512_tv_template[] = {
*/
#define WP512_TEST_VECTORS 8
struct hash_testvec wp512_tv_template[] = {
static struct hash_testvec wp512_tv_template[] = {
{
.plaintext = "",
.psize = 0,
......@@ -407,7 +407,7 @@ struct hash_testvec wp512_tv_template[] = {
#define WP384_TEST_VECTORS 8
struct hash_testvec wp384_tv_template[] = {
static struct hash_testvec wp384_tv_template[] = {
{
.plaintext = "",
.psize = 0,
......@@ -489,7 +489,7 @@ struct hash_testvec wp384_tv_template[] = {
#define WP256_TEST_VECTORS 8
struct hash_testvec wp256_tv_template[] = {
static struct hash_testvec wp256_tv_template[] = {
{
.plaintext = "",
.psize = 0,
......@@ -561,7 +561,7 @@ struct hash_testvec wp256_tv_template[] = {
*/
#define HMAC_MD5_TEST_VECTORS 7
struct hmac_testvec hmac_md5_tv_template[] =
static struct hmac_testvec hmac_md5_tv_template[] =
{
{
.key = { [0 ... 15] = 0x0b },
......@@ -625,7 +625,7 @@ struct hmac_testvec hmac_md5_tv_template[] =
*/
#define HMAC_SHA1_TEST_VECTORS 7
struct hmac_testvec hmac_sha1_tv_template[] = {
static struct hmac_testvec hmac_sha1_tv_template[] = {
{
.key = { [0 ... 19] = 0x0b },
.ksize = 20,
......@@ -690,7 +690,7 @@ struct hmac_testvec hmac_sha1_tv_template[] = {
*/
#define HMAC_SHA256_TEST_VECTORS 10
struct hmac_testvec hmac_sha256_tv_template[] = {
static struct hmac_testvec hmac_sha256_tv_template[] = {
{
.key = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
......@@ -813,7 +813,7 @@ struct hmac_testvec hmac_sha256_tv_template[] = {
#define DES3_EDE_ENC_TEST_VECTORS 3
#define DES3_EDE_DEC_TEST_VECTORS 3
struct cipher_testvec des_enc_tv_template[] = {
static struct cipher_testvec des_enc_tv_template[] = {
{ /* From Applied Cryptography */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef },
.klen = 8,
......@@ -917,7 +917,7 @@ struct cipher_testvec des_enc_tv_template[] = {
},
};
struct cipher_testvec des_dec_tv_template[] = {
static struct cipher_testvec des_dec_tv_template[] = {
{ /* From Applied Cryptography */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef },
.klen = 8,
......@@ -957,7 +957,7 @@ struct cipher_testvec des_dec_tv_template[] = {
},
};
struct cipher_testvec des_cbc_enc_tv_template[] = {
static struct cipher_testvec des_cbc_enc_tv_template[] = {
{ /* From OpenSSL */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
.klen = 8,
......@@ -1012,7 +1012,7 @@ struct cipher_testvec des_cbc_enc_tv_template[] = {
},
};
struct cipher_testvec des_cbc_dec_tv_template[] = {
static struct cipher_testvec des_cbc_dec_tv_template[] = {
{ /* FIPS Pub 81 */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef },
.klen = 8,
......@@ -1053,7 +1053,7 @@ struct cipher_testvec des_cbc_dec_tv_template[] = {
/*
* We really need some more test vectors, especially for DES3 CBC.
*/
struct cipher_testvec des3_ede_enc_tv_template[] = {
static struct cipher_testvec des3_ede_enc_tv_template[] = {
{ /* These are from openssl */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55,
......@@ -1084,7 +1084,7 @@ struct cipher_testvec des3_ede_enc_tv_template[] = {
},
};
struct cipher_testvec des3_ede_dec_tv_template[] = {
static struct cipher_testvec des3_ede_dec_tv_template[] = {
{ /* These are from openssl */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55,
......@@ -1123,7 +1123,7 @@ struct cipher_testvec des3_ede_dec_tv_template[] = {
#define BF_CBC_ENC_TEST_VECTORS 1
#define BF_CBC_DEC_TEST_VECTORS 1
struct cipher_testvec bf_enc_tv_template[] = {
static struct cipher_testvec bf_enc_tv_template[] = {
{ /* DES test vectors from OpenSSL */
.key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, },
.klen = 8,
......@@ -1178,7 +1178,7 @@ struct cipher_testvec bf_enc_tv_template[] = {
},
};
struct cipher_testvec bf_dec_tv_template[] = {
static struct cipher_testvec bf_dec_tv_template[] = {
{ /* DES test vectors from OpenSSL */
.key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.klen = 8,
......@@ -1233,7 +1233,7 @@ struct cipher_testvec bf_dec_tv_template[] = {
},
};
struct cipher_testvec bf_cbc_enc_tv_template[] = {
static struct cipher_testvec bf_cbc_enc_tv_template[] = {
{ /* From OpenSSL */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
......@@ -1252,7 +1252,7 @@ struct cipher_testvec bf_cbc_enc_tv_template[] = {
},
};
struct cipher_testvec bf_cbc_dec_tv_template[] = {
static struct cipher_testvec bf_cbc_dec_tv_template[] = {
{ /* From OpenSSL */
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
......@@ -1279,7 +1279,7 @@ struct cipher_testvec bf_cbc_dec_tv_template[] = {
#define TF_CBC_ENC_TEST_VECTORS 4
#define TF_CBC_DEC_TEST_VECTORS 4
struct cipher_testvec tf_enc_tv_template[] = {
static struct cipher_testvec tf_enc_tv_template[] = {
{
.key = { [0 ... 15] = 0x00 },
.klen = 16,
......@@ -1312,7 +1312,7 @@ struct cipher_testvec tf_enc_tv_template[] = {
},
};
struct cipher_testvec tf_dec_tv_template[] = {
static struct cipher_testvec tf_dec_tv_template[] = {
{
.key = { [0 ... 15] = 0x00 },
.klen = 16,
......@@ -1345,7 +1345,7 @@ struct cipher_testvec tf_dec_tv_template[] = {
},
};
struct cipher_testvec tf_cbc_enc_tv_template[] = {
static struct cipher_testvec tf_cbc_enc_tv_template[] = {
{ /* Generated with Nettle */
.key = { [0 ... 15] = 0x00 },
.klen = 16,
......@@ -1391,7 +1391,7 @@ struct cipher_testvec tf_cbc_enc_tv_template[] = {
},
};
struct cipher_testvec tf_cbc_dec_tv_template[] = {
static struct cipher_testvec tf_cbc_dec_tv_template[] = {
{ /* Reverse of the first four above */
.key = { [0 ... 15] = 0x00 },
.klen = 16,
......@@ -1447,7 +1447,7 @@ struct cipher_testvec tf_cbc_dec_tv_template[] = {
#define TNEPRES_ENC_TEST_VECTORS 4
#define TNEPRES_DEC_TEST_VECTORS 4
struct cipher_testvec serpent_enc_tv_template[] =
static struct cipher_testvec serpent_enc_tv_template[] =
{
{
.input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
......@@ -1489,7 +1489,7 @@ struct cipher_testvec serpent_enc_tv_template[] =
},
};
struct cipher_testvec tnepres_enc_tv_template[] =
static struct cipher_testvec tnepres_enc_tv_template[] =
{
{ /* KeySize=128, PT=0, I=1 */
.input = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
......@@ -1540,7 +1540,7 @@ struct cipher_testvec tnepres_enc_tv_template[] =
};
struct cipher_testvec serpent_dec_tv_template[] =
static struct cipher_testvec serpent_dec_tv_template[] =
{
{
.input = { 0x12, 0x07, 0xfc, 0xce, 0x9b, 0xd0, 0xd6, 0x47,
......@@ -1582,7 +1582,7 @@ struct cipher_testvec serpent_dec_tv_template[] =
},
};
struct cipher_testvec tnepres_dec_tv_template[] =
static struct cipher_testvec tnepres_dec_tv_template[] =
{
{
.input = { 0x41, 0xcc, 0x6b, 0x31, 0x59, 0x31, 0x45, 0x97,
......@@ -1629,7 +1629,7 @@ struct cipher_testvec tnepres_dec_tv_template[] =
#define CAST6_ENC_TEST_VECTORS 3
#define CAST6_DEC_TEST_VECTORS 3
struct cipher_testvec cast6_enc_tv_template[] =
static struct cipher_testvec cast6_enc_tv_template[] =
{
{
.key = { 0x23, 0x42, 0xbb, 0x9e, 0xfa, 0x38, 0x54, 0x2c,
......@@ -1664,7 +1664,7 @@ struct cipher_testvec cast6_enc_tv_template[] =
},
};
struct cipher_testvec cast6_dec_tv_template[] =
static struct cipher_testvec cast6_dec_tv_template[] =
{
{
.key = { 0x23, 0x42, 0xbb, 0x9e, 0xfa, 0x38, 0x54, 0x2c,
......@@ -1706,7 +1706,7 @@ struct cipher_testvec cast6_dec_tv_template[] =
#define AES_ENC_TEST_VECTORS 3
#define AES_DEC_TEST_VECTORS 3
struct cipher_testvec aes_enc_tv_template[] = {
static struct cipher_testvec aes_enc_tv_template[] = {
{ /* From FIPS-197 */
.key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
......@@ -1743,7 +1743,7 @@ struct cipher_testvec aes_enc_tv_template[] = {
},
};
struct cipher_testvec aes_dec_tv_template[] = {
static struct cipher_testvec aes_dec_tv_template[] = {
{ /* From FIPS-197 */
.key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
......@@ -1784,7 +1784,7 @@ struct cipher_testvec aes_dec_tv_template[] = {
#define CAST5_ENC_TEST_VECTORS 3
#define CAST5_DEC_TEST_VECTORS 3
struct cipher_testvec cast5_enc_tv_template[] =
static struct cipher_testvec cast5_enc_tv_template[] =
{
{
.key = { 0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
......@@ -1812,7 +1812,7 @@ struct cipher_testvec cast5_enc_tv_template[] =
},
};
struct cipher_testvec cast5_dec_tv_template[] =
static struct cipher_testvec cast5_dec_tv_template[] =
{
{
.key = { 0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
......@@ -1846,7 +1846,7 @@ struct cipher_testvec cast5_dec_tv_template[] =
#define ARC4_ENC_TEST_VECTORS 7
#define ARC4_DEC_TEST_VECTORS 7
struct cipher_testvec arc4_enc_tv_template[] =
static struct cipher_testvec arc4_enc_tv_template[] =
{
{
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef },
......@@ -1913,7 +1913,7 @@ struct cipher_testvec arc4_enc_tv_template[] =
},
};
struct cipher_testvec arc4_dec_tv_template[] =
static struct cipher_testvec arc4_dec_tv_template[] =
{
{
.key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef },
......@@ -1986,7 +1986,7 @@ struct cipher_testvec arc4_dec_tv_template[] =
#define TEA_ENC_TEST_VECTORS 4
#define TEA_DEC_TEST_VECTORS 4
struct cipher_testvec tea_enc_tv_template[] =
static struct cipher_testvec xtea_enc_tv_template[] =
{
{
.key = { [0 ... 15] = 0x00 },
......@@ -2030,7 +2030,7 @@ struct cipher_testvec tea_enc_tv_template[] =
}
};
struct cipher_testvec tea_dec_tv_template[] =
static struct cipher_testvec tea_dec_tv_template[] =
{
{
.key = { [0 ... 15] = 0x00 },
......@@ -2080,7 +2080,7 @@ struct cipher_testvec tea_dec_tv_template[] =
#define XTEA_ENC_TEST_VECTORS 4
#define XTEA_DEC_TEST_VECTORS 4
struct cipher_testvec xtea_enc_tv_template[] =
static struct cipher_testvec tea_enc_tv_template[] =
{
{
.key = { [0 ... 15] = 0x00 },
......@@ -2124,7 +2124,7 @@ struct cipher_testvec xtea_enc_tv_template[] =
}
};
struct cipher_testvec xtea_dec_tv_template[] =
static struct cipher_testvec xtea_dec_tv_template[] =
{
{
.key = { [0 ... 15] = 0x00 },
......@@ -2174,7 +2174,7 @@ struct cipher_testvec xtea_dec_tv_template[] =
#define KHAZAD_ENC_TEST_VECTORS 5
#define KHAZAD_DEC_TEST_VECTORS 5
struct cipher_testvec khazad_enc_tv_template[] = {
static struct cipher_testvec khazad_enc_tv_template[] = {
{
.key = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
......@@ -2220,7 +2220,7 @@ struct cipher_testvec khazad_enc_tv_template[] = {
},
};
struct cipher_testvec khazad_dec_tv_template[] = {
static struct cipher_testvec khazad_dec_tv_template[] = {
{
.key = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
......@@ -2275,7 +2275,7 @@ struct cipher_testvec khazad_dec_tv_template[] = {
#define ANUBIS_CBC_ENC_TEST_VECTORS 2
#define ANUBIS_CBC_DEC_TEST_VECTORS 2
struct cipher_testvec anubis_enc_tv_template[] = {
static struct cipher_testvec anubis_enc_tv_template[] = {
{
.key = { 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe },
......@@ -2338,7 +2338,7 @@ struct cipher_testvec anubis_enc_tv_template[] = {
},
};
struct cipher_testvec anubis_dec_tv_template[] = {
static struct cipher_testvec anubis_dec_tv_template[] = {
{
.key = { 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe },
......@@ -2401,7 +2401,7 @@ struct cipher_testvec anubis_dec_tv_template[] = {
},
};
struct cipher_testvec anubis_cbc_enc_tv_template[] = {
static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
{
.key = { 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe },
......@@ -2436,7 +2436,7 @@ struct cipher_testvec anubis_cbc_enc_tv_template[] = {
},
};
struct cipher_testvec anubis_cbc_dec_tv_template[] = {
static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
{
.key = { 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe },
......@@ -2489,7 +2489,7 @@ struct comp_testvec {
#define DEFLATE_COMP_TEST_VECTORS 2
#define DEFLATE_DECOMP_TEST_VECTORS 2
struct comp_testvec deflate_comp_tv_template[] = {
static struct comp_testvec deflate_comp_tv_template[] = {
{
.inlen = 70,
.outlen = 38,
......@@ -2525,7 +2525,7 @@ struct comp_testvec deflate_comp_tv_template[] = {
},
};
struct comp_testvec deflate_decomp_tv_template[] = {
static struct comp_testvec deflate_decomp_tv_template[] = {
{
.inlen = 122,
.outlen = 191,
......@@ -2566,7 +2566,7 @@ struct comp_testvec deflate_decomp_tv_template[] = {
*/
#define MICHAEL_MIC_TEST_VECTORS 6
struct hash_testvec michael_mic_tv_template[] =
static struct hash_testvec michael_mic_tv_template[] =
{
{
.key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
......
......@@ -60,3 +60,4 @@ obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_MMC) += mmc/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
menu "Hardware crypto devices"
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on CRYPTO && X86 && !X86_64
help
Some VIA processors come with an integrated crypto engine
(so-called VIA PadLock ACE, Advanced Cryptography Engine)
that provides instructions for very fast {en,de}cryption
with some algorithms.
The instructions are used only when the CPU supports them.
Otherwise software encryption is used. If you are unsure,
say Y.
config CRYPTO_DEV_PADLOCK_AES
bool "Support for AES in VIA PadLock"
depends on CRYPTO_DEV_PADLOCK
default y
help
Use VIA PadLock for AES algorithm.
endmenu
obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
padlock-objs := padlock-generic.o $(padlock-objs-y)
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
*
* Key expansion routine taken from crypto/aes.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* ---------------------------------------------------------------------------
* Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
* All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software in both source and binary
* form is allowed (with or without changes) provided that:
*
* 1. distributions of this source code include the above copyright
* notice, this list of conditions and the following disclaimer;
*
* 2. distributions in binary form include the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other associated materials;
*
* 3. the copyright holder's name is not used to endorse products
* built using this software without specific written permission.
*
* ALTERNATIVELY, provided that this notice is retained in full, this product
* may be distributed under the terms of the GNU General Public License (GPL),
* in which case the provisions of the GPL apply INSTEAD OF those given above.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include "padlock.h"
#define AES_MIN_KEY_SIZE 16 /* in uint8_t units */
#define AES_MAX_KEY_SIZE 32 /* ditto */
#define AES_BLOCK_SIZE 16 /* ditto */
#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
static inline int aes_hw_extkey_available (uint8_t key_len);
struct aes_ctx {
uint32_t e_data[AES_EXTENDED_KEY_SIZE+4];
uint32_t d_data[AES_EXTENDED_KEY_SIZE+4];
uint32_t *E;
uint32_t *D;
int key_length;
};
/* ====== Key management routines ====== */
static inline uint32_t
generic_rotr32 (const uint32_t x, const unsigned bits)
{
const unsigned n = bits % 32;
return (x >> n) | (x << (32 - n));
}
static inline uint32_t
generic_rotl32 (const uint32_t x, const unsigned bits)
{
const unsigned n = bits % 32;
return (x << n) | (x >> (32 - n));
}
#define rotl generic_rotl32
#define rotr generic_rotr32
/*
* #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
*/
static inline uint8_t
byte(const uint32_t x, const unsigned n)
{
return x >> (n << 3);
}
#define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x))
#define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from))
#define E_KEY ctx->E
#define D_KEY ctx->D
static uint8_t pow_tab[256];
static uint8_t log_tab[256];
static uint8_t sbx_tab[256];
static uint8_t isb_tab[256];
static uint32_t rco_tab[10];
static uint32_t ft_tab[4][256];
static uint32_t it_tab[4][256];
static uint32_t fl_tab[4][256];
static uint32_t il_tab[4][256];
static inline uint8_t
f_mult (uint8_t a, uint8_t b)
{
uint8_t aa = log_tab[a], cc = aa + log_tab[b];
return pow_tab[cc + (cc < aa ? 1 : 0)];
}
#define ff_mult(a,b) (a && b ? f_mult(a, b) : 0)
#define f_rn(bo, bi, n, k) \
bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
#define i_rn(bo, bi, n, k) \
bo[n] = it_tab[0][byte(bi[n],0)] ^ \
it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
#define ls_box(x) \
( fl_tab[0][byte(x, 0)] ^ \
fl_tab[1][byte(x, 1)] ^ \
fl_tab[2][byte(x, 2)] ^ \
fl_tab[3][byte(x, 3)] )
#define f_rl(bo, bi, n, k) \
bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
#define i_rl(bo, bi, n, k) \
bo[n] = il_tab[0][byte(bi[n],0)] ^ \
il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
static void
gen_tabs (void)
{
uint32_t i, t;
uint8_t p, q;
/* log and power tables for GF(2**8) finite field with
0x011b as modular polynomial - the simplest primitive
root is 0x03, used here to generate the tables */
for (i = 0, p = 1; i < 256; ++i) {
pow_tab[i] = (uint8_t) p;
log_tab[p] = (uint8_t) i;
p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
}
log_tab[1] = 0;
for (i = 0, p = 1; i < 10; ++i) {
rco_tab[i] = p;
p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
}
for (i = 0; i < 256; ++i) {
p = (i ? pow_tab[255 - log_tab[i]] : 0);
q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
sbx_tab[i] = p;
isb_tab[p] = (uint8_t) i;
}
for (i = 0; i < 256; ++i) {
p = sbx_tab[i];
t = p;
fl_tab[0][i] = t;
fl_tab[1][i] = rotl (t, 8);
fl_tab[2][i] = rotl (t, 16);
fl_tab[3][i] = rotl (t, 24);
t = ((uint32_t) ff_mult (2, p)) |
((uint32_t) p << 8) |
((uint32_t) p << 16) | ((uint32_t) ff_mult (3, p) << 24);
ft_tab[0][i] = t;
ft_tab[1][i] = rotl (t, 8);
ft_tab[2][i] = rotl (t, 16);
ft_tab[3][i] = rotl (t, 24);
p = isb_tab[i];
t = p;
il_tab[0][i] = t;
il_tab[1][i] = rotl (t, 8);
il_tab[2][i] = rotl (t, 16);
il_tab[3][i] = rotl (t, 24);
t = ((uint32_t) ff_mult (14, p)) |
((uint32_t) ff_mult (9, p) << 8) |
((uint32_t) ff_mult (13, p) << 16) |
((uint32_t) ff_mult (11, p) << 24);
it_tab[0][i] = t;
it_tab[1][i] = rotl (t, 8);
it_tab[2][i] = rotl (t, 16);
it_tab[3][i] = rotl (t, 24);
}
}
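/*
 * Editorial aside, not part of the driver: the tables built by gen_tabs()
 * implement multiplication in GF(2^8) with 0x11b as the reduction
 * polynomial and 0x03 as the generator, and f_mult() uses log_tab/pow_tab
 * to do that multiplication.  As a hedged, standalone illustration, the
 * plain shift-and-xor form of the same operation is sketched below;
 * gf_mult_example(0x57, 0x83) yields 0xc1, the worked example from
 * FIPS-197, matching what the table-driven f_mult() computes.
 */
static inline uint8_t
gf_mult_example (uint8_t a, uint8_t b)
{
	uint8_t r = 0;

	while (b) {
		if (b & 1)
			r ^= a;
		/* xtime: multiply by x, reduce modulo x^8+x^4+x^3+x+1 */
		a = (a << 1) ^ ((a & 0x80) ? 0x1b : 0);
		b >>= 1;
	}
	return r;
}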
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
#define imix_col(y,x) \
u = star_x(x); \
v = star_x(u); \
w = star_x(v); \
t = w ^ (x); \
(y) = u ^ v ^ w; \
(y) ^= rotr(u ^ t, 8) ^ \
rotr(v ^ t, 16) ^ \
rotr(t,24)
/* initialise the key schedule from the user supplied key */
#define loop4(i) \
{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \
t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
}
#define loop6(i) \
{ t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i]; \
t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \
t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
}
#define loop8(i) \
{ t = rotr(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \
t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \
t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
t = E_KEY[8 * i + 4] ^ ls_box(t); \
E_KEY[8 * i + 12] = t; \
t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
}
static int
aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
{
struct aes_ctx *ctx = ctx_arg;
uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE];
uint32_t rounds;
if (key_len != 16 && key_len != 24 && key_len != 32) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->key_length = key_len;
ctx->E = ctx->e_data;
ctx->D = ctx->d_data;
/* Ensure 16-byte alignment of keys for VIA PadLock. */
if ((int)(ctx->e_data) & 0x0F)
ctx->E += 4 - (((int)(ctx->e_data) & 0x0F) / sizeof (ctx->e_data[0]));
if ((int)(ctx->d_data) & 0x0F)
ctx->D += 4 - (((int)(ctx->d_data) & 0x0F) / sizeof (ctx->d_data[0]));
E_KEY[0] = uint32_t_in (in_key);
E_KEY[1] = uint32_t_in (in_key + 4);
E_KEY[2] = uint32_t_in (in_key + 8);
E_KEY[3] = uint32_t_in (in_key + 12);
/* Don't generate extended keys if the hardware can do it. */
if (aes_hw_extkey_available(key_len))
return 0;
switch (key_len) {
case 16:
t = E_KEY[3];
for (i = 0; i < 10; ++i)
loop4 (i);
break;
case 24:
E_KEY[4] = uint32_t_in (in_key + 16);
t = E_KEY[5] = uint32_t_in (in_key + 20);
for (i = 0; i < 8; ++i)
loop6 (i);
break;
case 32:
E_KEY[4] = uint32_t_in (in_key + 16);
E_KEY[5] = uint32_t_in (in_key + 20);
E_KEY[6] = uint32_t_in (in_key + 24);
t = E_KEY[7] = uint32_t_in (in_key + 28);
for (i = 0; i < 7; ++i)
loop8 (i);
break;
}
D_KEY[0] = E_KEY[0];
D_KEY[1] = E_KEY[1];
D_KEY[2] = E_KEY[2];
D_KEY[3] = E_KEY[3];
for (i = 4; i < key_len + 24; ++i) {
imix_col (D_KEY[i], E_KEY[i]);
}
/* PadLock needs a different format of the decryption key. */
rounds = 10 + (key_len - 16) / 4;
for (i = 0; i < rounds; i++) {
P[((i + 1) * 4) + 0] = D_KEY[((rounds - i - 1) * 4) + 0];
P[((i + 1) * 4) + 1] = D_KEY[((rounds - i - 1) * 4) + 1];
P[((i + 1) * 4) + 2] = D_KEY[((rounds - i - 1) * 4) + 2];
P[((i + 1) * 4) + 3] = D_KEY[((rounds - i - 1) * 4) + 3];
}
P[0] = E_KEY[(rounds * 4) + 0];
P[1] = E_KEY[(rounds * 4) + 1];
P[2] = E_KEY[(rounds * 4) + 2];
P[3] = E_KEY[(rounds * 4) + 3];
memcpy(D_KEY, P, AES_EXTENDED_KEY_SIZE_B);
return 0;
}
/* Tells whether the ACE can generate
the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
/* TODO: We should check the actual CPU model/stepping
as it's possible that the capability will be
added in the next CPU revisions. */
if (key_len == 16)
return 1;
return 0;
}
/* ====== Encryption/decryption routines ====== */
/* This is the real call to PadLock. */
static inline void
padlock_xcrypt_ecb(uint8_t *input, uint8_t *output, uint8_t *key,
void *control_word, uint32_t count)
{
asm volatile ("pushfl; popfl"); /* enforce key reload. */
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "=m"(*output), "+S"(input), "+D"(output)
: "d"(control_word), "b"(key), "c"(count));
}
static void
aes_padlock(void *ctx_arg, uint8_t *out_arg, const uint8_t *in_arg, int encdec)
{
/* Don't blindly modify this structure - the items must
fit on 16-byte boundaries! */
struct padlock_xcrypt_data {
uint8_t buf[AES_BLOCK_SIZE];
union cword cword;
};
struct aes_ctx *ctx = ctx_arg;
char bigbuf[sizeof(struct padlock_xcrypt_data) + 16];
struct padlock_xcrypt_data *data;
void *key;
/* Place 'data' at the first 16-byte aligned address in 'bigbuf'. */
if (((long)bigbuf) & 0x0F)
data = (void*)(bigbuf + 16 - ((long)bigbuf & 0x0F));
else
data = (void*)bigbuf;
/* Prepare Control word. */
memset (data, 0, sizeof(struct padlock_xcrypt_data));
data->cword.b.encdec = !encdec; /* in the rest of cryptoapi ENC=1/DEC=0 */
data->cword.b.rounds = 10 + (ctx->key_length - 16) / 4;
data->cword.b.ksize = (ctx->key_length - 16) / 8;
/* Can the hardware generate the extended key? */
if (!aes_hw_extkey_available(ctx->key_length))
data->cword.b.keygen = 1;
/* ctx->E starts with a plain key - if the hardware can generate
the extended key itself, we must supply the plain key
for both Encryption and Decryption. */
if (encdec == CRYPTO_DIR_ENCRYPT || data->cword.b.keygen == 0)
key = ctx->E;
else
key = ctx->D;
memcpy(data->buf, in_arg, AES_BLOCK_SIZE);
padlock_xcrypt_ecb(data->buf, data->buf, key, &data->cword, 1);
memcpy(out_arg, data->buf, AES_BLOCK_SIZE);
}
static void
aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
{
aes_padlock(ctx_arg, out, in, CRYPTO_DIR_ENCRYPT);
}
static void
aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in)
{
aes_padlock(ctx_arg, out, in, CRYPTO_DIR_DECRYPT);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
int __init padlock_init_aes(void)
{
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
gen_tabs();
return crypto_register_alg(&aes_alg);
}
void __exit padlock_fini_aes(void)
{
crypto_unregister_alg(&aes_alg);
}
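Both aes_set_key() and aes_padlock() above round a pointer up to the next 16-byte boundary by hand, because PadLock requires its keys and working buffers to be 16-byte aligned. A minimal standalone sketch of that alignment idiom follows; it is illustrative only, and the helper name align16 is not a driver identifier.
#include <stdint.h>

/* Return the first 16-byte aligned address at or after buf, as
 * aes_padlock() does with its on-stack 'bigbuf'.  The buffer must have
 * at least 15 bytes of slack beyond the object being placed in it. */
static void *align16(void *buf)
{
	uintptr_t p = (uintptr_t)buf;

	return (void *)((p + 0x0F) & ~(uintptr_t)0x0F);
}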
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <asm/byteorder.h>
#include "padlock.h"
static int __init
padlock_init(void)
{
int ret = -ENOSYS;
if (!cpu_has_xcrypt) {
printk(KERN_ERR PFX "VIA PadLock not detected.\n");
return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
if ((ret = padlock_init_aes())) {
printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
return ret;
}
#endif
if (ret == -ENOSYS)
printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
return ret;
}
static void __exit
padlock_fini(void)
{
#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
padlock_fini_aes();
#endif
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("VIA PadLock crypto engine support.");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Michal Ludvig");
/*
* Driver for VIA PadLock
*
* Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_PADLOCK_H
#define _CRYPTO_PADLOCK_H
/* Control word. */
union cword {
uint32_t cword[4];
struct {
int rounds:4;
int algo:3;
int keygen:1;
int interm:1;
int encdec:1;
int ksize:2;
} b;
};
#define PFX "padlock: "
#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
int padlock_init_aes(void);
void padlock_fini_aes(void);
#endif
#endif /* _CRYPTO_PADLOCK_H */
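aes_padlock() fills the control word defined above directly from the key length: rounds = 10 + (key_length - 16) / 4 and ksize = (key_length - 16) / 8, which works out to 10/0 for 128-bit, 12/1 for 192-bit and 14/2 for 256-bit keys. A small hedged sketch of that arithmetic (not driver code, names are illustrative):
#include <assert.h>

/* Mirrors the cword setup in aes_padlock(); illustration only. */
static void cword_for_keylen(int key_length, int *rounds, int *ksize)
{
	*rounds = 10 + (key_length - 16) / 4;	/* AES-128/192/256 -> 10/12/14 */
	*ksize  = (key_length - 16) / 8;	/* -> 0/1/2 */
}

int main(void)
{
	int rounds, ksize;

	cword_for_keylen(16, &rounds, &ksize);
	assert(rounds == 10 && ksize == 0);
	cword_for_keylen(24, &rounds, &ksize);
	assert(rounds == 12 && ksize == 1);
	cword_for_keylen(32, &rounds, &ksize);
	assert(rounds == 14 && ksize == 2);
	return 0;
}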
......@@ -66,7 +66,7 @@ obj-$(CONFIG_MII) += mii.o
obj-$(CONFIG_SUNDANCE) += sundance.o
obj-$(CONFIG_HAMACHI) += hamachi.o
obj-$(CONFIG_NET) += Space.o net_init.o loopback.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_ETHERTAP) += ethertap.o
obj-$(CONFIG_NET_SB1000) += sb1000.o
......
......@@ -59,6 +59,25 @@ config ARCNET_RAW
to work unless talking to a copy of the same Linux arcnet driver,
but perhaps marginally faster in that case.
config ARCNET_CAP
tristate "Enable CAP mode packet interface"
depends on ARCNET
help
ARCnet "cap mode" packet encapsulation. Used to get the hardware
acknowledge back to userspace. After the initial protocol byte every
packet is stuffed with an extra 4 byte "cookie" which doesn't
actually appear on the network. After transmit the driver will send
back a packet with protocol byte 0 containing the status of the
transmition:
0=no hardware acknowledge
1=excessive nak
2=transmition accepted by the reciever hardware
Received packets are also stuffed with the extra 4 bytes but it will
be random data.
Cap only listens to protocol 1-8.
config ARCNET_COM90xx
tristate "ARCnet COM90xx (normal) chipset driver"
depends on ARCNET
......
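The ARCNET_CAP help text above describes the cap-mode framing: a protocol byte, a 4-byte cookie that never reaches the wire, and, after transmit, a status packet with protocol byte 0. The exact header layout lives in the new capmode.c (only partially shown in this diff), so the following userspace-view sketch is an assumption for illustration only; cap_frame and cap_tx_status are not driver identifiers, and a real frame is also preceded by the ARCnet hardware header and would need explicit packing.
#include <stdint.h>

/* Transmit-status values reported back in the protocol-0 packet,
 * per the ARCNET_CAP help text above. */
enum cap_tx_status {
	CAP_TX_NO_HW_ACK     = 0,	/* no hardware acknowledgement */
	CAP_TX_EXCESSIVE_NAK = 1,	/* excessive NAK */
	CAP_TX_ACKED         = 2,	/* accepted by the receiving hardware */
};

/* Illustrative userspace view of a cap-mode frame: protocol byte,
 * then the 4-byte cookie, then payload (the cookie never hits the wire). */
struct cap_frame {
	uint8_t  proto;		/* 1-8 for data, 0 for the status packet */
	uint32_t cookie;	/* sequence number chosen by the sender */
	uint8_t  payload[];	/* data, or one status byte when proto is 0 */
};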
......@@ -5,6 +5,7 @@ obj-$(CONFIG_ARCNET) += arcnet.o
obj-$(CONFIG_ARCNET_1201) += rfc1201.o
obj-$(CONFIG_ARCNET_1051) += rfc1051.o
obj-$(CONFIG_ARCNET_RAW) += arc-rawmode.o
obj-$(CONFIG_ARCNET_CAP) += capmode.o
obj-$(CONFIG_ARCNET_COM90xx) += com90xx.o
obj-$(CONFIG_ARCNET_COM90xxIO) += com90io.o
obj-$(CONFIG_ARCNET_RIM_I) += arc-rimi.o
......
......@@ -42,7 +42,6 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
int bufnum);
struct ArcProto rawmode_proto =
{
.suffix = 'r',
......@@ -50,6 +49,8 @@ struct ArcProto rawmode_proto =
.rx = rx,
.build_header = build_header,
.prepare_tx = prepare_tx,
.continue_tx = NULL,
.ack_tx = NULL
};
......@@ -121,7 +122,8 @@ static void rx(struct net_device *dev, int bufnum,
BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
skb->protocol = 0;
skb->protocol = __constant_htons(ETH_P_ARCNET);
;
netif_rx(skb);
dev->last_rx = jiffies;
}
......@@ -190,6 +192,9 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
} else
hard->offset[0] = ofs = 256 - length;
BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
length,ofs);
lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);
......
......@@ -53,7 +53,6 @@
#include <linux/init.h>
#include <linux/arcdevice.h>
/* "do nothing" functions for protocol drivers */
static void null_rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length);
......@@ -69,25 +68,28 @@ static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
* arc_proto_default instead. It also must not be NULL; if you would like
* to set it to NULL, set it to &arc_proto_null instead.
*/
struct ArcProto *arc_proto_map[256], *arc_proto_default, *arc_bcast_proto;
struct ArcProto *arc_proto_map[256], *arc_proto_default,
*arc_bcast_proto, *arc_raw_proto;
struct ArcProto arc_proto_null =
{
.suffix = '?',
.mtu = XMTU,
.is_ip = 0,
.rx = null_rx,
.build_header = null_build_header,
.prepare_tx = null_prepare_tx,
.continue_tx = NULL,
.ack_tx = NULL
};
static spinlock_t arcnet_lock = SPIN_LOCK_UNLOCKED;
/* Exported function prototypes */
int arcnet_debug = ARCNET_DEBUG;
EXPORT_SYMBOL(arc_proto_map);
EXPORT_SYMBOL(arc_proto_default);
EXPORT_SYMBOL(arc_bcast_proto);
EXPORT_SYMBOL(arc_raw_proto);
EXPORT_SYMBOL(arc_proto_null);
EXPORT_SYMBOL(arcnet_unregister_proto);
EXPORT_SYMBOL(arcnet_debug);
......@@ -131,7 +133,7 @@ static int __init arcnet_init(void)
#endif
/* initialize the protocol map */
arc_proto_default = arc_bcast_proto = &arc_proto_null;
arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null;
for (count = 0; count < 256; count++)
arc_proto_map[count] = arc_proto_default;
......@@ -155,7 +157,8 @@ module_exit(arcnet_exit);
* Dump the contents of an sk_buff
*/
#if ARCNET_DEBUG_MAX & D_SKB
void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
void arcnet_dump_skb(struct net_device *dev,
struct sk_buff *skb, char *desc)
{
int i;
......@@ -176,18 +179,22 @@ EXPORT_SYMBOL(arcnet_dump_skb);
* Dump the contents of an ARCnet buffer
*/
#if (ARCNET_DEBUG_MAX & (D_RX | D_TX))
void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc)
void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc,
int take_arcnet_lock)
{
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
int i, length;
unsigned long flags;
unsigned long flags = 0;
static uint8_t buf[512];
/* hw.copy_from_card expects IRQ context so take the IRQ lock
to keep it single threaded */
spin_lock_irqsave(&arcnet_lock, flags);
if(take_arcnet_lock)
spin_lock_irqsave(&lp->lock, flags);
lp->hw.copy_from_card(dev, bufnum, 0, buf, 512);
spin_unlock_irqrestore(&arcnet_lock, flags);
if(take_arcnet_lock)
spin_unlock_irqrestore(&lp->lock, flags);
/* if the offset[0] byte is nonzero, this is a 256-byte packet */
length = (buf[2] ? 256 : 512);
......@@ -219,6 +226,8 @@ void arcnet_unregister_proto(struct ArcProto *proto)
arc_proto_default = &arc_proto_null;
if (arc_bcast_proto == proto)
arc_bcast_proto = arc_proto_default;
if (arc_raw_proto == proto)
arc_raw_proto = arc_proto_default;
for (count = 0; count < 256; count++) {
if (arc_proto_map[count] == proto)
......@@ -261,8 +270,11 @@ static int get_arcbuf(struct net_device *dev)
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
int buf = -1, i;
if (!atomic_dec_and_test(&lp->buf_lock)) /* already in this function */
BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n", lp->buf_lock.counter);
if (!atomic_dec_and_test(&lp->buf_lock)) {
/* already in this function */
BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n",
lp->buf_lock.counter);
}
else { /* we can continue */
if (lp->next_buf >= 5)
lp->next_buf -= 5;
......@@ -312,7 +324,7 @@ void arcdev_setup(struct net_device *dev)
dev->mtu = choose_mtu();
dev->addr_len = ARCNET_ALEN;
dev->tx_queue_len = 30;
dev->tx_queue_len = 100;
dev->broadcast[0] = 0x00; /* for us, broadcasts are address 0 */
dev->watchdog_timeo = TX_TIMEOUT;
......@@ -334,8 +346,16 @@ void arcdev_setup(struct net_device *dev)
struct net_device *alloc_arcdev(char *name)
{
return alloc_netdev(sizeof(struct arcnet_local),
name && *name ? name : "arc%d", arcdev_setup);
struct net_device *dev;
dev = alloc_netdev(sizeof(struct arcnet_local),
name && *name ? name : "arc%d", arcdev_setup);
if(dev) {
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
lp->lock = SPIN_LOCK_UNLOCKED;
}
return dev;
}
/*
......@@ -351,6 +371,8 @@ static int arcnet_open(struct net_device *dev)
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
int count, newmtu, error;
BUGMSG(D_INIT,"opened.");
if (!try_module_get(lp->hw.owner))
return -ENODEV;
......@@ -377,6 +399,8 @@ static int arcnet_open(struct net_device *dev)
if (newmtu < dev->mtu)
dev->mtu = newmtu;
BUGMSG(D_INIT, "arcnet_open: mtu: %d.\n", dev->mtu);
/* autodetect the encapsulation for each host. */
memset(lp->default_proto, 0, sizeof(lp->default_proto));
......@@ -390,6 +414,7 @@ static int arcnet_open(struct net_device *dev)
/* initialize buffers */
atomic_set(&lp->buf_lock, 1);
lp->next_buf = lp->first_free_buf = 0;
release_arcbuf(dev, 0);
release_arcbuf(dev, 1);
......@@ -411,17 +436,24 @@ static int arcnet_open(struct net_device *dev)
BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse "
"DOS networking programs!\n");
if (ASTATUS() & RESETflag)
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
if (ASTATUS() & RESETflag) {
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
ACOMMAND(CFLAGScmd | RESETclear);
}
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
/* make sure we're ready to receive IRQ's. */
AINTMASK(0);
udelay(1); /* give it time to set the mask before
* we reset it again. (may not even be
* necessary)
*/
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
lp->intmask = NORXflag | RECONflag;
AINTMASK(lp->intmask);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
netif_start_queue(dev);
......@@ -467,32 +499,41 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
daddr ? *(uint8_t *) daddr : -1,
type, type, len);
if (len != skb->len)
if (skb->len!=0 && len != skb->len)
BUGMSG(D_NORMAL, "arcnet_header: Yikes! skb->len(%d) != len(%d)!\n",
skb->len, len);
/*
* if the dest addr isn't provided, we can't choose an encapsulation!
* Store the packet type (eg. ETH_P_IP) for now, and we'll push on a
* real header when we do rebuild_header.
*/
if (!daddr) {
/* Type is host order - ? */
if(type == ETH_P_ARCNET) {
proto = arc_raw_proto;
BUGMSG(D_DEBUG, "arc_raw_proto used. proto='%c'\n",proto->suffix);
_daddr = daddr ? *(uint8_t *) daddr : 0;
}
else if (!daddr) {
/*
* if the dest addr isn't provided, we can't choose an encapsulation!
* Store the packet type (eg. ETH_P_IP) for now, and we'll push on a
* real header when we do rebuild_header.
*/
*(uint16_t *) skb_push(skb, 2) = type;
if (skb->nh.raw - skb->mac.raw != 2)
BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n",
(int)(skb->nh.raw - skb->mac.raw));
return -2; /* return error -- can't transmit yet! */
}
/* otherwise, we can just add the header as usual. */
_daddr = *(uint8_t *) daddr;
proto_num = lp->default_proto[_daddr];
proto = arc_proto_map[proto_num];
BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n",
proto_num, proto->suffix);
if (proto == &arc_proto_null && arc_bcast_proto != proto) {
BUGMSG(D_DURING, "actually, let's use '%c' instead.\n",
arc_bcast_proto->suffix);
proto = arc_bcast_proto;
else {
/* otherwise, we can just add the header as usual. */
_daddr = *(uint8_t *) daddr;
proto_num = lp->default_proto[_daddr];
proto = arc_proto_map[proto_num];
BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n",
proto_num, proto->suffix);
if (proto == &arc_proto_null && arc_bcast_proto != proto) {
BUGMSG(D_DURING, "actually, let's use '%c' instead.\n",
arc_bcast_proto->suffix);
proto = arc_bcast_proto;
}
}
return proto->build_header(skb, dev, type, _daddr);
}
......@@ -519,6 +560,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
return 0;
}
type = *(uint16_t *) skb_pull(skb, 2);
BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type);
if (type == ETH_P_IP) {
#ifdef CONFIG_INET
......@@ -555,10 +597,12 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
struct arc_rfc1201 *soft;
struct ArcProto *proto;
int txbuf;
unsigned long flags;
int freeskb = 0;
BUGMSG(D_DURING,
"transmit requested (status=%Xh, txbufs=%d/%d, len=%d)\n",
ASTATUS(), lp->cur_tx, lp->next_tx, skb->len);
"transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol);
pkt = (struct archdr *) skb->data;
soft = &pkt->soft.rfc1201;
......@@ -578,38 +622,49 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
/* We're busy transmitting a packet... */
netif_stop_queue(dev);
spin_lock_irqsave(&lp->lock, flags);
AINTMASK(0);
txbuf = get_arcbuf(dev);
if (txbuf != -1) {
if (proto->prepare_tx(dev, pkt, skb->len, txbuf)) {
/* done right away */
if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
!proto->ack_tx) {
/* done right away and we don't want to acknowledge
the package later - forget about it now */
lp->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
freeskb = 1;
} else {
/* do it the 'split' way */
lp->outgoing.proto = proto;
lp->outgoing.skb = skb;
lp->outgoing.pkt = pkt;
if (!proto->continue_tx)
BUGMSG(D_NORMAL, "bug! prep_tx==0, but no continue_tx!\n");
else if (proto->continue_tx(dev, txbuf)) {
BUGMSG(D_NORMAL,
"bug! continue_tx finished the first time! "
"(proto='%c')\n", proto->suffix);
if (proto->continue_tx &&
proto->continue_tx(dev, txbuf)) {
BUGMSG(D_NORMAL,
"bug! continue_tx finished the first time! "
"(proto='%c')\n", proto->suffix);
}
}
lp->next_tx = txbuf;
} else
dev_kfree_skb(skb);
} else {
freeskb = 1;
}
BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
/* make sure we didn't ignore a TX IRQ while we were in here */
AINTMASK(0);
lp->intmask |= TXFREEflag;
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
lp->intmask |= TXFREEflag|EXCNAKflag;
AINTMASK(lp->intmask);
BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
spin_unlock_irqrestore(&lp->lock, flags);
if (freeskb) {
dev_kfree_skb(skb);
}
return 0; /* no need to try again */
}
......@@ -628,7 +683,7 @@ static int go_tx(struct net_device *dev)
if (lp->cur_tx != -1 || lp->next_tx == -1)
return 0;
BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx");
BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0);
lp->cur_tx = lp->next_tx;
lp->next_tx = -1;
......@@ -640,7 +695,8 @@ static int go_tx(struct net_device *dev)
lp->stats.tx_packets++;
lp->lasttrans_dest = lp->lastload_dest;
lp->lastload_dest = 0;
lp->intmask |= TXFREEflag;
lp->excnak_pending = 0;
lp->intmask |= TXFREEflag|EXCNAKflag;
return 1;
}
......@@ -654,7 +710,7 @@ static void arcnet_timeout(struct net_device *dev)
int status = ASTATUS();
char *msg;
spin_lock_irqsave(&arcnet_lock, flags);
spin_lock_irqsave(&lp->lock, flags);
if (status & TXFREEflag) { /* transmit _DID_ finish */
msg = " - missed IRQ?";
} else {
......@@ -665,12 +721,12 @@ static void arcnet_timeout(struct net_device *dev)
}
lp->stats.tx_errors++;
/* make sure we didn't miss a TX IRQ */
/* make sure we didn't miss a TX or a EXC NAK IRQ */
AINTMASK(0);
lp->intmask |= TXFREEflag;
lp->intmask |= TXFREEflag|EXCNAKflag;
AINTMASK(lp->intmask);
spin_unlock_irqrestore(&arcnet_lock, flags);
spin_unlock_irqrestore(&lp->lock, flags);
if (jiffies - lp->last_timeout > 10*HZ) {
BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n",
......@@ -692,18 +748,19 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct arcnet_local *lp;
int recbuf, status, didsomething, boguscount;
int recbuf, status, diagstatus, didsomething, boguscount;
int retval = IRQ_NONE;
BUGMSG(D_DURING, "\n");
BUGMSG(D_DURING, "in arcnet_interrupt\n");
spin_lock(&arcnet_lock);
lp = (struct arcnet_local *) dev->priv;
if (!lp)
BUG();
spin_lock(&lp->lock);
/*
* RESET flag was enabled - if device is not running, we must clear it right
* away (but nothing else).
......@@ -712,7 +769,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (ASTATUS() & RESETflag)
ACOMMAND(CFLAGScmd | RESETclear);
AINTMASK(0);
spin_unlock(&arcnet_lock);
spin_unlock(&lp->lock);
return IRQ_HANDLED;
}
......@@ -722,6 +779,10 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
boguscount = 5;
do {
status = ASTATUS();
diagstatus = (status >> 8) & 0xFF;
BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
__FILE__,__LINE__,__FUNCTION__,status);
didsomething = 0;
/*
......@@ -761,24 +822,55 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
didsomething++;
}
if((diagstatus & EXCNAKflag)) {
BUGMSG(D_DURING, "EXCNAK IRQ (diagstat=%Xh)\n",
diagstatus);
ACOMMAND(NOTXcmd); /* disable transmit */
lp->excnak_pending = 1;
ACOMMAND(EXCNAKclear);
lp->intmask &= ~(EXCNAKflag);
didsomething++;
}
/* a transmit finished, and we're interested in it. */
if ((status & lp->intmask & TXFREEflag) || lp->timed_out) {
lp->intmask &= ~TXFREEflag;
lp->intmask &= ~(TXFREEflag|EXCNAKflag);
BUGMSG(D_DURING, "TX IRQ (stat=%Xh)\n", status);
if (lp->cur_tx != -1 && !(status & TXACKflag) && !lp->timed_out) {
if (lp->lasttrans_dest != 0) {
BUGMSG(D_EXTRA, "transmit was not acknowledged! "
"(status=%Xh, dest=%02Xh)\n",
status, lp->lasttrans_dest);
lp->stats.tx_errors++;
lp->stats.tx_carrier_errors++;
} else {
BUGMSG(D_DURING,
"broadcast was not acknowledged; that's normal "
"(status=%Xh, dest=%02Xh)\n",
status, lp->lasttrans_dest);
if (lp->cur_tx != -1 && !lp->timed_out) {
if(!(status & TXACKflag)) {
if (lp->lasttrans_dest != 0) {
BUGMSG(D_EXTRA,
"transmit was not acknowledged! "
"(status=%Xh, dest=%02Xh)\n",
status, lp->lasttrans_dest);
lp->stats.tx_errors++;
lp->stats.tx_carrier_errors++;
} else {
BUGMSG(D_DURING,
"broadcast was not acknowledged; that's normal "
"(status=%Xh, dest=%02Xh)\n",
status, lp->lasttrans_dest);
}
}
if (lp->outgoing.proto &&
lp->outgoing.proto->ack_tx) {
int ackstatus;
if(status & TXACKflag)
ackstatus=2;
else if(lp->excnak_pending)
ackstatus=1;
else
ackstatus=0;
lp->outgoing.proto
->ack_tx(dev, ackstatus);
}
}
if (lp->cur_tx != -1)
......@@ -798,8 +890,11 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (lp->outgoing.proto->continue_tx(dev, txbuf)) {
/* that was the last segment */
lp->stats.tx_bytes += lp->outgoing.skb->len;
dev_kfree_skb_irq(lp->outgoing.skb);
lp->outgoing.proto = NULL;
if(!lp->outgoing.proto->ack_tx)
{
dev_kfree_skb_irq(lp->outgoing.skb);
lp->outgoing.proto = NULL;
}
}
lp->next_tx = txbuf;
}
......@@ -810,7 +905,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
/* now process the received packet, if any */
if (recbuf != -1) {
BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq");
BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq", 0);
arcnet_rx(dev, recbuf);
release_arcbuf(dev, recbuf);
......@@ -868,6 +963,10 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
BUGMSG(D_DURING, "not recon: clearing counters anyway.\n");
}
if(didsomething) {
retval |= IRQ_HANDLED;
}
}
while (--boguscount && didsomething);
......@@ -880,8 +979,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
udelay(1);
AINTMASK(lp->intmask);
spin_unlock(&arcnet_lock);
return IRQ_RETVAL(didsomething);
spin_unlock(&lp->lock);
return retval;
}
......@@ -908,7 +1007,7 @@ void arcnet_rx(struct net_device *dev, int bufnum)
}
/* get the full header, if possible */
if (sizeof(pkt.soft) < length)
if (sizeof(pkt.soft) <= length)
lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
else {
memset(&pkt.soft, 0, sizeof(pkt.soft));
......@@ -923,7 +1022,7 @@ void arcnet_rx(struct net_device *dev, int bufnum)
lp->stats.rx_bytes += length + ARC_HDR_SIZE;
/* call the right receiver for the protocol */
if (arc_proto_map[soft->proto] != &arc_proto_null) {
if (arc_proto_map[soft->proto]->is_ip) {
BUGLVL(D_PROTO) {
struct ArcProto
*oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
......
/*
* Linux ARCnet driver - "cap mode" packet encapsulation.
* It adds sequence numbers to packets for communicating between a user space
* application and the driver. After a transmit it sends a packet with protocol
* byte 0 back up to userspace containing the sequence number of the packet
* plus the transmit status on the ARCnet.
*
* Written 2002-4 by Esben Nielsen, Vestas Wind Systems A/S
* Derived from arc-rawmode.c by Avery Pennarun.
* arc-rawmode was in turn based on skeleton.c, see below.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
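For orientation, the contract described above can be exercised from user space with an ordinary packet socket: each frame handed down carries a 4-byte cookie after the protocol byte, and a frame coming back with protocol byte 0 carries the same cookie plus one status byte (per ack_tx() in arcnet.c: 2 = acknowledged, 1 = excessive NAK, 0 = no ack). The sketch below is illustrative only and not part of the patch; the 4-byte hard-header length and the assumption that the socket delivers the frame starting at the hard header are assumptions about the layout seen from user space.
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#define ARC_HDR_SIZE 4	/* assumption: hard header length as seen from user space */
/* Wait for the cap-mode ack whose cookie matches; returns the status byte
 * (0, 1 or 2) or -1 if the socket is closed.  'fd' is assumed to be a packet
 * socket bound to the ARCnet interface with protocol ETH_P_ARCNET (0x001A). */
static int wait_for_ack(int fd, int cookie)
{
	unsigned char buf[512];
	ssize_t n;
	int got;
	while ((n = recv(fd, buf, sizeof(buf), 0)) > 0) {
		/* an ack frame is: hard header, proto byte 0, cookie, status */
		if (n < (ssize_t)(ARC_HDR_SIZE + 1 + sizeof(int) + 1))
			continue;
		if (buf[ARC_HDR_SIZE] != 0)	/* protocol byte 0 marks an ack */
			continue;
		memcpy(&got, buf + ARC_HDR_SIZE + 1, sizeof(got));
		if (got == cookie)
			return buf[ARC_HDR_SIZE + 1 + sizeof(int)];
	}
	return -1;
}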
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/arcdevice.h>
#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
static void rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length);
static int build_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
uint8_t daddr);
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
int bufnum);
static int ack_tx(struct net_device *dev, int acked);
struct ArcProto capmode_proto =
{
'r',
XMTU,
0,
rx,
build_header,
prepare_tx,
NULL,
ack_tx
};
void arcnet_cap_init(void)
{
int count;
for (count = 1; count <= 8; count++)
if (arc_proto_map[count] == arc_proto_default)
arc_proto_map[count] = &capmode_proto;
/* for cap mode, we only set the bcast proto if there's no better one */
if (arc_bcast_proto == arc_proto_default)
arc_bcast_proto = &capmode_proto;
arc_proto_default = &capmode_proto;
arc_raw_proto = &capmode_proto;
}
#ifdef MODULE
int __init init_module(void)
{
printk(VERSION);
arcnet_cap_init();
return 0;
}
void cleanup_module(void)
{
arcnet_unregister_proto(&capmode_proto);
}
MODULE_LICENSE("GPL");
#endif /* MODULE */
/* packet receiver */
static void rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length)
{
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
struct sk_buff *skb;
struct archdr *pkt = pkthdr;
char *pktbuf, *pkthdrbuf;
int ofs;
BUGMSG(D_DURING, "it's a raw(cap) packet (length=%d)\n", length);
if (length >= MinTU)
ofs = 512 - length;
else
ofs = 256 - length;
skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC);
if (skb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
lp->stats.rx_dropped++;
return;
}
skb_put(skb, length + ARC_HDR_SIZE + sizeof(int));
skb->dev = dev;
pkt = (struct archdr *) skb->data;
skb->mac.raw = skb->data;
skb_pull(skb, ARC_HDR_SIZE);
/* up to sizeof(pkt->soft) has already been copied from the card */
/* squeeze in an int for the cap encapsulation */
/* use these variables to be sure we count in bytes, not in
sizeof(struct archdr) */
pktbuf=(char*)pkt;
pkthdrbuf=(char*)pkthdr;
memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto));
memcpy(pktbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto)+sizeof(int),
pkthdrbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto),
sizeof(struct archdr)-ARC_HDR_SIZE-sizeof(pkt->soft.cap.proto));
if (length > sizeof(pkt->soft))
lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
pkt->soft.raw + sizeof(pkt->soft)
+ sizeof(int),
length - sizeof(pkt->soft));
BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
skb->protocol = __constant_htons(ETH_P_ARCNET);
;
netif_rx(skb);
dev->last_rx = jiffies;
}
/*
* Create the ARCnet hard/soft headers for cap mode.
* There aren't any soft headers in cap mode - not even the protocol id.
*/
static int build_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
uint8_t daddr)
{
int hdr_size = ARC_HDR_SIZE;
struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
BUGMSG(D_PROTO, "Preparing header for cap packet %x.\n",
*((int*)&pkt->soft.cap.cookie[0]));
/*
* Set the source hardware address.
*
* This is pretty pointless for most purposes, but it can help in
* debugging. (ARCnet does not allow us to change the source address in
* the actual packet sent.)
*/
pkt->hard.source = *dev->dev_addr;
/* see linux/net/ethernet/eth.c to see where I got the following */
if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
/*
* FIXME: fill in the last byte of the dest ipaddr here to better
* comply with RFC1051 in "noarp" mode.
*/
pkt->hard.dest = 0;
return hdr_size;
}
/* otherwise, just fill it in and go! */
pkt->hard.dest = daddr;
return hdr_size; /* success */
}
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
int bufnum)
{
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
struct arc_hardware *hard = &pkt->hard;
int ofs;
/* hard header is not included in packet length */
length -= ARC_HDR_SIZE;
/* And neither is the cookie field */
length -= sizeof(int);
BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
lp->next_tx, lp->cur_tx, bufnum);
BUGMSG(D_PROTO, "Sending for cap packet %x.\n",
*((int*)&pkt->soft.cap.cookie[0]));
if (length > XMTU) {
/* should never happen! other people already check for this. */
BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
length, XMTU);
length = XMTU;
}
if (length > MinTU) {
hard->offset[0] = 0;
hard->offset[1] = ofs = 512 - length;
} else if (length > MTU) {
hard->offset[0] = 0;
hard->offset[1] = ofs = 512 - length - 3;
} else
hard->offset[0] = ofs = 256 - length;
BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
length,ofs);
// Copy the arcnet-header + the protocol byte down:
lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
sizeof(pkt->soft.cap.proto));
// Skip the extra integer we have written into it as a cookie
// but write the rest of the message:
lp->hw.copy_to_card(dev, bufnum, ofs+1,
((unsigned char*)&pkt->soft.cap.mes),length-1);
lp->lastload_dest = hard->dest;
return 1; /* done */
}
static int ack_tx(struct net_device *dev, int acked)
{
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
struct sk_buff *ackskb;
struct archdr *ackpkt;
int length=sizeof(struct arc_cap);
BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
lp->outgoing.skb->protocol, acked);
BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
/* Now alloc a skb to send back up through the layers: */
ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
if (ackskb == NULL) {
BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
goto free_outskb;
}
skb_put(ackskb, length + ARC_HDR_SIZE );
ackskb->dev = dev;
ackpkt = (struct archdr *) ackskb->data;
ackskb->mac.raw = ackskb->data;
/* skb_pull(ackskb, ARC_HDR_SIZE); */
memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap));
ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
ackpkt->soft.cap.mes.ack=acked;
BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
*((int*)&ackpkt->soft.cap.cookie[0]));
ackskb->protocol = __constant_htons(ETH_P_ARCNET);
BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
netif_rx(ackskb);
free_outskb:
dev_kfree_skb_irq(lp->outgoing.skb);
lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
return 0;
}
......@@ -41,7 +41,6 @@
#include <asm/io.h>
#define VERSION "arcnet: COM20020 ISA support (by David Woodhouse et al.)\n"
......
......@@ -117,7 +117,7 @@ int com20020_check(struct net_device *dev)
lp->config = 0x21 | (lp->timeout << 3) | (lp->backplane << 2);
/* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
SETCONF;
outb(0x42, ioaddr + 7);
outb(0x42, ioaddr + BUS_ALIGN*7);
status = ASTATUS();
......@@ -129,7 +129,7 @@ int com20020_check(struct net_device *dev)
/* Enable TX */
outb(0x39, _CONFIG);
outb(inb(ioaddr + 8), ioaddr + 7);
outb(inb(ioaddr + BUS_ALIGN*8), ioaddr + BUS_ALIGN*7);
ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
......@@ -173,7 +173,7 @@ int com20020_found(struct net_device *dev, int shared)
dev->set_multicast_list = com20020_set_mc_list;
if (!dev->dev_addr[0])
dev->dev_addr[0] = inb(ioaddr + 8); /* FIXME: do this some other way! */
dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
SET_SUBADR(SUB_SETUP1);
outb(lp->setup, _XREG);
......@@ -188,7 +188,6 @@ int com20020_found(struct net_device *dev, int shared)
outb(0x18, _COMMAND);
}
lp->config = 0x20 | (lp->timeout << 3) | (lp->backplane << 2) | 1;
/* Default 0x38 + register: Node ID */
SETCONF;
......@@ -235,15 +234,19 @@ int com20020_found(struct net_device *dev, int shared)
static int com20020_reset(struct net_device *dev, int really_reset)
{
struct arcnet_local *lp = (struct arcnet_local *) dev->priv;
short ioaddr = dev->base_addr;
u_int ioaddr = dev->base_addr;
u_char inbyte;
BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
__FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name);
BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
dev->name, ASTATUS());
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
/* power-up defaults */
SETCONF;
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
if (really_reset) {
/* reset the card */
......@@ -251,17 +254,22 @@ static int com20020_reset(struct net_device *dev, int really_reset)
mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */
}
/* clear flags & end reset */
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
/* verify that the ARCnet signature byte is present */
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
if (inbyte != TESTvalue) {
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
return 1;
}
/* enable extended (512-byte) packets */
ACOMMAND(CONFIGcmd | EXTconf);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__);
/* done! return success. */
return 0;
......@@ -270,22 +278,24 @@ static int com20020_reset(struct net_device *dev, int really_reset)
static void com20020_setmask(struct net_device *dev, int mask)
{
short ioaddr = dev->base_addr;
u_int ioaddr = dev->base_addr;
BUGMSG(D_DURING, "Setting mask to %x at %x\n",mask,ioaddr);
AINTMASK(mask);
}
static void com20020_command(struct net_device *dev, int cmd)
{
short ioaddr = dev->base_addr;
u_int ioaddr = dev->base_addr;
ACOMMAND(cmd);
}
static int com20020_status(struct net_device *dev)
{
short ioaddr = dev->base_addr;
return ASTATUS();
u_int ioaddr = dev->base_addr;
return ASTATUS() + (ADIAGSTATUS()<<8);
}
static void com20020_close(struct net_device *dev)
......
......@@ -47,9 +47,12 @@ struct ArcProto rfc1051_proto =
{
.suffix = 's',
.mtu = XMTU - RFC1051_HDR_SIZE,
.is_ip = 1,
.rx = rx,
.build_header = build_header,
.prepare_tx = prepare_tx,
.continue_tx = NULL,
.ack_tx = NULL
};
......
......@@ -47,10 +47,12 @@ struct ArcProto rfc1201_proto =
{
.suffix = 'a',
.mtu = 1500, /* could be more, but some receivers can't handle it... */
.is_ip = 1, /* This is for sending IP and ARP packets */
.rx = rx,
.build_header = build_header,
.prepare_tx = prepare_tx,
.continue_tx = continue_tx,
.ack_tx = NULL
};
......
/* net_init.c: Initialization for network devices. */
/*
Written 1993,1994,1995 by Donald Becker.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
This file contains the initialization for the "pl14+" style ethernet
drivers. It should eventually replace most of drivers/net/Space.c.
Its primary advantage is that it's able to allocate low-memory buffers.
A secondary advantage is that the dangerous NE*000 netcards can reserve
their I/O port region before the SCSI probes start.
Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
ethdev_index[MAX_ETH_CARDS]
register_netdev() / unregister_netdev()
Modifications by Wolfgang Walter
Use dev_close cleanly so we always shut things down tidily.
Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
24/09/96 - Paul Norton: Add token-ring variants of the netdev functions.
08/11/99 - Alan Cox: Got fed up of the mess in this file and cleaned it
up. We now share common code and have regularised name
allocation setups. Abolished the 16 card limits.
03/19/2000 - jgarzik and Urban Widmark: init_etherdev 32-byte align
03/21/2001 - jgarzik: alloc_etherdev and friends
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/hippidevice.h>
#include <linux/trdevice.h>
#include <linux/fcdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/rtnetlink.h>
#include <net/neighbour.h>
/* The network devices currently exist only in the socket namespace, so these
entries are unused. The only ones that make sense are
open start the ethercard
close stop the ethercard
ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
One can also imagine getting raw packets using
read & write
but this is probably better handled by a raw packet socket.
Given that almost all of these functions are handled in the current
socket-based scheme, putting ethercard devices in /dev/ seems pointless.
[Removed all support for /dev network devices. When someone adds
streams then by magic we get them, but otherwise they are un-needed
and a space waste]
*/
struct net_device *alloc_netdev(int sizeof_priv, const char *mask,
void (*setup)(struct net_device *))
{
void *p;
struct net_device *dev;
int alloc_size;
/* ensure 32-byte alignment of both the device and private area */
alloc_size = (sizeof(struct net_device) + NETDEV_ALIGN_CONST)
& ~NETDEV_ALIGN_CONST;
alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
p = kmalloc (alloc_size, GFP_KERNEL);
if (!p) {
printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
return NULL;
}
memset(p, 0, alloc_size);
dev = (struct net_device *)(((long)p + NETDEV_ALIGN_CONST)
& ~NETDEV_ALIGN_CONST);
dev->padded = (char *)dev - (char *)p;
if (sizeof_priv)
dev->priv = netdev_priv(dev);
setup(dev);
strcpy(dev->name, mask);
return dev;
}
EXPORT_SYMBOL(alloc_netdev);
int register_netdev(struct net_device *dev)
{
int err;
rtnl_lock();
/*
* If the name is a format string the caller wants us to
* do a name allocation
*/
if (strchr(dev->name, '%'))
{
err = dev_alloc_name(dev, dev->name);
if (err < 0)
goto out;
}
/*
* Back compatibility hook. Kill this one in 2.5
*/
if (dev->name[0]==0 || dev->name[0]==' ')
{
err = dev_alloc_name(dev, "eth%d");
if (err < 0)
goto out;
}
err = register_netdevice(dev);
out:
rtnl_unlock();
return err;
}
void unregister_netdev(struct net_device *dev)
{
rtnl_lock();
unregister_netdevice(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(register_netdev);
EXPORT_SYMBOL(unregister_netdev);
......@@ -25,7 +25,6 @@
#define bool int
#endif
/*
* RECON_THRESHOLD is the maximum number of RECON messages to receive
* within one minute before printing a "cabling problem" warning. The
......@@ -74,6 +73,7 @@
#define D_SKB 1024 /* show skb's */
#define D_SKB_SIZE 2048 /* show skb sizes */
#define D_TIMING 4096 /* show time needed to copy buffers to card */
#define D_DEBUG 8192 /* very detailed debugging, line by line */
#ifndef ARCNET_DEBUG_MAX
#define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */
......@@ -135,6 +135,7 @@ extern int arcnet_debug;
#define TXACKflag 0x02 /* transmitted msg. ackd */
#define RECONflag 0x04 /* network reconfigured */
#define TESTflag 0x08 /* test flag */
#define EXCNAKflag 0x08 /* excessive NAK flag */
#define RESETflag 0x10 /* power-on-reset */
#define RES1flag 0x20 /* reserved - usually set by jumper */
#define RES2flag 0x40 /* reserved - usually set by jumper */
......@@ -162,6 +163,8 @@ extern int arcnet_debug;
#define RESETclear 0x08 /* power-on-reset */
#define CONFIGclear 0x10 /* system reconfigured */
#define EXCNAKclear 0x0E /* clear and acknowledge the excessive NAK bit */
/* flags for "load test flags" command */
#define TESTload 0x08 /* test flag (diagnostic) */
......@@ -187,6 +190,7 @@ extern int arcnet_debug;
struct ArcProto {
char suffix; /* a for RFC1201, e for ether-encap, etc. */
int mtu; /* largest possible packet */
int is_ip; /* This is an IP plugin - not a raw one */
void (*rx) (struct net_device * dev, int bufnum,
struct archdr * pkthdr, int length);
......@@ -197,9 +201,11 @@ struct ArcProto {
int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length,
int bufnum);
int (*continue_tx) (struct net_device * dev, int bufnum);
int (*ack_tx) (struct net_device * dev, int acked);
};
extern struct ArcProto *arc_proto_map[256], *arc_proto_default, *arc_bcast_proto;
extern struct ArcProto *arc_proto_map[256], *arc_proto_default,
*arc_bcast_proto, *arc_raw_proto;
extern struct ArcProto arc_proto_null;
......@@ -251,6 +257,10 @@ struct arcnet_local {
char *card_name; /* card ident string */
int card_flags; /* special card features */
/* On preemptive and SMP a lock is needed */
spinlock_t lock;
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
* which can be used for either sending or receiving. The new dynamic
......@@ -279,6 +289,8 @@ struct arcnet_local {
int num_recons; /* number of RECONs between first and last. */
bool network_down; /* do we think the network is down? */
bool excnak_pending; /* We just got an excessive NAK interrupt */
struct {
uint16_t sequence; /* sequence number (incs with each packet) */
uint16_t aborted_seq;
......@@ -323,9 +335,10 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
#endif
#if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX)
void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc);
void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc,
int take_arcnet_lock);
#else
#define arcnet_dump_packet(dev, bufnum, desc) ;
#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ;
#endif
void arcnet_unregister_proto(struct ArcProto *proto);
......
......@@ -34,17 +34,24 @@ int com20020_found(struct net_device *dev, int shared);
#define ARCNET_TOTAL_SIZE 8
/* various register addresses */
#define _INTMASK (ioaddr+0) /* writable */
#define _STATUS (ioaddr+0) /* readable */
#define _COMMAND (ioaddr+1) /* standard arcnet commands */
#define _DIAGSTAT (ioaddr+1) /* diagnostic status register */
#define _ADDR_HI (ioaddr+2) /* control registers for IO-mapped memory */
#define _ADDR_LO (ioaddr+3)
#define _MEMDATA (ioaddr+4) /* data port for IO-mapped memory */
#define _SUBADR (ioaddr+5) /* the extended port _XREG refers to */
#define _CONFIG (ioaddr+6) /* configuration register */
#define _XREG (ioaddr+7) /* extra registers (indexed by _CONFIG
or _SUBADR) */
#ifdef CONFIG_SA1100_CT6001
#define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */
#else
#define BUS_ALIGN 1
#endif
#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
#define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */
#define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */
#define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */
#define _ADDR_LO (ioaddr+BUS_ALIGN*3)
#define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */
#define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */
#define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */
#define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG
or _SUBADR) */
/* in the ADDR_HI register */
#define RDDATAflag 0x80 /* next access is a read (not a write) */
......@@ -99,6 +106,7 @@ int com20020_found(struct net_device *dev, int shared);
}
#define ASTATUS() inb(_STATUS)
#define ADIAGSTATUS() inb(_DIAGSTAT)
#define ACOMMAND(cmd) outb((cmd),_COMMAND)
#define AINTMASK(msk) outb((msk),_INTMASK)
......
......@@ -56,6 +56,9 @@
#define CRYPTO_UNSPEC 0
#define CRYPTO_MAX_ALG_NAME 64
#define CRYPTO_DIR_ENCRYPT 1
#define CRYPTO_DIR_DECRYPT 0
struct scatterlist;
/*
......
......@@ -23,6 +23,9 @@
* These are the defined ARCnet Protocol ID's.
*/
/* CAP mode */
/* No macro, but protocol IDs 1-8 are used */
/* RFC1201 Protocol ID's */
#define ARC_P_IP 212 /* 0xD4 */
#define ARC_P_IPV6 196 /* 0xC4: RFC2497 */
......@@ -86,6 +89,16 @@ struct arc_eth_encap
#define ETH_ENCAP_HDR_SIZE 14
struct arc_cap
{
uint8_t proto;
uint8_t cookie[sizeof(int)]; /* Actually NOT sent over the network */
union {
uint8_t ack;
uint8_t raw[0]; /* 507 bytes */
} mes;
};
/*
* The data needed by the actual arcnet hardware.
*
......@@ -116,6 +129,7 @@ struct archdr
struct arc_rfc1201 rfc1201;
struct arc_rfc1051 rfc1051;
struct arc_eth_encap eth_encap;
struct arc_cap cap;
uint8_t raw[0]; /* 508 bytes */
} soft;
};
......
......@@ -91,6 +91,7 @@
#define ETH_P_IRDA 0x0017 /* Linux-IrDA */
#define ETH_P_ECONET 0x0018 /* Acorn Econet */
#define ETH_P_HDLC 0x0019 /* HDLC frames */
#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */
/*
* This is an Ethernet frame header.
......
#ifndef _EBT_ULOG_H
#define _EBT_ULOG_H
#define EBT_ULOG_DEFAULT_NLGROUP 0
#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
#define EBT_ULOG_PREFIX_LEN 32
#define EBT_ULOG_MAX_QLEN 50
#define EBT_ULOG_WATCHER "ulog"
#define EBT_ULOG_VERSION 1
struct ebt_ulog_info {
uint32_t nlgroup;
unsigned int cprange;
unsigned int qthreshold;
char prefix[EBT_ULOG_PREFIX_LEN];
};
typedef struct ebt_ulog_packet_msg {
int version;
char indev[IFNAMSIZ];
char outdev[IFNAMSIZ];
char physindev[IFNAMSIZ];
char physoutdev[IFNAMSIZ];
char prefix[EBT_ULOG_PREFIX_LEN];
struct timeval stamp;
unsigned long mark;
unsigned int hook;
size_t data_len;
/* The complete packet, including Ethernet header and perhaps
* the VLAN header, is appended */
unsigned char data[0] __attribute__
((aligned (__alignof__(struct ebt_ulog_info))));
} ebt_ulog_packet_msg_t;
#endif /* _EBT_ULOG_H */
......@@ -201,9 +201,9 @@ struct ebt_watcher
{
struct list_head list;
const char name[EBT_FUNCTION_MAXNAMELEN];
void (*watcher)(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *watcherdata,
unsigned int datalen);
void (*watcher)(const struct sk_buff *skb, unsigned int hooknr,
const struct net_device *in, const struct net_device *out,
const void *watcherdata, unsigned int datalen);
/* 0 == let it in */
int (*check)(const char *tablename, unsigned int hookmask,
const struct ebt_entry *e, void *watcherdata, unsigned int datalen);
......
......@@ -11,13 +11,8 @@ enum ip_nat_manip_type
IP_NAT_MANIP_DST
};
#ifndef CONFIG_IP_NF_NAT_LOCAL
/* SRC manip occurs only on POST_ROUTING */
#define HOOK2MANIP(hooknum) ((hooknum) != NF_IP_POST_ROUTING)
#else
/* SRC manip occurs POST_ROUTING or LOCAL_IN */
#define HOOK2MANIP(hooknum) ((hooknum) != NF_IP_POST_ROUTING && (hooknum) != NF_IP_LOCAL_IN)
#endif
#define IP_NAT_RANGE_MAP_IPS 1
#define IP_NAT_RANGE_PROTO_SPECIFIED 2
......
......@@ -192,6 +192,7 @@ enum
TCA_U32_ACT,
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
__TCA_U32_MAX
};
......
......@@ -292,6 +292,8 @@ struct sk_buff {
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *alloc_skb(unsigned int size, int priority);
extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size, int priority);
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
......@@ -935,6 +937,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
*
* %NULL is returned if there is no free memory.
*/
#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
......@@ -943,6 +946,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
skb_reserve(skb, 16);
return skb;
}
#else
extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
#endif
/**
* dev_alloc_skb - allocate an skbuff for sending
......
......@@ -87,8 +87,8 @@ extern int tcf_register_action(struct tc_action_ops *a);
extern int tcf_unregister_action(struct tc_action_ops *a);
extern void tcf_action_destroy(struct tc_action *a, int bind);
extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res);
extern int tcf_action_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,char *n, int ovr, int bind);
extern int tcf_action_init_1(struct rtattr *rta, struct rtattr *est, struct tc_action *a,char *n, int ovr, int bind);
extern struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est, char *n, int ovr, int bind, int *err);
extern struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, char *n, int ovr, int bind, int *err);
extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
......
......@@ -70,17 +70,10 @@ tcf_change_act_police(struct tcf_proto *tp, struct tc_action **action,
int ret;
struct tc_action *act;
act = kmalloc(sizeof(*act), GFP_KERNEL);
if (NULL == act)
return -ENOMEM;
memset(act, 0, sizeof(*act));
ret = tcf_action_init_1(act_police_tlv, rate_tlv, act, "police",
TCA_ACT_NOREPLACE, TCA_ACT_BIND);
if (ret < 0) {
tcf_action_destroy(act, TCA_ACT_UNBIND);
act = tcf_action_init_1(act_police_tlv, rate_tlv, "police",
TCA_ACT_NOREPLACE, TCA_ACT_BIND, &ret);
if (act == NULL)
return ret;
}
act->type = TCA_OLD_COMPAT;
......@@ -103,17 +96,10 @@ tcf_change_act(struct tcf_proto *tp, struct tc_action **action,
int ret;
struct tc_action *act;
act = kmalloc(sizeof(*act), GFP_KERNEL);
if (NULL == act)
return -ENOMEM;
memset(act, 0, sizeof(*act));
ret = tcf_action_init(act_tlv, rate_tlv, act, NULL,
TCA_ACT_NOREPLACE, TCA_ACT_BIND);
if (ret < 0) {
tcf_action_destroy(act, TCA_ACT_UNBIND);
act = tcf_action_init(act_tlv, rate_tlv, NULL,
TCA_ACT_NOREPLACE, TCA_ACT_BIND, &ret);
if (act == NULL)
return ret;
}
if (*action) {
tcf_tree_lock(tp);
......
......@@ -243,6 +243,7 @@ extern int x25_validate_nr(struct sock *, unsigned short);
extern void x25_write_internal(struct sock *, int);
extern int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
extern int x25_check_calluserdata(struct x25_calluserdata *,struct x25_calluserdata *);
/* x25_timer.c */
extern void x25_start_heartbeat(struct sock *);
......
......@@ -189,8 +189,22 @@ config BRIDGE_EBT_LOG
tristate "ebt: log support"
depends on BRIDGE_NF_EBTABLES
help
This option adds the log target, that you can use in any rule in
any ebtables table. It records the frame header to the syslog.
This option adds the log watcher, which you can use in any rule
in any ebtables table. It records info about the frame header
to the syslog.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_ULOG
tristate "ebt: ulog support"
depends on BRIDGE_NF_EBTABLES
help
This option adds the ulog watcher, which you can use in any rule
in any ebtables table. The packet is passed to a userspace
logging daemon using netlink multicast sockets. This differs
from the log watcher in the sense that the complete packet is
sent to userspace instead of a descriptive text and that
netlink multicast sockets are used instead of the syslog.
To compile it as a module, choose M here. If unsure, say N.
......
......@@ -29,3 +29,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
# watchers
obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
......@@ -55,8 +55,9 @@ static void print_MAC(unsigned char *p)
}
#define myNIPQUAD(a) a[0], a[1], a[2], a[3]
static void ebt_log(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *data, unsigned int datalen)
static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
const struct net_device *in, const struct net_device *out,
const void *data, unsigned int datalen)
{
struct ebt_log_info *info = (struct ebt_log_info *)data;
char level_string[4] = "< >";
......
/*
* netfilter module for userspace bridged Ethernet frames logging daemons
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* November, 2004
*
* Based on ipt_ULOG.c, which is
* (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
*
* This module accepts two parameters:
*
* nlbufsiz:
* The parameter specifies how big the buffer for each netlink multicast
* group is. E.g. if you say nlbufsiz=8192, up to 8kB of packets will
* get accumulated in the kernel until they are sent to userspace. It is
* NOT possible to allocate more than 128kB, and it is strongly discouraged,
* because atomically allocating 128kB inside the network rx softirq is not
* reliable. Please also keep in mind that this buffer size is allocated for
* each nlgroup you are using, so the total kernel memory usage increases
* by that factor.
*
* flushtimeout:
* Specifies after how many hundredths of a second the queue should be
* flushed even if it is not full yet.
*
*/
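A hedged sketch of the receiving side may help here: a user-space listener binds a NETLINK_NFLOG socket to one multicast group and walks the ebt_ulog_packet_msg_t messages this watcher queues. It is illustrative only and not part of the patch; the group must match the netlink group configured in the ebtables ulog rule, and the extra includes are an assumption about what ebt_ulog.h needs when pulled into user space.
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <linux/if.h>
#include <linux/netlink.h>
#include <linux/netfilter_bridge/ebt_ulog.h>
int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1 << EBT_ULOG_DEFAULT_NLGROUP,	/* group 0 */
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NFLOG);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		if (len < 0)
			return 1;
		while (NLMSG_OK(nlh, len)) {
			ebt_ulog_packet_msg_t *pm = NLMSG_DATA(nlh);
			/* prefix, captured length and bridge port names */
			printf("%s: %u bytes in on %s, out on %s\n",
			       pm->prefix, (unsigned int)pm->data_len,
			       pm->indev, pm->outdev);
			nlh = NLMSG_NEXT(nlh, len);
		}
	}
}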
#include <linux/module.h>
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ulog.h>
#include <net/sock.h>
#include "../br_private.h"
#define PRINTR(format, args...) do { if (net_ratelimit()) \
printk(format , ## args); } while (0)
static unsigned int nlbufsiz = 4096;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
"(defaults to 4096)");
static unsigned int flushtimeout = 10;
module_param(flushtimeout, uint, 0600);
MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second) "
"(defaults to 10)");
typedef struct {
unsigned int qlen; /* number of nlmsgs in the skb */
struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
struct sk_buff *skb; /* the pre-allocated skb */
struct timer_list timer; /* the timer function */
spinlock_t lock; /* the per-queue lock */
} ebt_ulog_buff_t;
static ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
static struct sock *ebtulognl;
/* send one ulog_buff_t to userspace */
static void ulog_send(unsigned int nlgroup)
{
ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup];
if (timer_pending(&ub->timer))
del_timer(&ub->timer);
/* last nlmsg needs NLMSG_DONE */
if (ub->qlen > 1)
ub->lastnlh->nlmsg_type = NLMSG_DONE;
NETLINK_CB(ub->skb).dst_groups = 1 << nlgroup;
netlink_broadcast(ebtulognl, ub->skb, 0, 1 << nlgroup, GFP_ATOMIC);
ub->qlen = 0;
ub->skb = NULL;
}
/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
spin_lock_bh(&ulog_buffers[data].lock);
if (ulog_buffers[data].skb)
ulog_send(data);
spin_unlock_bh(&ulog_buffers[data].lock);
}
static struct sk_buff *ulog_alloc_skb(unsigned int size)
{
struct sk_buff *skb;
skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
if (!skb) {
PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
"of size %ub!\n", nlbufsiz);
if (size < nlbufsiz) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
PRINTR(KERN_ERR "ebt_ulog: can't even allocate "
"buffer of size %ub\n", size);
}
}
return skb;
}
static void ebt_ulog(const struct sk_buff *skb, unsigned int hooknr,
const struct net_device *in, const struct net_device *out,
const void *data, unsigned int datalen)
{
ebt_ulog_packet_msg_t *pm;
size_t size, copy_len;
struct nlmsghdr *nlh;
struct ebt_ulog_info *uloginfo = (struct ebt_ulog_info *)data;
unsigned int group = uloginfo->nlgroup;
ebt_ulog_buff_t *ub = &ulog_buffers[group];
spinlock_t *lock = &ub->lock;
if ((uloginfo->cprange == 0) ||
(uloginfo->cprange > skb->len + ETH_HLEN))
copy_len = skb->len + ETH_HLEN;
else
copy_len = uloginfo->cprange;
size = NLMSG_SPACE(sizeof(*pm) + copy_len);
if (size > nlbufsiz) {
PRINTR("ebt_ulog: Size %d needed, but nlbufsiz=%d\n",
size, nlbufsiz);
return;
}
spin_lock_bh(lock);
if (!ub->skb) {
if (!(ub->skb = ulog_alloc_skb(size)))
goto alloc_failure;
} else if (size > skb_tailroom(ub->skb)) {
ulog_send(group);
if (!(ub->skb = ulog_alloc_skb(size)))
goto alloc_failure;
}
nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
size - NLMSG_ALIGN(sizeof(*nlh)));
ub->qlen++;
pm = NLMSG_DATA(nlh);
/* Fill in the ulog data */
pm->version = EBT_ULOG_VERSION;
do_gettimeofday(&pm->stamp);
if (ub->qlen == 1)
ub->skb->stamp = pm->stamp;
pm->data_len = copy_len;
pm->mark = skb->nfmark;
pm->hook = hooknr;
if (uloginfo->prefix != NULL)
strcpy(pm->prefix, uloginfo->prefix);
else
*(pm->prefix) = '\0';
if (in) {
strcpy(pm->physindev, in->name);
/* If in isn't a bridge, then physindev==indev */
if (in->br_port)
strcpy(pm->indev, in->br_port->br->dev->name);
else
strcpy(pm->indev, in->name);
} else
pm->indev[0] = pm->physindev[0] = '\0';
if (out) {
/* If out exists, then out is a bridge port */
strcpy(pm->physoutdev, out->name);
strcpy(pm->outdev, out->br_port->br->dev->name);
} else
pm->outdev[0] = pm->physoutdev[0] = '\0';
if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
BUG();
if (ub->qlen > 1)
ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
ub->lastnlh = nlh;
if (ub->qlen >= uloginfo->qthreshold)
ulog_send(group);
else if (!timer_pending(&ub->timer)) {
ub->timer.expires = jiffies + flushtimeout * HZ / 100;
add_timer(&ub->timer);
}
unlock:
spin_unlock_bh(lock);
return;
nlmsg_failure:
printk(KERN_CRIT "ebt_ulog: error during NLMSG_PUT. This should "
"not happen, please report to author.\n");
goto unlock;
alloc_failure:
goto unlock;
}
static int ebt_ulog_check(const char *tablename, unsigned int hookmask,
const struct ebt_entry *e, void *data, unsigned int datalen)
{
struct ebt_ulog_info *uloginfo = (struct ebt_ulog_info *)data;
if (datalen != EBT_ALIGN(sizeof(struct ebt_ulog_info)) ||
uloginfo->nlgroup > 31)
return -EINVAL;
uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
return 0;
}
static struct ebt_watcher ulog = {
.name = EBT_ULOG_WATCHER,
.watcher = ebt_ulog,
.check = ebt_ulog_check,
.me = THIS_MODULE,
};
static int __init init(void)
{
int i, ret = 0;
if (nlbufsiz >= 128*1024) {
printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB,"
" please try a smaller nlbufsiz parameter.\n");
return -EINVAL;
}
/* initialize ulog_buffers */
for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
init_timer(&ulog_buffers[i].timer);
ulog_buffers[i].timer.function = ulog_timer;
ulog_buffers[i].timer.data = i;
ulog_buffers[i].lock = SPIN_LOCK_UNLOCKED;
}
ebtulognl = netlink_kernel_create(NETLINK_NFLOG, NULL);
if (!ebtulognl)
ret = -ENOMEM;
else if ((ret = ebt_register_watcher(&ulog)))
sock_release(ebtulognl->sk_socket);
return ret;
}
static void __exit fini(void)
{
ebt_ulog_buff_t *ub;
int i;
ebt_unregister_watcher(&ulog);
for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
ub = &ulog_buffers[i];
if (timer_pending(&ub->timer))
del_timer(&ub->timer);
spin_lock_bh(&ub->lock);
if (ub->skb) {
kfree_skb(ub->skb);
ub->skb = NULL;
}
spin_unlock_bh(&ub->lock);
}
sock_release(ebtulognl->sk_socket);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("ebtables userspace logging module for bridged Ethernet"
" frames");
......@@ -90,10 +90,10 @@ static struct ebt_target ebt_standard_target =
{ {NULL, NULL}, EBT_STANDARD_TARGET, NULL, NULL, NULL, NULL};
static inline int ebt_do_watcher (struct ebt_entry_watcher *w,
const struct sk_buff *skb, const struct net_device *in,
const struct sk_buff *skb, unsigned int hooknr, const struct net_device *in,
const struct net_device *out)
{
w->u.watcher->watcher(skb, in, out, w->data,
w->u.watcher->watcher(skb, hooknr, in, out, w->data,
w->watcher_size);
/* watchers don't give a verdict */
return 0;
......@@ -208,7 +208,7 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff **pskb,
/* these should only watch: not modify, nor tell us
what to do with the packet */
EBT_WATCHER_ITERATE(point, ebt_do_watcher, *pskb, in,
EBT_WATCHER_ITERATE(point, ebt_do_watcher, *pskb, hook, in,
out);
t = (struct ebt_entry_target *)
......
......@@ -1493,7 +1493,7 @@ int netif_rx_ni(struct sk_buff *skb)
preempt_disable();
err = netif_rx(skb);
if (softirq_pending(smp_processor_id()))
if (local_softirq_pending())
do_softirq();
preempt_enable();
......@@ -2700,8 +2700,7 @@ static inline void net_set_todo(struct net_device *dev)
* chain. 0 is returned on success. A negative errno code is returned
* on a failure to set up the device, or if the name is a duplicate.
*
* Callers must hold the rtnl semaphore. See the comment at the
* end of Space.c for details about the locking. You may want
* Callers must hold the rtnl semaphore. You may want
* register_netdev() instead of this.
*
* BUGS:
......@@ -2822,6 +2821,51 @@ int register_netdevice(struct net_device *dev)
goto out;
}
/**
* register_netdev - register a network device
* @dev: device to register
*
* Take a completed network device structure and add it to the kernel
* interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
* chain. 0 is returned on success. A negative errno code is returned
* on a failure to set up the device, or if the name is a duplicate.
*
* This is a wrapper around register_netdevice that takes the rtnl semaphore
* and expands the device name if you passed a format string to
* alloc_netdev.
*/
int register_netdev(struct net_device *dev)
{
int err;
rtnl_lock();
/*
* If the name is a format string the caller wants us to do a
* name allocation.
*/
if (strchr(dev->name, '%')) {
err = dev_alloc_name(dev, dev->name);
if (err < 0)
goto out;
}
/*
* Back compatibility hook. Kill this one in 2.5
*/
if (dev->name[0] == 0 || dev->name[0] == ' ') {
err = dev_alloc_name(dev, "eth%d");
if (err < 0)
goto out;
}
err = register_netdevice(dev);
out:
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(register_netdev);
/*
* netdev_wait_allrefs - wait until all references are gone.
*
......@@ -2964,6 +3008,46 @@ void netdev_run_todo(void)
up(&net_todo_run_mutex);
}
/**
* alloc_netdev - allocate network device
* @sizeof_priv: size of private data to allocate space for
* @name: device name format string
* @setup: callback to initialize device
*
* Allocates a struct net_device with private data area for driver use
* and performs basic initialization.
*/
struct net_device *alloc_netdev(int sizeof_priv, const char *name,
void (*setup)(struct net_device *))
{
void *p;
struct net_device *dev;
int alloc_size;
/* ensure 32-byte alignment of both the device and private area */
alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
p = kmalloc(alloc_size, GFP_KERNEL);
if (!p) {
printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
return NULL;
}
memset(p, 0, alloc_size);
dev = (struct net_device *)
(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
dev->padded = (char *)dev - (char *)p;
if (sizeof_priv)
dev->priv = netdev_priv(dev);
setup(dev);
strcpy(dev->name, name);
return dev;
}
EXPORT_SYMBOL(alloc_netdev);
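As a usage note, the intended calling pattern for a driver looks roughly like the sketch below. It is illustrative only (the mydrv_* names and private-data contents are hypothetical, and the usual netdevice/etherdevice headers are assumed): allocate the device with a private area and a "%d" name template, fill in the private data via netdev_priv(), and let register_netdev() take the rtnl semaphore and expand the name.
struct mydrv_priv {
	int irq;			/* whatever the driver needs */
};
static void mydrv_setup(struct net_device *dev)
{
	ether_setup(dev);		/* generic Ethernet defaults */
}
static int __init mydrv_probe(void)
{
	struct net_device *dev;
	struct mydrv_priv *priv;
	int err;
	dev = alloc_netdev(sizeof(struct mydrv_priv), "eth%d", mydrv_setup);
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);	/* the aligned private area set up above */
	priv->irq = 10;			/* illustrative value */
	err = register_netdev(dev);	/* takes rtnl, expands "eth%d" */
	if (err)
		free_netdev(dev);
	return err;
}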
/**
* free_netdev - free network device
* @dev: device
......@@ -3006,8 +3090,7 @@ void synchronize_net(void)
* from the kernel tables. On success 0 is returned, on a failure
* a negative errno code is returned.
*
* Callers must hold the rtnl semaphore. See the comment at the
* end of Space.c for details about the locking. You may want
* Callers must hold the rtnl semaphore. You may want
* unregister_netdev() instead of this.
*/
......@@ -3085,6 +3168,27 @@ int unregister_netdevice(struct net_device *dev)
return 0;
}
/**
* unregister_netdev - remove device from the kernel
* @dev: device
*
* This function shuts down a device interface and removes it
* from the kernel tables. On success 0 is returned, on a failure
* a negative errno code is returned.
*
* This is just a wrapper for unregister_netdevice that takes
* the rtnl semaphore. In general you want to use this and not
* unregister_netdevice.
*/
void unregister_netdev(struct net_device *dev)
{
rtnl_lock();
unregister_netdevice(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
#ifdef CONFIG_HOTPLUG_CPU
static int dev_cpu_callback(struct notifier_block *nfb,
unsigned long action,
......
......@@ -267,7 +267,22 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct net_device *dev;
int err, send_addr_notify = 0;
dev = dev_get_by_index(ifm->ifi_index);
if (ifm->ifi_index >= 0)
dev = dev_get_by_index(ifm->ifi_index);
else if (ida[IFLA_IFNAME - 1]) {
char ifname[IFNAMSIZ];
if (RTA_PAYLOAD(ida[IFLA_IFNAME - 1]) > RTA_ALIGN(sizeof(ifname)))
return -EINVAL;
memset(ifname, 0, sizeof(ifname));
memcpy(ifname, RTA_DATA(ida[IFLA_IFNAME - 1]),
RTA_PAYLOAD(ida[IFLA_IFNAME - 1]));
ifname[IFNAMSIZ - 1] = '\0';
dev = dev_get_by_name(ifname);
} else
return -EINVAL;
if (!dev)
return -ENODEV;
......@@ -358,10 +373,10 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
dev->weight = *((u32 *) RTA_DATA(ida[IFLA_WEIGHT - 1]));
}
if (ida[IFLA_IFNAME - 1]) {
if (ifm->ifi_index >= 0 && ida[IFLA_IFNAME - 1]) {
char ifname[IFNAMSIZ];
if (ida[IFLA_IFNAME - 1]->rta_len > RTA_LENGTH(sizeof(ifname)))
if (RTA_PAYLOAD(ida[IFLA_IFNAME - 1]) > RTA_ALIGN(sizeof(ifname)))
goto out;
memset(ifname, 0, sizeof(ifname));
......
......@@ -163,6 +163,59 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
goto out;
}
/**
* alloc_skb_from_cache - allocate a network buffer
* @cp: kmem_cache from which to allocate the data area
* (object size must be big enough for @size bytes + skb overheads)
* @size: size to allocate
* @gfp_mask: allocation mask
*
* Allocate a new &sk_buff. The returned buffer has no headroom and
* tail room of size bytes. The object has a reference count of one.
* The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size, int gfp_mask)
{
struct sk_buff *skb;
u8 *data;
/* Get the HEAD */
skb = kmem_cache_alloc(skbuff_head_cache,
gfp_mask & ~__GFP_DMA);
if (!skb)
goto out;
/* Get the DATA. */
size = SKB_DATA_ALIGN(size);
data = kmem_cache_alloc(cp, gfp_mask);
if (!data)
goto nodata;
memset(skb, 0, offsetof(struct sk_buff, truesize));
skb->truesize = size + sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb->tail = data;
skb->end = data + size;
atomic_set(&(skb_shinfo(skb)->dataref), 1);
skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->tso_size = 0;
skb_shinfo(skb)->tso_segs = 0;
skb_shinfo(skb)->frag_list = NULL;
out:
return skb;
nodata:
kmem_cache_free(skbuff_head_cache, skb);
skb = NULL;
goto out;
}
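A hedged usage sketch of the new helper follows (not part of the patch; the cache name and the 2048-byte payload are illustrative): the caller creates a slab cache whose object size covers the aligned data area plus the skb_shared_info overhead, then draws buffers from it, e.g. in a receive path.
static kmem_cache_t *my_rx_cache;
static int my_rx_cache_init(void)
{
	/* object must hold the aligned payload plus the shared info block */
	unsigned int obj_size = SKB_DATA_ALIGN(2048) +
				sizeof(struct skb_shared_info);
	my_rx_cache = kmem_cache_create("my_rx_bufs", obj_size, 0,
					SLAB_HWCACHE_ALIGN, NULL, NULL);
	return my_rx_cache ? 0 : -ENOMEM;
}
static struct sk_buff *my_rx_alloc(void)
{
	/* GFP_ATOMIC because this may run from interrupt context */
	return alloc_skb_from_cache(my_rx_cache, 2048, GFP_ATOMIC);
}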
static void skb_drop_fraglist(struct sk_buff *skb)
{
......
......@@ -274,29 +274,6 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
static int fn_hash_last_dflt=-1;
static int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort, int *last_idx)
{
struct neighbour *n;
int state = NUD_NONE;
n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
if (n) {
state = n->nud_state;
neigh_release(n);
}
if (state==NUD_REACHABLE)
return 0;
if ((state&NUD_VALID) && order != fn_hash_last_dflt)
return 0;
if ((state&NUD_VALID) ||
(*last_idx<0 && order > fn_hash_last_dflt)) {
*last_resort = fi;
*last_idx = order;
}
return 1;
}
static void
fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
......@@ -337,7 +314,7 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
if (next_fi != res->fi)
break;
} else if (!fib_detect_death(fi, order, &last_resort,
&last_idx)) {
&last_idx, &fn_hash_last_dflt)) {
if (res->fi)
fib_info_put(res->fi);
res->fi = fi;
......@@ -355,7 +332,7 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
goto out;
}
if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
if (!fib_detect_death(fi, order, &last_resort, &last_idx, &fn_hash_last_dflt)) {
if (res->fi)
fib_info_put(res->fi);
res->fi = fi;
......@@ -376,11 +353,6 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
read_unlock(&fib_hash_lock);
}
static void rtmsg_fib(int, struct fib_node *, struct fib_alias *,
int, int,
struct nlmsghdr *n,
struct netlink_skb_parms *);
/* Insert node F to FZ. */
static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
{
......@@ -404,26 +376,6 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, u32 key)
return NULL;
}
/* Return the first fib alias matching TOS with
* priority less than or equal to PRIO.
*/
static struct fib_alias *fib_find_alias(struct fib_node *fn, u8 tos, u32 prio)
{
if (fn) {
struct list_head *head = &fn->fn_alias;
struct fib_alias *fa;
list_for_each_entry(fa, head, fa_list) {
if (fa->fa_tos > tos)
continue;
if (fa->fa_info->fib_priority >= prio ||
fa->fa_tos < tos)
return fa;
}
}
return NULL;
}
static int
fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
struct nlmsghdr *n, struct netlink_skb_parms *req)
......@@ -463,7 +415,11 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
fn_rehash_zone(fz);
f = fib_find_node(fz, key);
fa = fib_find_alias(f, tos, fi->fib_priority);
if (!f)
fa = NULL;
else
fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
/* Now fa, if non-NULL, points to the first fib alias
* with the same keys [prefix,tos,priority], if such key already
......@@ -565,7 +521,7 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
fz->fz_nent++;
rt_cache_flush(-1);
rtmsg_fib(RTM_NEWROUTE, f, new_fa, z, tb->tb_id, n, req);
rtmsg_fib(RTM_NEWROUTE, key, new_fa, z, tb->tb_id, n, req);
return 0;
out_free_new_fa:
......@@ -603,7 +559,11 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
}
f = fib_find_node(fz, key);
fa = fib_find_alias(f, tos, 0);
if (!f)
fa = NULL;
else
fa = fib_find_alias(&f->fn_alias, tos, 0);
if (!fa)
return -ESRCH;
......@@ -631,7 +591,7 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
int kill_fn;
fa = fa_to_delete;
rtmsg_fib(RTM_DELROUTE, f, fa, z, tb->tb_id, n, req);
rtmsg_fib(RTM_DELROUTE, key, fa, z, tb->tb_id, n, req);
kill_fn = 0;
write_lock_bh(&fib_hash_lock);
......@@ -796,33 +756,6 @@ static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin
return skb->len;
}
static void rtmsg_fib(int event, struct fib_node *f, struct fib_alias *fa,
int z, int tb_id,
struct nlmsghdr *n, struct netlink_skb_parms *req)
{
struct sk_buff *skb;
u32 pid = req ? req->pid : 0;
int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return;
if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
fa->fa_type, fa->fa_scope, &f->fn_key, z,
fa->fa_tos,
fa->fa_info) < 0) {
kfree_skb(skb);
return;
}
NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_ROUTE;
if (n->nlmsg_flags&NLM_F_ECHO)
atomic_inc(&skb->users);
netlink_broadcast(rtnl, skb, pid, RTMGRP_IPV4_ROUTE, GFP_KERNEL);
if (n->nlmsg_flags&NLM_F_ECHO)
netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
}
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_table * fib_hash_init(int id)
#else
......
......@@ -30,5 +30,13 @@ extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *,
extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst,
int dst_len, u8 tos, struct fib_info *fi);
extern void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
int z, int tb_id,
struct nlmsghdr *n, struct netlink_skb_parms *req);
extern struct fib_alias *fib_find_alias(struct list_head *fah,
u8 tos, u32 prio);
extern int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort,
int *last_idx, int *dflt);
#endif /* _FIB_LOOKUP_H */
......@@ -270,6 +270,74 @@ int ip_fib_check_default(u32 gw, struct net_device *dev)
return -1;
}
void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
int z, int tb_id,
struct nlmsghdr *n, struct netlink_skb_parms *req)
{
struct sk_buff *skb;
u32 pid = req ? req->pid : 0;
int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
return;
if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
fa->fa_type, fa->fa_scope, &key, z,
fa->fa_tos,
fa->fa_info) < 0) {
kfree_skb(skb);
return;
}
NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_ROUTE;
if (n->nlmsg_flags&NLM_F_ECHO)
atomic_inc(&skb->users);
netlink_broadcast(rtnl, skb, pid, RTMGRP_IPV4_ROUTE, GFP_KERNEL);
if (n->nlmsg_flags&NLM_F_ECHO)
netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
}
/* Return the first fib alias matching TOS with
* priority less than or equal to PRIO.
*/
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
if (fah) {
struct fib_alias *fa;
list_for_each_entry(fa, fah, fa_list) {
if (fa->fa_tos > tos)
continue;
if (fa->fa_info->fib_priority >= prio ||
fa->fa_tos < tos)
return fa;
}
}
return NULL;
}
int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort, int *last_idx, int *dflt)
{
struct neighbour *n;
int state = NUD_NONE;
n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
if (n) {
state = n->nud_state;
neigh_release(n);
}
if (state==NUD_REACHABLE)
return 0;
if ((state&NUD_VALID) && order != *dflt)
return 0;
if ((state&NUD_VALID) ||
(*last_idx<0 && order > *dflt)) {
*last_resort = fi;
*last_idx = order;
}
return 1;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static u32 fib_get_attr32(struct rtattr *attr, int attrlen, int type)
......
......@@ -504,20 +504,6 @@ config IP_NF_TARGET_SAME
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_NAT_LOCAL
bool "NAT of local connections (READ HELP)"
depends on IP_NF_NAT
help
This option enables support for NAT of locally originated connections.
Enable this if you need to use destination NAT on connections
originating from local processes on the nat box itself.
Please note that you will need a recent version (>= 1.2.6a)
of the iptables userspace program in order to use this feature.
See <http://www.iptables.org/> for download instructions.
If unsure, say 'N'.
config IP_NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support (EXPERIMENTAL)"
depends on EXPERIMENTAL && IP_NF_NAT
......
......@@ -882,6 +882,7 @@ void need_ip_conntrack(void)
EXPORT_SYMBOL(ip_conntrack_protocol_register);
EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
EXPORT_SYMBOL(ip_ct_get_tuple);
EXPORT_SYMBOL(invert_tuplepr);
EXPORT_SYMBOL(ip_conntrack_alter_reply);
EXPORT_SYMBOL(ip_conntrack_destroyed);
......
......@@ -182,7 +182,6 @@ find_appropriate_src(const struct ip_conntrack_tuple *tuple,
return 0;
}
#ifdef CONFIG_IP_NF_NAT_LOCAL
/* If it's really a local destination manip, it may need to do a
source manip too. */
static int
......@@ -202,7 +201,6 @@ do_extra_mangle(u_int32_t var_ip, u_int32_t *other_ipp)
ip_rt_put(rt);
return 1;
}
#endif
/* Simple way to iterate through all. */
static inline int fake_cmp(const struct ip_conntrack *ct,
......@@ -301,7 +299,6 @@ find_best_ips_proto(struct ip_conntrack_tuple *tuple,
* do_extra_mangle last time. */
*other_ipp = saved_ip;
#ifdef CONFIG_IP_NF_NAT_LOCAL
if (hooknum == NF_IP_LOCAL_OUT
&& *var_ipp != orig_dstip
&& !do_extra_mangle(*var_ipp, other_ipp)) {
......@@ -312,7 +309,6 @@ find_best_ips_proto(struct ip_conntrack_tuple *tuple,
* anyway. */
continue;
}
#endif
/* Count how many others map onto this. */
score = count_maps(tuple->src.ip, tuple->dst.ip,
......@@ -356,13 +352,11 @@ find_best_ips_proto_fast(struct ip_conntrack_tuple *tuple,
else {
/* Only do extra mangle when required (breaks
socket binding) */
#ifdef CONFIG_IP_NF_NAT_LOCAL
if (tuple->dst.ip != mr->range[0].min_ip
&& hooknum == NF_IP_LOCAL_OUT
&& !do_extra_mangle(mr->range[0].min_ip,
&tuple->src.ip))
return NULL;
#endif
tuple->dst.ip = mr->range[0].min_ip;
}
}
......@@ -473,10 +467,8 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
static unsigned int opposite_hook[NF_IP_NUMHOOKS]
= { [NF_IP_PRE_ROUTING] = NF_IP_POST_ROUTING,
[NF_IP_POST_ROUTING] = NF_IP_PRE_ROUTING,
#ifdef CONFIG_IP_NF_NAT_LOCAL
[NF_IP_LOCAL_OUT] = NF_IP_LOCAL_IN,
[NF_IP_LOCAL_IN] = NF_IP_LOCAL_OUT,
#endif
};
unsigned int
......@@ -821,6 +813,23 @@ do_bindings(struct ip_conntrack *ct,
/* not reached */
}
static inline int tuple_src_equal_dst(const struct ip_conntrack_tuple *t1,
const struct ip_conntrack_tuple *t2)
{
if (t1->dst.protonum != t2->dst.protonum || t1->src.ip != t2->dst.ip)
return 0;
if (t1->dst.protonum != IPPROTO_ICMP)
return t1->src.u.all == t2->dst.u.all;
else {
struct ip_conntrack_tuple inv;
/* ICMP tuples are asymmetric */
invert_tuplepr(&inv, t1);
return inv.src.u.all == t2->src.u.all &&
inv.dst.u.all == t2->dst.u.all;
}
}
int
icmp_reply_translation(struct sk_buff **pskb,
struct ip_conntrack *conntrack,
......@@ -833,6 +842,7 @@ icmp_reply_translation(struct sk_buff **pskb,
} *inside;
unsigned int i;
struct ip_nat_info *info = &conntrack->nat.info;
struct ip_conntrack_tuple *cttuple, innertuple;
int hdrlen;
if (!skb_ip_make_writable(pskb,(*pskb)->nh.iph->ihl*4+sizeof(*inside)))
......@@ -876,6 +886,13 @@ icmp_reply_translation(struct sk_buff **pskb,
such addresses are not too uncommon, as Alan Cox points
out) */
if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
sizeof(struct icmphdr) + inside->ip.ihl*4,
&innertuple,
ip_ct_find_proto(inside->ip.protocol)))
return 0;
cttuple = &conntrack->tuplehash[dir].tuple;
READ_LOCK(&ip_nat_lock);
for (i = 0; i < info->num_manips; i++) {
DEBUGP("icmp_reply: manip %u dir %s hook %u\n",
......@@ -885,37 +902,52 @@ icmp_reply_translation(struct sk_buff **pskb,
if (info->manips[i].direction != dir)
continue;
/* Mapping the inner packet is just like a normal
packet, except it was never src/dst reversed, so
where we would normally apply a dst manip, we apply
a src, and vice versa. */
if (info->manips[i].hooknum == hooknum) {
DEBUGP("icmp_reply: inner %s -> %u.%u.%u.%u %u\n",
info->manips[i].maniptype == IP_NAT_MANIP_SRC
? "DST" : "SRC",
NIPQUAD(info->manips[i].manip.ip),
ntohs(info->manips[i].manip.u.udp.port));
if (!manip_pkt(inside->ip.protocol, pskb,
(*pskb)->nh.iph->ihl*4
+ sizeof(inside->icmp),
&info->manips[i].manip,
!info->manips[i].maniptype))
goto unlock_fail;
/* Mapping the inner packet is just like a normal packet, except
* it was never src/dst reversed, so where we would normally
* apply a dst manip, we apply a src, and vice versa. */
/* Outer packet needs to have IP header NATed like
it's a reply. */
/* Only true for forwarded packets, locally generated packets
* never hit PRE_ROUTING, we need to apply their PRE_ROUTING
* manips in LOCAL_OUT. */
if (hooknum == NF_IP_LOCAL_OUT &&
info->manips[i].hooknum == NF_IP_PRE_ROUTING)
hooknum = info->manips[i].hooknum;
/* Use mapping to map outer packet: 0 gives no
per-proto mapping */
DEBUGP("icmp_reply: outer %s -> %u.%u.%u.%u\n",
info->manips[i].maniptype == IP_NAT_MANIP_SRC
? "SRC" : "DST",
NIPQUAD(info->manips[i].manip.ip));
if (!manip_pkt(0, pskb, 0,
&info->manips[i].manip,
info->manips[i].maniptype))
goto unlock_fail;
if (info->manips[i].hooknum != hooknum)
continue;
/* ICMP errors may be generated locally for packets that
* don't have all NAT manips applied yet. Verify manips
* have been applied before reversing them */
if (info->manips[i].maniptype == IP_NAT_MANIP_SRC) {
if (!tuple_src_equal_dst(cttuple, &innertuple))
continue;
} else {
if (!tuple_src_equal_dst(&innertuple, cttuple))
continue;
}
DEBUGP("icmp_reply: inner %s -> %u.%u.%u.%u %u\n",
info->manips[i].maniptype == IP_NAT_MANIP_SRC
? "DST" : "SRC", NIPQUAD(info->manips[i].manip.ip),
ntohs(info->manips[i].manip.u.udp.port));
if (!manip_pkt(inside->ip.protocol, pskb,
(*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp),
&info->manips[i].manip,
!info->manips[i].maniptype))
goto unlock_fail;
/* Outer packet needs to have IP header NATed like
it's a reply. */
/* Use mapping to map outer packet: 0 gives no
per-proto mapping */
DEBUGP("icmp_reply: outer %s -> %u.%u.%u.%u\n",
info->manips[i].maniptype == IP_NAT_MANIP_SRC
? "SRC" : "DST", NIPQUAD(info->manips[i].manip.ip));
if (!manip_pkt(0, pskb, 0, &info->manips[i].manip,
info->manips[i].maniptype))
goto unlock_fail;
}
READ_UNLOCK(&ip_nat_lock);
......
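The new tuple_src_equal_dst() helper above has a special case for ICMP because those tuples are asymmetric: the id sits on the source side, the type/code on the destination side, and a reply carries a different type, so a plain src-vs-dst comparison would never match. The following is a minimal userspace sketch of that comparison; the structs, MINI_ICMP constant, and the echo-only inversion are invented stand-ins, not the kernel's ip_conntrack_tuple API.

/*
 * Userspace sketch of the comparison done by tuple_src_equal_dst():
 * TCP/UDP compare the per-proto source part against the destination part
 * directly; ICMP inverts the tuple first.  Only ICMP echo (type 8 <-> 0)
 * is modelled here, purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct mini_end { uint32_t ip; uint16_t all; };      /* port, or icmp id / type */
struct mini_tuple { struct mini_end src, dst; uint8_t protonum; };

#define MINI_ICMP 1

static struct mini_tuple mini_invert(const struct mini_tuple *t)
{
	struct mini_tuple inv = { .src = t->dst, .dst = t->src,
				  .protonum = t->protonum };
	if (t->protonum == MINI_ICMP) {
		inv.src.all = t->src.all;               /* id is kept on the src side */
		inv.dst.all = t->dst.all == 8 ? 0 : 8;  /* echo request <-> echo reply */
	}
	return inv;
}

static int mini_src_equal_dst(const struct mini_tuple *t1,
			      const struct mini_tuple *t2)
{
	if (t1->protonum != t2->protonum || t1->src.ip != t2->dst.ip)
		return 0;
	if (t1->protonum != MINI_ICMP)
		return t1->src.all == t2->dst.all;
	else {
		struct mini_tuple inv = mini_invert(t1);
		return inv.src.all == t2->src.all && inv.dst.all == t2->dst.all;
	}
}

int main(void)
{
	struct mini_tuple orig  = { { 0x0a000001, 0x1234 }, { 0x08080808, 8 }, MINI_ICMP };
	struct mini_tuple reply = { { 0x08080808, 0x1234 }, { 0x0a000001, 0 }, MINI_ICMP };

	printf("icmp reply matches original: %d\n", mini_src_equal_dst(&orig, &reply));
	return 0;
}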
......@@ -149,12 +149,8 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
struct ip_conntrack *ct;
enum ip_conntrack_info ctinfo;
#ifdef CONFIG_IP_NF_NAT_LOCAL
IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
|| hooknum == NF_IP_LOCAL_OUT);
#else
IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING);
#endif
ct = ip_conntrack_get(*pskb, &ctinfo);
......@@ -232,13 +228,6 @@ static int ipt_dnat_checkentry(const char *tablename,
return 0;
}
#ifndef CONFIG_IP_NF_NAT_LOCAL
if (hook_mask & (1 << NF_IP_LOCAL_OUT)) {
DEBUGP("DNAT: CONFIG_IP_NF_NAT_LOCAL not enabled\n");
return 0;
}
#endif
return 1;
}
......
......@@ -128,16 +128,7 @@ ip_nat_fn(unsigned int hooknum,
WRITE_LOCK(&ip_nat_lock);
/* Seen it before? This can happen for loopback, retrans,
or local packets.. */
if (!(info->initialized & (1 << maniptype))
#ifndef CONFIG_IP_NF_NAT_LOCAL
/* If this session has already been confirmed we must not
* touch it again even if there is no mapping set up.
* Can only happen on local->local traffic with
* CONFIG_IP_NF_NAT_LOCAL disabled.
*/
&& !(ct->status & IPS_CONFIRMED)
#endif
) {
if (!(info->initialized & (1 << maniptype))) {
unsigned int ret;
if (ct->master
......@@ -146,15 +137,14 @@ ip_nat_fn(unsigned int hooknum,
ret = call_expect(master_ct(ct), pskb,
hooknum, ct, info);
} else {
#ifdef CONFIG_IP_NF_NAT_LOCAL
/* LOCAL_IN hook doesn't have a chain! */
if (hooknum == NF_IP_LOCAL_IN)
ret = alloc_null_binding(ct, info,
hooknum);
else
#endif
ret = ip_nat_rule_find(pskb, hooknum, in, out,
ct, info);
ret = ip_nat_rule_find(pskb, hooknum,
in, out, ct,
info);
}
if (ret != NF_ACCEPT) {
......@@ -179,6 +169,29 @@ ip_nat_fn(unsigned int hooknum,
return do_bindings(ct, ctinfo, info, hooknum, pskb);
}
static unsigned int
ip_nat_in(unsigned int hooknum,
struct sk_buff **pskb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
u_int32_t saddr, daddr;
unsigned int ret;
saddr = (*pskb)->nh.iph->saddr;
daddr = (*pskb)->nh.iph->daddr;
ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN
&& ((*pskb)->nh.iph->saddr != saddr
|| (*pskb)->nh.iph->daddr != daddr)) {
dst_release((*pskb)->dst);
(*pskb)->dst = NULL;
}
return ret;
}
static unsigned int
ip_nat_out(unsigned int hooknum,
struct sk_buff **pskb,
......@@ -211,7 +224,6 @@ ip_nat_out(unsigned int hooknum,
return ip_nat_fn(hooknum, pskb, in, out, okfn);
}
#ifdef CONFIG_IP_NF_NAT_LOCAL
static unsigned int
ip_nat_local_fn(unsigned int hooknum,
struct sk_buff **pskb,
......@@ -237,13 +249,12 @@ ip_nat_local_fn(unsigned int hooknum,
return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
return ret;
}
#endif
/* We must be after connection tracking and before packet filtering. */
/* Before packet filtering, change destination */
static struct nf_hook_ops ip_nat_in_ops = {
.hook = ip_nat_fn,
.hook = ip_nat_in,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_IP_PRE_ROUTING,
......@@ -259,7 +270,6 @@ static struct nf_hook_ops ip_nat_out_ops = {
.priority = NF_IP_PRI_NAT_SRC,
};
#ifdef CONFIG_IP_NF_NAT_LOCAL
/* Before packet filtering, change destination */
static struct nf_hook_ops ip_nat_local_out_ops = {
.hook = ip_nat_local_fn,
......@@ -277,7 +287,6 @@ static struct nf_hook_ops ip_nat_local_in_ops = {
.hooknum = NF_IP_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
};
#endif
/* Protocol registration. */
int ip_nat_protocol_register(struct ip_nat_protocol *proto)
......@@ -334,7 +343,6 @@ static int init_or_cleanup(int init)
printk("ip_nat_init: can't register out hook.\n");
goto cleanup_inops;
}
#ifdef CONFIG_IP_NF_NAT_LOCAL
ret = nf_register_hook(&ip_nat_local_out_ops);
if (ret < 0) {
printk("ip_nat_init: can't register local out hook.\n");
......@@ -345,16 +353,13 @@ static int init_or_cleanup(int init)
printk("ip_nat_init: can't register local in hook.\n");
goto cleanup_localoutops;
}
#endif
return ret;
cleanup:
#ifdef CONFIG_IP_NF_NAT_LOCAL
nf_unregister_hook(&ip_nat_local_in_ops);
cleanup_localoutops:
nf_unregister_hook(&ip_nat_local_out_ops);
cleanup_outops:
#endif
nf_unregister_hook(&ip_nat_out_ops);
cleanup_inops:
nf_unregister_hook(&ip_nat_in_ops);
......
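The new ip_nat_in() wrapper above snapshots the source and destination addresses before NAT runs and, if they changed, releases the packet's cached dst entry, presumably so a destination rewritten by DNAT is re-routed rather than sent along a route attached for the original address. Below is a userspace model of that snapshot-and-invalidate pattern; the fake_pkt/fake_route types and the fake_nat() rule are invented for illustration and are not the kernel's sk_buff/dst_entry API.

/*
 * Sketch of the pattern used by ip_nat_in(): remember the addresses before
 * NAT runs, and if they were rewritten, drop the route that was cached for
 * the old destination so it is looked up again.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_route { uint32_t for_daddr; };
struct fake_pkt   { uint32_t saddr, daddr; struct fake_route *route; };

/* stand-in for the NAT hook: pretend a DNAT rule rewrites the destination */
static int fake_nat(struct fake_pkt *p)
{
	p->daddr = 0xc0a80002;          /* 192.168.0.2 */
	return 0;                        /* "accept" */
}

static int fake_nat_in(struct fake_pkt *p)
{
	uint32_t saddr = p->saddr, daddr = p->daddr;
	int ret = fake_nat(p);

	if (ret == 0 && (p->saddr != saddr || p->daddr != daddr)) {
		free(p->route);          /* analogue of dst_release() */
		p->route = NULL;         /* force a fresh route lookup */
	}
	return ret;
}

int main(void)
{
	struct fake_pkt p = { 0x0a000001, 0x0a000002, malloc(sizeof(struct fake_route)) };

	fake_nat_in(&p);
	printf("route invalidated: %s\n", p.route == NULL ? "yes" : "no");
	return 0;
}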
......@@ -334,6 +334,18 @@ config NET_CLS_IND
Requires a new iproute2
You MUST NOT turn this on if you don't have an updated iproute2.
config CLS_U32_MARK
bool "Use nfmark as a key in U32 classifier"
depends on NET_CLS_U32 && NETFILTER
help
This allows you to match the nfmark in a u32 filter.
Example:
tc filter add dev eth0 protocol ip parent 1:0 prio 5 u32 \
match mark 0x0090 0xffff \
match ip dst 4.4.4.4 \
flowid 1:90
You must use a new iproute2 to use this feature.
config NET_CLS_RSVP
tristate "Special RSVP classifier"
depends on NET_CLS && NET_QOS
......
......@@ -294,14 +294,16 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int re
}
int tcf_action_init_1(struct rtattr *rta, struct rtattr *est, struct tc_action *a, char *name, int ovr, int bind )
struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
char *name, int ovr, int bind, int *err)
{
struct tc_action *a;
struct tc_action_ops *a_o;
char act_name[4 + IFNAMSIZ + 1];
struct rtattr *tb[TCA_ACT_MAX+1];
struct rtattr *kind = NULL;
int err = -EINVAL;
*err = -EINVAL;
if (NULL == name) {
if (rtattr_parse(tb, TCA_ACT_MAX, RTA_DATA(rta), RTA_PAYLOAD(rta))<0)
......@@ -337,22 +339,25 @@ int tcf_action_init_1(struct rtattr *rta, struct rtattr *est, struct tc_action *
goto err_out;
}
if (NULL == a) {
a = kmalloc(sizeof(*a), GFP_KERNEL);
if (a == NULL) {
*err = -ENOMEM;
goto err_mod;
}
memset(a, 0, sizeof(*a));
/* backward compatibility for policer */
if (NULL == name) {
err = a_o->init(tb[TCA_ACT_OPTIONS-1], est, a, ovr, bind);
if (0 > err ) {
err = -EINVAL;
goto err_mod;
*err = a_o->init(tb[TCA_ACT_OPTIONS-1], est, a, ovr, bind);
if (*err < 0) {
*err = -EINVAL;
goto err_free;
}
} else {
err = a_o->init(rta, est, a, ovr, bind);
if (0 > err ) {
err = -EINVAL;
goto err_mod;
*err = a_o->init(rta, est, a, ovr, bind);
if (*err < 0) {
*err = -EINVAL;
goto err_free;
}
}
......@@ -360,60 +365,58 @@ int tcf_action_init_1(struct rtattr *rta, struct rtattr *est, struct tc_action *
if it exists and is only bound to in a_o->init() then
ACT_P_CREATED is not returned (a zero is).
*/
if (ACT_P_CREATED != err) {
if (*err != ACT_P_CREATED)
module_put(a_o->owner);
}
a->ops = a_o;
DPRINTK("tcf_action_init_1: successfull %s \n",act_name);
return 0;
*err = 0;
return a;
err_free:
kfree(a);
err_mod:
module_put(a_o->owner);
err_out:
return err;
return NULL;
}
int tcf_action_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, char *name, int ovr , int bind)
struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est,
char *name, int ovr, int bind, int *err)
{
struct rtattr *tb[TCA_ACT_MAX_PRIO+1];
struct tc_action *a = NULL, *act, *act_prev = NULL;
int i;
struct tc_action *act = a, *a_s = a;
int err = -EINVAL;
if (rtattr_parse(tb, TCA_ACT_MAX_PRIO, RTA_DATA(rta), RTA_PAYLOAD(rta))<0)
return err;
if (rtattr_parse(tb, TCA_ACT_MAX_PRIO, RTA_DATA(rta),
RTA_PAYLOAD(rta)) < 0) {
*err = -EINVAL;
return a;
}
for (i=0; i < TCA_ACT_MAX_PRIO ; i++) {
for (i=0; i < TCA_ACT_MAX_PRIO; i++) {
if (tb[i]) {
if (NULL == act) {
act = kmalloc(sizeof(*act),GFP_KERNEL);
if (NULL == act) {
err = -ENOMEM;
goto bad_ret;
}
memset(act, 0,sizeof(*act));
}
act->next = NULL;
if (0 > tcf_action_init_1(tb[i],est,act,name,ovr,bind)) {
printk("Error processing action order %d\n",i);
return err;
act = tcf_action_init_1(tb[i], est, name, ovr, bind, err);
if (act == NULL) {
printk("Error processing action order %d\n", i);
goto bad_ret;
}
act->order = i+1;
if (a_s != act) {
a_s->next = act;
a_s = act;
}
act = NULL;
if (a == NULL)
a = act;
else
act_prev->next = act;
act_prev = act;
}
}
return a;
return 0;
bad_ret:
tcf_action_destroy(a, bind);
return err;
if (a != NULL)
tcf_action_destroy(a, bind);
return NULL;
}
int tcf_action_copy_stats (struct sk_buff *skb,struct tc_action *a)
......@@ -849,21 +852,9 @@ static int tcf_action_add(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int o
struct tc_action *a = NULL;
u32 seq = n->nlmsg_seq;
act = kmalloc(sizeof(*act),GFP_KERNEL);
if (NULL == act)
return -ENOMEM;
memset(act, 0, sizeof(*act));
ret = tcf_action_init(rta, NULL,act,NULL,ovr,0);
/* NOTE: We have an all-or-none model
* This means that if any of the actions fail
* to update then all are undone.
* */
if (0 > ret) {
tcf_action_destroy(act, 0);
act = tcf_action_init(rta, NULL, NULL, ovr, 0, &ret);
if (act == NULL)
goto done;
}
/* dump then free all the actions after update; inserted policy
* stays intact
......@@ -880,7 +871,6 @@ static int tcf_action_add(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int o
}
}
done:
return ret;
}
......
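The act_api.c change above moves tcf_action_init_1()/tcf_action_init() from "caller allocates the action and gets an int back" to "the function allocates and returns the action, reporting the error code through an out-parameter", which is what lets tcf_action_add() drop its own kmalloc/destroy dance. A hedged sketch of that calling convention, using an invented make_thing()/struct thing rather than the real tc_action interface:

/*
 * Sketch of the new-style interface: the function allocates and returns the
 * object; errors come back through an int out-parameter.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct thing { int order; };

static struct thing *make_thing(int arg, int *err)
{
	struct thing *t;

	if (arg < 0) {
		*err = -EINVAL;          /* invalid request, nothing allocated */
		return NULL;
	}
	t = calloc(1, sizeof(*t));
	if (t == NULL) {
		*err = -ENOMEM;
		return NULL;
	}
	t->order = arg;
	*err = 0;
	return t;
}

int main(void)
{
	int err;
	struct thing *t = make_thing(1, &err);   /* object returned, error via &err */

	if (t == NULL)
		return -err;
	printf("created, order=%d err=%d\n", t->order, err);
	free(t);
	return 0;
}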
......@@ -27,6 +27,7 @@
* JHS: We should remove the CONFIG_NET_CLS_IND from here
* eventually when the meta match extension is made available
*
* nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
*/
#include <asm/uaccess.h>
......@@ -58,6 +59,13 @@
#include <net/pkt_cls.h>
struct tc_u32_mark
{
__u32 val;
__u32 mask;
__u32 success;
};
struct tc_u_knode
{
struct tc_u_knode *next;
......@@ -78,6 +86,9 @@ struct tc_u_knode
struct tc_u_hnode *ht_down;
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
struct tc_u32_mark mark;
#endif
struct tc_u32_sel sel;
};
......@@ -139,6 +150,16 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
n->pf->rcnt +=1;
j = 0;
#endif
#ifdef CONFIG_CLS_U32_MARK
if ((skb->nfmark & n->mark.mask) != n->mark.val) {
n = n->next;
goto next_knode;
} else {
n->mark.success++;
}
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
......@@ -554,6 +575,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
struct tc_u_hnode *ht;
struct tc_u_knode *n;
struct tc_u32_sel *s;
struct tc_u32_mark *mark;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct rtattr *tb[TCA_U32_MAX];
u32 htid;
......@@ -657,6 +679,17 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
}
n->fshift = i;
}
#ifdef CONFIG_CLS_U32_MARK
if (tb[TCA_U32_MARK-1]) {
if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark))
return -EINVAL;
mark = RTA_DATA(tb[TCA_U32_MARK-1]);
memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
n->mark.success = 0;
}
#endif
err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
if (err == 0) {
struct tc_u_knode **ins;
......@@ -744,6 +777,12 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
if (n->ht_down)
RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);
#ifdef CONFIG_CLS_U32_MARK
if (n->mark.val || n->mark.mask)
RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif
#ifdef CONFIG_NET_CLS_ACT
if (tcf_dump_act(skb, n->action, TCA_U32_ACT, TCA_U32_POLICE) < 0)
goto rtattr_failure;
......
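Both the CLS_U32_MARK Kconfig help text earlier and the u32_classify() change above hinge on the same predicate: the rule only proceeds when (skb->nfmark & mark.mask) == mark.val, and the per-rule success counter is bumped on a hit. A standalone sketch of that test, using the mark 0x0090 / mask 0xffff values from the tc example in the help text; the struct here is a local stand-in mirroring tc_u32_mark, not the kernel definition.

/*
 * Standalone sketch of the nfmark test added to u32_classify():
 * proceed only when (nfmark & mask) == val, counting hits.
 */
#include <stdint.h>
#include <stdio.h>

struct mark_key {
	uint32_t val;
	uint32_t mask;
	uint32_t success;        /* hit counter, as in the kernel struct */
};

static int mark_matches(struct mark_key *m, uint32_t nfmark)
{
	if ((nfmark & m->mask) != m->val)
		return 0;
	m->success++;
	return 1;
}

int main(void)
{
	struct mark_key m = { .val = 0x0090, .mask = 0xffff };

	printf("0x00090 -> %d\n", mark_matches(&m, 0x0090));    /* 1: exact match        */
	printf("0x01090 -> %d\n", mark_matches(&m, 0x1090));    /* 0: low 16 bits differ */
	printf("0x10090 -> %d\n", mark_matches(&m, 0x10090));   /* 1: bits above the mask are ignored */
	return 0;
}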
......@@ -1073,7 +1073,6 @@ int sock_wake_async(struct socket *sock, int how, int band)
static int __sock_create(int family, int type, int protocol, struct socket **res, int kern)
{
int i;
int err;
struct socket *sock;
......@@ -1118,7 +1117,7 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
net_family_read_lock();
if (net_families[family] == NULL) {
i = -EAFNOSUPPORT;
err = -EAFNOSUPPORT;
goto out;
}
......@@ -1128,10 +1127,9 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
* default.
*/
if (!(sock = sock_alloc()))
{
if (!(sock = sock_alloc())) {
printk(KERN_WARNING "socket: no more sockets\n");
i = -ENFILE; /* Not exactly a match, but it's the
err = -ENFILE; /* Not exactly a match, but it's the
closest posix thing */
goto out;
}
......@@ -1142,11 +1140,11 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
* We will call the ->create function, that possibly is in a loadable
* module, so we have to bump that loadable module refcnt first.
*/
i = -EAFNOSUPPORT;
err = -EAFNOSUPPORT;
if (!try_module_get(net_families[family]->owner))
goto out_release;
if ((i = net_families[family]->create(sock, protocol)) < 0)
if ((err = net_families[family]->create(sock, protocol)) < 0)
goto out_module_put;
/*
* Now to bump the refcnt of the [loadable] module that owns this
......@@ -1166,7 +1164,7 @@ static int __sock_create(int family, int type, int protocol, struct socket **res
out:
net_family_read_unlock();
return i;
return err;
out_module_put:
module_put(net_families[family]->owner);
out_release:
......
......@@ -34,28 +34,19 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/x25.h>
......@@ -223,14 +214,19 @@ static void x25_insert_socket(struct sock *sk)
/*
* Find a socket that wants to accept the Call Request we just
* received.
* received. Check the full list for an address/cud match.
* If no cuds match return the next_best thing, an address match.
* Note: if a listening socket has cud set it must only get calls
* with matching cud.
*/
static struct sock *x25_find_listener(struct x25_address *addr)
static struct sock *x25_find_listener(struct x25_address *addr, struct x25_calluserdata *calluserdata)
{
struct sock *s;
struct sock *next_best;
struct hlist_node *node;
read_lock_bh(&x25_list_lock);
next_best = NULL;
sk_for_each(s, node, &x25_list)
if ((!strcmp(addr->x25_addr,
......@@ -238,9 +234,24 @@ static struct sock *x25_find_listener(struct x25_address *addr)
!strcmp(addr->x25_addr,
null_x25_address.x25_addr)) &&
s->sk_state == TCP_LISTEN) {
sock_hold(s);
goto found;
/*
* Found a listening socket, now check the incoming
* call user data vs this socket's call user data
*/
if (x25_check_calluserdata(&x25_sk(s)->calluserdata, calluserdata)) {
sock_hold(s);
goto found;
}
if (x25_sk(s)->calluserdata.cudlength == 0) {
next_best = s;
}
}
if (next_best) {
s = next_best;
sock_hold(s);
goto found;
}
s = NULL;
found:
read_unlock_bh(&x25_list_lock);
......@@ -814,6 +825,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
struct x25_opt *makex25;
struct x25_address source_addr, dest_addr;
struct x25_facilities facilities;
struct x25_calluserdata calluserdata;
int len, rc;
/*
......@@ -828,9 +840,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
/*
* Find a listener for the particular address.
* Get the length of the facilities, skip past them for the moment
* get the call user data because this is needed to determine
* the correct listener
*/
len = skb->data[0] + 1;
skb_pull(skb,len);
/*
* Incoming Call User Data.
*/
if (skb->len >= 0) {
memcpy(calluserdata.cuddata, skb->data, skb->len);
calluserdata.cudlength = skb->len;
}
skb_push(skb,len);
/*
* Find a listener for the particular address/cud pair.
*/
sk = x25_find_listener(&source_addr);
sk = x25_find_listener(&source_addr,&calluserdata);
/*
* We can't accept the Call Request.
......@@ -859,7 +889,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
goto out_sock_put;
/*
* Remove the facilities, leaving any Call User Data.
* Remove the facilities
*/
skb_pull(skb, len);
......@@ -873,17 +903,10 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
makex25->neighbour = nb;
makex25->facilities = facilities;
makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
makex25->calluserdata = calluserdata;
x25_write_internal(make, X25_CALL_ACCEPTED);
/*
* Incoming Call User Data.
*/
if (skb->len >= 0) {
memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
makex25->calluserdata.cudlength = skb->len;
}
makex25->state = X25_STATE_3;
sk->sk_ack_backlog++;
......
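x25_find_listener() now walks the whole listener list instead of stopping at the first address match: a listener whose configured call user data matches the incoming call wins, and a listener with no CUD configured is only remembered as a fallback. A simplified sketch of that two-tier selection over a plain array follows; the listener struct, names, and string-based CUD are invented for illustration, whereas the kernel walks a socket list and compares x25_calluserdata blocks.

/*
 * Simplified sketch of the new listener selection: prefer an exact
 * call-user-data match, otherwise fall back to a listener with no CUD
 * configured; return NULL (refuse the call) if neither exists.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct listener {
	const char *name;
	const char *cud;          /* NULL or "" means: no CUD configured */
};

static const struct listener *pick_listener(const struct listener *l, size_t n,
					    const char *incoming_cud)
{
	const struct listener *next_best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (l[i].cud && l[i].cud[0]) {
			if (strcmp(l[i].cud, incoming_cud) == 0)
				return &l[i];        /* exact CUD match wins */
		} else {
			next_best = &l[i];           /* address-only candidate */
		}
	}
	return next_best;
}

int main(void)
{
	struct listener ls[] = { { "wildcard", "" }, { "payroll", "PAY" } };
	const struct listener *s = pick_listener(ls, 2, "PAY");

	printf("call routed to: %s\n", s ? s->name : "(none)");
	return 0;
}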
......@@ -5,7 +5,6 @@
* Added /proc/sys/net/x25 directory entry (empty =) ). [MS]
*/
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
......
......@@ -18,29 +18,10 @@
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <net/x25.h>
......
......@@ -19,24 +19,10 @@
* negotiation.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/x25.h>
/*
......
......@@ -24,25 +24,11 @@
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/ip.h> /* For ip_rcv */
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/x25.h>
static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
......
......@@ -21,25 +21,12 @@
* 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <net/x25.h>
......
......@@ -22,24 +22,11 @@
* needed cleaned seq-number fields.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/x25.h>
static int x25_pacsize_to_bytes(unsigned int pacsize)
......
......@@ -21,25 +21,11 @@
* jun/24/01 Arnaldo C. Melo use skb_queue_purge, cleanups
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/x25.h>
/*
......@@ -367,3 +353,22 @@ void x25_check_rbuf(struct sock *sk)
x25_stop_timer(sk);
}
}
/*
* Compare 2 calluserdata structures, used to find correct listening sockets
* when call user data is used.
*/
int x25_check_calluserdata(struct x25_calluserdata *ours, struct x25_calluserdata *theirs)
{
int i;
if (ours->cudlength != theirs->cudlength)
return 0;
for (i=0;i<ours->cudlength;i++) {
if (ours->cuddata[i] != theirs->cuddata[i]) {
return 0;
}
}
return 1;
}
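x25_check_calluserdata() above compares the two CUD blocks byte by byte after checking the lengths; functionally this is a length check followed by memcmp(). A tiny userspace equivalent for illustration only, with a stand-in struct rather than the kernel's x25_calluserdata:

/*
 * Userspace equivalent of x25_check_calluserdata(): two CUD blocks are
 * equal when their lengths match and the bytes compare equal.
 */
#include <string.h>

struct cud {
	unsigned int  len;
	unsigned char data[128];
};

static int cud_equal(const struct cud *a, const struct cud *b)
{
	return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
}

int main(void)
{
	struct cud a = { 3, "PAY" }, b = { 3, "PAY" };

	return !cud_equal(&a, &b);      /* exit status 0 when equal */
}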
......@@ -20,24 +20,10 @@
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/x25.h>
static void x25_heartbeat_expiry(unsigned long);
......
......@@ -549,6 +549,8 @@ void xfrm_policy_delete(struct xfrm_policy *pol, int dir)
}
}
EXPORT_SYMBOL(xfrm_policy_delete);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
struct xfrm_policy *old_pol;
......