Commit a736e074 authored by David S. Miller

Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net

Overlapping changes in RXRPC, changing to ktime_get_seconds() whilst
adding some tracepoints.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 192e91d2 112cbae2
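
The RXRPC conflict mentioned above comes down to tracking the peer's last transmission time in whole seconds (time64_t filled from ktime_get_seconds()) instead of nanosecond ktime_t values, so the keepalive buckets can be indexed per second. A minimal sketch of the pattern, using an illustrative demo_peer struct and demo_note_tx() helper rather than the real rxrpc types:

	#include <linux/ktime.h>	/* ktime_get_seconds() */
	#include <linux/time64.h>	/* time64_t */

	/* Illustrative stand-in for the field changed in struct rxrpc_peer. */
	struct demo_peer {
		time64_t	last_tx_at;	/* seconds; was a ktime_t in ns */
	};

	/*
	 * Hypothetical helper showing the substitution made throughout the
	 * diff below: every ktime_get_real() assignment to last_tx_at
	 * becomes ktime_get_seconds().
	 */
	static void demo_note_tx(struct demo_peer *peer)
	{
		peer->last_tx_at = ktime_get_seconds();
	}
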
...@@ -19,33 +19,24 @@ ...@@ -19,33 +19,24 @@
* u32 *macp, u8 const rk[], u32 rounds); * u32 *macp, u8 const rk[], u32 rounds);
*/ */
ENTRY(ce_aes_ccm_auth_data) ENTRY(ce_aes_ccm_auth_data)
frame_push 7 ldr w8, [x3] /* leftover from prev round? */
mov x19, x0
mov x20, x1
mov x21, x2
mov x22, x3
mov x23, x4
mov x24, x5
ldr w25, [x22] /* leftover from prev round? */
ld1 {v0.16b}, [x0] /* load mac */ ld1 {v0.16b}, [x0] /* load mac */
cbz w25, 1f cbz w8, 1f
sub w25, w25, #16 sub w8, w8, #16
eor v1.16b, v1.16b, v1.16b eor v1.16b, v1.16b, v1.16b
0: ldrb w7, [x20], #1 /* get 1 byte of input */ 0: ldrb w7, [x1], #1 /* get 1 byte of input */
subs w21, w21, #1 subs w2, w2, #1
add w25, w25, #1 add w8, w8, #1
ins v1.b[0], w7 ins v1.b[0], w7
ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
beq 8f /* out of input? */ beq 8f /* out of input? */
cbnz w25, 0b cbnz w8, 0b
eor v0.16b, v0.16b, v1.16b eor v0.16b, v0.16b, v1.16b
1: ld1 {v3.4s}, [x23] /* load first round key */ 1: ld1 {v3.4s}, [x4] /* load first round key */
prfm pldl1strm, [x20] prfm pldl1strm, [x1]
cmp w24, #12 /* which key size? */ cmp w5, #12 /* which key size? */
add x6, x23, #16 add x6, x4, #16
sub w7, w24, #2 /* modified # of rounds */ sub w7, w5, #2 /* modified # of rounds */
bmi 2f bmi 2f
bne 5f bne 5f
mov v5.16b, v3.16b mov v5.16b, v3.16b
...@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data) ...@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
ld1 {v5.4s}, [x6], #16 /* load next round key */ ld1 {v5.4s}, [x6], #16 /* load next round key */
bpl 3b bpl 3b
aese v0.16b, v4.16b aese v0.16b, v4.16b
subs w21, w21, #16 /* last data? */ subs w2, w2, #16 /* last data? */
eor v0.16b, v0.16b, v5.16b /* final round */ eor v0.16b, v0.16b, v5.16b /* final round */
bmi 6f bmi 6f
ld1 {v1.16b}, [x20], #16 /* load next input block */ ld1 {v1.16b}, [x1], #16 /* load next input block */
eor v0.16b, v0.16b, v1.16b /* xor with mac */ eor v0.16b, v0.16b, v1.16b /* xor with mac */
beq 6f bne 1b
6: st1 {v0.16b}, [x0] /* store mac */
if_will_cond_yield_neon
st1 {v0.16b}, [x19] /* store mac */
do_cond_yield_neon
ld1 {v0.16b}, [x19] /* reload mac */
endif_yield_neon
b 1b
6: st1 {v0.16b}, [x19] /* store mac */
beq 10f beq 10f
adds w21, w21, #16 adds w2, w2, #16
beq 10f beq 10f
mov w25, w21 mov w8, w2
7: ldrb w7, [x20], #1 7: ldrb w7, [x1], #1
umov w6, v0.b[0] umov w6, v0.b[0]
eor w6, w6, w7 eor w6, w6, w7
strb w6, [x19], #1 strb w6, [x0], #1
subs w21, w21, #1 subs w2, w2, #1
beq 10f beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b b 7b
8: mov w7, w25 8: mov w7, w8
add w25, w25, #16 add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1 9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1 adds w7, w7, #1
bne 9b bne 9b
eor v0.16b, v0.16b, v1.16b eor v0.16b, v0.16b, v1.16b
st1 {v0.16b}, [x19] st1 {v0.16b}, [x0]
10: str w25, [x22] 10: str w8, [x3]
frame_pop
ret ret
ENDPROC(ce_aes_ccm_auth_data) ENDPROC(ce_aes_ccm_auth_data)
...@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final) ...@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
ENDPROC(ce_aes_ccm_final) ENDPROC(ce_aes_ccm_final)
.macro aes_ccm_do_crypt,enc .macro aes_ccm_do_crypt,enc
frame_push 8 ldr x8, [x6, #8] /* load lower ctr */
ld1 {v0.16b}, [x5] /* load mac */
mov x19, x0 CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
mov x20, x1
mov x21, x2
mov x22, x3
mov x23, x4
mov x24, x5
mov x25, x6
ldr x26, [x25, #8] /* load lower ctr */
ld1 {v0.16b}, [x24] /* load mac */
CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
0: /* outer loop */ 0: /* outer loop */
ld1 {v1.8b}, [x25] /* load upper ctr */ ld1 {v1.8b}, [x6] /* load upper ctr */
prfm pldl1strm, [x20] prfm pldl1strm, [x1]
add x26, x26, #1 add x8, x8, #1
rev x9, x26 rev x9, x8
cmp w23, #12 /* which key size? */ cmp w4, #12 /* which key size? */
sub w7, w23, #2 /* get modified # of rounds */ sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */ ins v1.d[1], x9 /* no carry in lower ctr */
ld1 {v3.4s}, [x22] /* load first round key */ ld1 {v3.4s}, [x3] /* load first round key */
add x10, x22, #16 add x10, x3, #16
bmi 1f bmi 1f
bne 4f bne 4f
mov v5.16b, v3.16b mov v5.16b, v3.16b
...@@ -194,9 +165,9 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */ ...@@ -194,9 +165,9 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
bpl 2b bpl 2b
aese v0.16b, v4.16b aese v0.16b, v4.16b
aese v1.16b, v4.16b aese v1.16b, v4.16b
subs w21, w21, #16 subs w2, w2, #16
bmi 7f /* partial block? */ bmi 6f /* partial block? */
ld1 {v2.16b}, [x20], #16 /* load next input block */ ld1 {v2.16b}, [x1], #16 /* load next input block */
.if \enc == 1 .if \enc == 1
eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
...@@ -205,29 +176,18 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */ ...@@ -205,29 +176,18 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
eor v1.16b, v2.16b, v5.16b /* final round enc */ eor v1.16b, v2.16b, v5.16b /* final round enc */
.endif .endif
eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
st1 {v1.16b}, [x19], #16 /* write output block */ st1 {v1.16b}, [x0], #16 /* write output block */
beq 5f bne 0b
CPU_LE( rev x8, x8 )
if_will_cond_yield_neon st1 {v0.16b}, [x5] /* store mac */
st1 {v0.16b}, [x24] /* store mac */ str x8, [x6, #8] /* store lsb end of ctr (BE) */
do_cond_yield_neon 5: ret
ld1 {v0.16b}, [x24] /* reload mac */
endif_yield_neon 6: eor v0.16b, v0.16b, v5.16b /* final round mac */
b 0b
5:
CPU_LE( rev x26, x26 )
st1 {v0.16b}, [x24] /* store mac */
str x26, [x25, #8] /* store lsb end of ctr (BE) */
6: frame_pop
ret
7: eor v0.16b, v0.16b, v5.16b /* final round mac */
eor v1.16b, v1.16b, v5.16b /* final round enc */ eor v1.16b, v1.16b, v5.16b /* final round enc */
st1 {v0.16b}, [x24] /* store mac */ st1 {v0.16b}, [x5] /* store mac */
add w21, w21, #16 /* process partial tail block */ add w2, w2, #16 /* process partial tail block */
8: ldrb w9, [x20], #1 /* get 1 byte of input */ 7: ldrb w9, [x1], #1 /* get 1 byte of input */
umov w6, v1.b[0] /* get top crypted ctr byte */ umov w6, v1.b[0] /* get top crypted ctr byte */
umov w7, v0.b[0] /* get top mac byte */ umov w7, v0.b[0] /* get top mac byte */
.if \enc == 1 .if \enc == 1
...@@ -237,13 +197,13 @@ CPU_LE( rev x26, x26 ) ...@@ -237,13 +197,13 @@ CPU_LE( rev x26, x26 )
eor w9, w9, w6 eor w9, w9, w6
eor w7, w7, w9 eor w7, w7, w9
.endif .endif
strb w9, [x19], #1 /* store out byte */ strb w9, [x0], #1 /* store out byte */
strb w7, [x24], #1 /* store mac byte */ strb w7, [x5], #1 /* store mac byte */
subs w21, w21, #1 subs w2, w2, #1
beq 6b beq 5b
ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
b 8b b 7b
.endm .endm
/* /*
......
...@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8) ...@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
.endm .endm
.macro pmull_gcm_do_crypt, enc .macro pmull_gcm_do_crypt, enc
frame_push 10 ld1 {SHASH.2d}, [x4]
ld1 {XL.2d}, [x1]
ldr x8, [x5, #8] // load lower counter
mov x19, x0 load_round_keys w7, x6
mov x20, x1
mov x21, x2
mov x22, x3
mov x23, x4
mov x24, x5
mov x25, x6
mov x26, x7
.if \enc == 1
ldr x27, [sp, #96] // first stacked arg
.endif
ldr x28, [x24, #8] // load lower counter
CPU_LE( rev x28, x28 )
0: mov x0, x25
load_round_keys w26, x0
ld1 {SHASH.2d}, [x23]
ld1 {XL.2d}, [x20]
movi MASK.16b, #0xe1 movi MASK.16b, #0xe1
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57 shl MASK.2d, MASK.2d, #57
eor SHASH2.16b, SHASH2.16b, SHASH.16b eor SHASH2.16b, SHASH2.16b, SHASH.16b
.if \enc == 1 .if \enc == 1
ld1 {KS.16b}, [x27] ldr x10, [sp]
ld1 {KS.16b}, [x10]
.endif .endif
1: ld1 {CTR.8b}, [x24] // load upper counter 0: ld1 {CTR.8b}, [x5] // load upper counter
ld1 {INP.16b}, [x22], #16 ld1 {INP.16b}, [x3], #16
rev x9, x28 rev x9, x8
add x28, x28, #1 add x8, x8, #1
sub w19, w19, #1 sub w0, w0, #1
ins CTR.d[1], x9 // set lower counter ins CTR.d[1], x9 // set lower counter
.if \enc == 1 .if \enc == 1
eor INP.16b, INP.16b, KS.16b // encrypt input eor INP.16b, INP.16b, KS.16b // encrypt input
st1 {INP.16b}, [x21], #16 st1 {INP.16b}, [x2], #16
.endif .endif
rev64 T1.16b, INP.16b rev64 T1.16b, INP.16b
cmp w26, #12 cmp w7, #12
b.ge 4f // AES-192/256? b.ge 2f // AES-192/256?
2: enc_round CTR, v21 1: enc_round CTR, v21
ext T2.16b, XL.16b, XL.16b, #8 ext T2.16b, XL.16b, XL.16b, #8
ext IN1.16b, T1.16b, T1.16b, #8 ext IN1.16b, T1.16b, T1.16b, #8
...@@ -425,39 +411,27 @@ CPU_LE( rev x28, x28 ) ...@@ -425,39 +411,27 @@ CPU_LE( rev x28, x28 )
.if \enc == 0 .if \enc == 0
eor INP.16b, INP.16b, KS.16b eor INP.16b, INP.16b, KS.16b
st1 {INP.16b}, [x21], #16 st1 {INP.16b}, [x2], #16
.endif .endif
cbz w19, 3f cbnz w0, 0b
if_will_cond_yield_neon CPU_LE( rev x8, x8 )
st1 {XL.2d}, [x20] st1 {XL.2d}, [x1]
.if \enc == 1 str x8, [x5, #8] // store lower counter
st1 {KS.16b}, [x27]
.endif
do_cond_yield_neon
b 0b
endif_yield_neon
b 1b
3: st1 {XL.2d}, [x20]
.if \enc == 1 .if \enc == 1
st1 {KS.16b}, [x27] st1 {KS.16b}, [x10]
.endif .endif
CPU_LE( rev x28, x28 )
str x28, [x24, #8] // store lower counter
frame_pop
ret ret
4: b.eq 5f // AES-192? 2: b.eq 3f // AES-192?
enc_round CTR, v17 enc_round CTR, v17
enc_round CTR, v18 enc_round CTR, v18
5: enc_round CTR, v19 3: enc_round CTR, v19
enc_round CTR, v20 enc_round CTR, v20
b 2b b 1b
.endm .endm
/* /*
......
...@@ -195,7 +195,7 @@ config PREFETCH
 config MLONGCALLS
 	bool "Enable the -mlong-calls compiler option for big kernels"
-	def_bool y if (!MODULES)
+	default y
 	depends on PA8X00
 	help
 	  If you configure the kernel to include many drivers built-in instead
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
/* The synchronize caches instruction executes as a nop on systems in
which all memory references are performed in order. */
#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
#if defined(CONFIG_SMP)
#define mb() do { synchronize_caches(); } while (0)
#define rmb() mb()
#define wmb() mb()
#define dma_rmb() mb()
#define dma_wmb() mb()
#else
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define dma_rmb() barrier()
#define dma_wmb() barrier()
#endif
#define __smp_mb() mb()
#define __smp_rmb() mb()
#define __smp_wmb() mb()
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
...@@ -481,6 +481,8 @@ ...@@ -481,6 +481,8 @@
/* Release pa_tlb_lock lock without reloading lock address. */ /* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp .macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
sync
or,COND(=) %r0,\spc,%r0 or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp) stw \spc,0(\tmp)
#endif #endif
......
...@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local) ...@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local)
.macro tlb_unlock la,flags,tmp .macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
ldi 1,\tmp ldi 1,\tmp
sync
stw \tmp,0(\la) stw \tmp,0(\la)
mtsm \flags mtsm \flags
#endif #endif
......
...@@ -633,6 +633,7 @@ cas_action: ...@@ -633,6 +633,7 @@ cas_action:
sub,<> %r28, %r25, %r0 sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%r26) 2: stw,ma %r24, 0(%r26)
/* Free lock */ /* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20) stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG #if ENABLE_LWS_DEBUG
/* Clear thread register indicator */ /* Clear thread register indicator */
...@@ -647,6 +648,7 @@ cas_action: ...@@ -647,6 +648,7 @@ cas_action:
3: 3:
/* Error occurred on load or store */ /* Error occurred on load or store */
/* Free lock */ /* Free lock */
sync
stw %r20, 0(%sr2,%r20) stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG #if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20) stw %r0, 4(%sr2,%r20)
...@@ -848,6 +850,7 @@ cas2_action: ...@@ -848,6 +850,7 @@ cas2_action:
cas2_end: cas2_end:
/* Free lock */ /* Free lock */
sync
stw,ma %r20, 0(%sr2,%r20) stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */ /* Enable interrupts */
ssm PSW_SM_I, %r0 ssm PSW_SM_I, %r0
...@@ -858,6 +861,7 @@ cas2_end: ...@@ -858,6 +861,7 @@ cas2_end:
22: 22:
/* Error occurred on load or store */ /* Error occurred on load or store */
/* Free lock */ /* Free lock */
sync
stw %r20, 0(%sr2,%r20) stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0 ssm PSW_SM_I, %r0
ldo 1(%r0),%r28 ldo 1(%r0),%r28
......
...@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = { ...@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
} }
}; };
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
X86_FEATURE_MATCH(X86_FEATURE_XMM2),
{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init crypto_aegis128_aesni_module_init(void) static int __init crypto_aegis128_aesni_module_init(void)
{ {
if (!x86_match_cpu(aesni_cpu_id)) if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV; return -ENODEV;
return crypto_register_aeads(crypto_aegis128_aesni_alg, return crypto_register_aeads(crypto_aegis128_aesni_alg,
......
...@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = { ...@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
} }
}; };
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
X86_FEATURE_MATCH(X86_FEATURE_XMM2),
{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init crypto_aegis128l_aesni_module_init(void) static int __init crypto_aegis128l_aesni_module_init(void)
{ {
if (!x86_match_cpu(aesni_cpu_id)) if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV; return -ENODEV;
return crypto_register_aeads(crypto_aegis128l_aesni_alg, return crypto_register_aeads(crypto_aegis128l_aesni_alg,
......
...@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = { ...@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
} }
}; };
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
X86_FEATURE_MATCH(X86_FEATURE_XMM2),
{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
static int __init crypto_aegis256_aesni_module_init(void) static int __init crypto_aegis256_aesni_module_init(void)
{ {
if (!x86_match_cpu(aesni_cpu_id)) if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV; return -ENODEV;
return crypto_register_aeads(crypto_aegis256_aesni_alg, return crypto_register_aeads(crypto_aegis256_aesni_alg,
......
...@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor, ...@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400); MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
static const struct x86_cpu_id avx2_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AVX2),
{}
};
MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
static int __init crypto_morus1280_avx2_module_init(void) static int __init crypto_morus1280_avx2_module_init(void)
{ {
if (!x86_match_cpu(avx2_cpu_id)) if (!boot_cpu_has(X86_FEATURE_AVX2) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return -ENODEV; return -ENODEV;
return crypto_register_aeads(crypto_morus1280_avx2_algs, return crypto_register_aeads(crypto_morus1280_avx2_algs,
......
...@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor, ...@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
static const struct x86_cpu_id sse2_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XMM2),
{}
};
MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
static int __init crypto_morus1280_sse2_module_init(void) static int __init crypto_morus1280_sse2_module_init(void)
{ {
if (!x86_match_cpu(sse2_cpu_id)) if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV; return -ENODEV;
return crypto_register_aeads(crypto_morus1280_sse2_algs, return crypto_register_aeads(crypto_morus1280_sse2_algs,
......
...@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor, ...@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
static const struct x86_cpu_id sse2_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XMM2),
{}
};
MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
static int __init crypto_morus640_sse2_module_init(void) static int __init crypto_morus640_sse2_module_init(void)
{ {
if (!x86_match_cpu(sse2_cpu_id)) if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV; return -ENODEV;
return crypto_register_aeads(crypto_morus640_sse2_algs, return crypto_register_aeads(crypto_morus640_sse2_algs,
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
struct acpi_gpio_event { struct acpi_gpio_event {
struct list_head node; struct list_head node;
struct list_head initial_sync_list;
acpi_handle handle; acpi_handle handle;
unsigned int pin; unsigned int pin;
unsigned int irq; unsigned int irq;
...@@ -50,6 +51,9 @@ struct acpi_gpio_chip { ...@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
struct list_head events; struct list_head events;
}; };
static LIST_HEAD(acpi_gpio_initial_sync_list);
static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{ {
if (!gc->parent) if (!gc->parent)
...@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) ...@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin); return gpiochip_get_desc(chip, pin);
} }
static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
{
mutex_lock(&acpi_gpio_initial_sync_list_lock);
list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
mutex_unlock(&acpi_gpio_initial_sync_list_lock);
}
static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
{
mutex_lock(&acpi_gpio_initial_sync_list_lock);
if (!list_empty(&event->initial_sync_list))
list_del_init(&event->initial_sync_list);
mutex_unlock(&acpi_gpio_initial_sync_list_lock);
}
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{ {
struct acpi_gpio_event *event = data; struct acpi_gpio_event *event = data;
...@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, ...@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
irq_handler_t handler = NULL; irq_handler_t handler = NULL;
struct gpio_desc *desc; struct gpio_desc *desc;
unsigned long irqflags; unsigned long irqflags;
int ret, pin, irq; int ret, pin, irq, value;
if (!acpi_gpio_get_irq_resource(ares, &agpio)) if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK; return AE_OK;
...@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, ...@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc); gpiod_direction_input(desc);
value = gpiod_get_value(desc);
ret = gpiochip_lock_as_irq(chip, pin); ret = gpiochip_lock_as_irq(chip, pin);
if (ret) { if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n"); dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
...@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, ...@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
event->irq = irq; event->irq = irq;
event->pin = pin; event->pin = pin;
event->desc = desc; event->desc = desc;
INIT_LIST_HEAD(&event->initial_sync_list);
ret = request_threaded_irq(event->irq, NULL, handler, irqflags, ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event); "ACPI:Event", event);
...@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, ...@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
enable_irq_wake(irq); enable_irq_wake(irq);
list_add_tail(&event->node, &acpi_gpio->events); list_add_tail(&event->node, &acpi_gpio->events);
/*
* Make sure we trigger the initial state of the IRQ when using RISING
* or FALLING. Note we run the handlers on late_init, the AML code
* may refer to OperationRegions from other (builtin) drivers which
* may be probed after us.
*/
if (handler == acpi_gpio_irq_handler &&
(((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
acpi_gpio_add_to_initial_sync_list(event);
return AE_OK; return AE_OK;
fail_free_event: fail_free_event:
...@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) ...@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc; struct gpio_desc *desc;
acpi_gpio_del_from_initial_sync_list(event);
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq); disable_irq_wake(event->irq);
...@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) ...@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL; return con_id == NULL;
} }
/* Sync the initial state of handlers after all builtin drivers have probed */
static int acpi_gpio_initial_sync(void)
{
struct acpi_gpio_event *event, *ep;
mutex_lock(&acpi_gpio_initial_sync_list_lock);
list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
initial_sync_list) {
acpi_evaluate_object(event->handle, NULL, NULL, NULL);
list_del_init(&event->initial_sync_list);
}
mutex_unlock(&acpi_gpio_initial_sync_list_lock);
return 0;
}
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_initial_sync);
...@@ -764,7 +764,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
 	hw_atl_rpfl2multicast_flr_en_set(self,
-					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+					 IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
 	hw_atl_rpfl2_accept_all_mc_packets_set(self,
 					       IS_FILTER_ENABLED(IFF_ALLMULTI));
......
...@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
 	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
 			      GFP_KERNEL);
+	if (!lmac->dmacs)
+		return -ENOMEM;

 	/* Enable lmac */
 	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
......
...@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
 	req->local_port = cpu_to_be16(f->fs.val.lport);
 	req->peer_port = cpu_to_be16(f->fs.val.fport);
-	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
-			f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
-	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
-		       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+	memcpy(&req->local_ip, f->fs.val.lip, 4);
+	memcpy(&req->peer_ip, f->fs.val.fip, 4);
 	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
 					f->fs.newvlan == VLAN_REWRITE) |
 				DELACK_V(f->fs.hitcnts) |
......
...@@ -858,8 +858,6 @@ struct mlx5e_profile {
 		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
 		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
 	} rx_handlers;
-	void	(*netdev_registered_init)(struct mlx5e_priv *priv);
-	void	(*netdev_registered_remove)(struct mlx5e_priv *priv);
 	int	max_tc;
 };
......
...@@ -443,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) ...@@ -443,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
bool is_new; bool is_new;
int err; int err;
if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
return -EINVAL; !MLX5_DSCP_SUPPORTED(priv->mdev))
return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
return -EINVAL;
if (!MLX5_DSCP_SUPPORTED(priv->mdev))
return -EINVAL;
if (app->protocol >= MLX5E_MAX_DSCP) if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
(app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL; return -EINVAL;
/* Save the old entry info */ /* Save the old entry info */
...@@ -500,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) ...@@ -500,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
int err; int err;
if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
return -EINVAL; !MLX5_DSCP_SUPPORTED(priv->mdev))
return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
return -EINVAL;
if (!MLX5_DSCP_SUPPORTED(priv->mdev))
return -EINVAL;
if (app->protocol >= MLX5E_MAX_DSCP) if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
(app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL; return -EINVAL;
/* Skip if no dscp app entry */ /* Skip if no dscp app entry */
...@@ -1146,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) ...@@ -1146,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{ {
int err; int err;
err = mlx5_set_trust_state(priv->mdev, trust_state); err = mlx5_set_trust_state(priv->mdev, trust_state);
if (err) if (err)
return err; return err;
priv->dcbx_dp.trust_state = trust_state; priv->dcbx_dp.trust_state = trust_state;
......
...@@ -2034,15 +2034,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
 	struct mlx5_core_dev *fmdev, *pmdev;
-	u16 func_id, peer_id;
+	u64 fsystem_guid, psystem_guid;

 	fmdev = priv->mdev;
 	pmdev = peer_priv->mdev;

-	func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
-	peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+	mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+	mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);

-	return (func_id == peer_id);
+	return (fsystem_guid == psystem_guid);
 }

 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
......
...@@ -1595,9 +1595,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
 	d->iotlb = niotlb;

 	for (i = 0; i < d->nvqs; ++i) {
-		mutex_lock(&d->vqs[i]->mutex);
-		d->vqs[i]->iotlb = niotlb;
-		mutex_unlock(&d->vqs[i]->mutex);
+		struct vhost_virtqueue *vq = d->vqs[i];
+
+		mutex_lock(&vq->mutex);
+		vq->iotlb = niotlb;
+		__vhost_vq_meta_reset(vq);
+		mutex_unlock(&vq->mutex);
 	}

 	vhost_umem_clean(oiotlb);
......
...@@ -64,7 +64,8 @@ struct vsock_sock {
 	struct list_head pending_links;
 	struct list_head accept_queue;
 	bool rejected;
-	struct delayed_work dwork;
+	struct delayed_work connect_work;
+	struct delayed_work pending_work;
 	struct delayed_work close_work;
 	bool close_work_scheduled;
 	u32 peer_shutdown;
...@@ -77,7 +78,6 @@ struct vsock_sock {

 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
......
...@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
 	refcount_inc(&sap->refcnt);
 }

+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+	return refcount_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);

 static inline void llc_sap_put(struct llc_sap *sap)
......
...@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 	u32 cwnd = hc->tx_cwnd, restart_cwnd,
 	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+	s32 delta = now - hc->tx_lsndtime;

 	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));

 	/* don't reduce cwnd below the initial window (IW) */
 	restart_cwnd = min(cwnd, iwnd);
-	cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-	hc->tx_cwnd = max(cwnd, restart_cwnd);
+
+	while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+		cwnd >>= 1;
+	hc->tx_cwnd = max(cwnd, restart_cwnd);

 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used  = 0;
......
...@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;

 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev)
+	if (!dev->phydev && !dp->pl)
 		return -ENODEV;

 	if (!ds->ops->set_mac_eee)
...@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;

 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev)
+	if (!dev->phydev && !dp->pl)
 		return -ENODEV;

 	if (!ds->ops->get_mac_eee)
......
...@@ -1133,12 +1133,8 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 		max_headroom += 8;
 		mtu -= 8;
 	}
-	if (skb->protocol == htons(ETH_P_IPV6)) {
-		if (mtu < IPV6_MIN_MTU)
-			mtu = IPV6_MIN_MTU;
-	} else if (mtu < 576) {
-		mtu = 576;
-	}
+	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+		       IPV6_MIN_MTU : IPV4_MIN_MTU);

 	skb_dst_update_pmtu(skb, mtu);
 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
......
...@@ -978,10 +978,6 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 	rt->rt6i_flags &= ~RTF_EXPIRES;
 	rcu_assign_pointer(rt->from, from);
 	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-	if (from->fib6_metrics != &dst_default_metrics) {
-		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-		refcount_inc(&from->fib6_metrics->refcnt);
-	}
 }

 /* Caller must already hold reference to @ort */
......
...@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
 	rcu_read_lock_bh();
 	sap = __llc_sap_find(sap_value);
-	if (sap)
-		llc_sap_hold(sap);
+	if (!sap || !llc_sap_hold_safe(sap))
+		sap = NULL;
 	rcu_read_unlock_bh();
 	return sap;
 }
......
...@@ -4230,6 +4230,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	}

 	if (req->tp_block_nr) {
+		unsigned int min_frame_size;
+
 		/* Sanity tests and some calculations */
 		err = -EBUSY;
 		if (unlikely(rb->pg_vec))
...@@ -4252,12 +4254,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 			goto out;
 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
 			goto out;
+		min_frame_size = po->tp_hdrlen + po->tp_reserve;
 		if (po->tp_version >= TPACKET_V3 &&
-		    req->tp_block_size <=
-		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+		    req->tp_block_size <
+		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
 			goto out;
-		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-					po->tp_reserve))
+		if (unlikely(req->tp_frame_size < min_frame_size))
 			goto out;
 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
 			goto out;
......
...@@ -104,9 +104,9 @@ struct rxrpc_net { ...@@ -104,9 +104,9 @@ struct rxrpc_net {
#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */ #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
u8 peer_keepalive_cursor; u8 peer_keepalive_cursor;
ktime_t peer_keepalive_base; time64_t peer_keepalive_base;
struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1]; struct list_head peer_keepalive[32];
struct hlist_head peer_keepalive_new; struct list_head peer_keepalive_new;
struct timer_list peer_keepalive_timer; struct timer_list peer_keepalive_timer;
struct work_struct peer_keepalive_work; struct work_struct peer_keepalive_work;
}; };
...@@ -295,7 +295,7 @@ struct rxrpc_peer { ...@@ -295,7 +295,7 @@ struct rxrpc_peer {
struct hlist_head error_targets; /* targets for net error distribution */ struct hlist_head error_targets; /* targets for net error distribution */
struct work_struct error_distributor; struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */ struct rb_root service_conns; /* Service connections */
struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */ struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
time64_t last_tx_at; /* Last time packet sent here */ time64_t last_tx_at; /* Last time packet sent here */
seqlock_t service_conn_lock; seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */ spinlock_t lock; /* access lock */
......
...@@ -138,7 +138,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, ...@@ -138,7 +138,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
} }
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0) if (ret < 0)
trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret, trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
rxrpc_tx_point_call_final_resend); rxrpc_tx_point_call_final_resend);
...@@ -252,7 +252,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, ...@@ -252,7 +252,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort); trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
......
...@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net) ...@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
hash_init(rxnet->peer_hash); hash_init(rxnet->peer_hash);
spin_lock_init(&rxnet->peer_hash_lock); spin_lock_init(&rxnet->peer_hash_lock);
for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++) for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]); INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
INIT_HLIST_HEAD(&rxnet->peer_keepalive_new); INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
timer_setup(&rxnet->peer_keepalive_timer, timer_setup(&rxnet->peer_keepalive_timer,
rxrpc_peer_keepalive_timeout, 0); rxrpc_peer_keepalive_timeout, 0);
INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker); INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC); rxnet->peer_keepalive_base = ktime_get_seconds();
ret = -ENOMEM; ret = -ENOMEM;
rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net); rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
......
...@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, ...@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
now = ktime_get_real(); now = ktime_get_real();
if (ping) if (ping)
call->ping_time = now; call->ping_time = now;
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0) if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret, trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_ack); rxrpc_tx_point_call_ack);
...@@ -299,7 +299,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call) ...@@ -299,7 +299,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
ret = kernel_sendmsg(conn->params.local->socket, ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, 1, sizeof(pkt)); &msg, iov, 1, sizeof(pkt));
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0) if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret, trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_abort); rxrpc_tx_point_call_abort);
...@@ -397,7 +397,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -397,7 +397,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* message and update the peer record * message and update the peer record
*/ */
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem); up_read(&conn->params.local->defrag_sem);
if (ret < 0) if (ret < 0)
...@@ -466,7 +466,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -466,7 +466,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
if (ret == 0) { if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg, ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len); iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
opt = IP_PMTUDISC_DO; opt = IP_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, SOL_IP, kernel_setsockopt(conn->params.local->socket, SOL_IP,
...@@ -484,7 +484,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -484,7 +484,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
if (ret == 0) { if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg, ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len); iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
opt = IPV6_PMTUDISC_DO; opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, kernel_setsockopt(conn->params.local->socket,
...@@ -617,6 +617,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer) ...@@ -617,6 +617,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
trace_rxrpc_tx_packet(peer->debug_id, &whdr, trace_rxrpc_tx_packet(peer->debug_id, &whdr,
rxrpc_tx_point_version_keepalive); rxrpc_tx_point_version_keepalive);
peer->last_tx_at = ktime_get_real(); peer->last_tx_at = ktime_get_seconds();
_leave(""); _leave("");
} }
...@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, ...@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
} }
/* /*
* Perform keep-alive pings with VERSION packets to keep any NAT alive. * Perform keep-alive pings.
*/ */
void rxrpc_peer_keepalive_worker(struct work_struct *work) static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
struct list_head *collector,
time64_t base,
u8 cursor)
{ {
struct rxrpc_net *rxnet =
container_of(work, struct rxrpc_net, peer_keepalive_work);
struct rxrpc_peer *peer; struct rxrpc_peer *peer;
unsigned long delay; const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
ktime_t base, now = ktime_get_real(); time64_t keepalive_at;
s64 diff; int slot;
u8 cursor, slot;
base = rxnet->peer_keepalive_base; spin_lock_bh(&rxnet->peer_hash_lock);
cursor = rxnet->peer_keepalive_cursor;
_enter("%u,%lld", cursor, ktime_sub(now, base)); while (!list_empty(collector)) {
peer = list_entry(collector->next,
struct rxrpc_peer, keepalive_link);
next_bucket: list_del_init(&peer->keepalive_link);
diff = ktime_to_ns(ktime_sub(now, base)); if (!rxrpc_get_peer_maybe(peer))
if (diff < 0) continue;
goto resched;
_debug("at %u", cursor);
spin_lock_bh(&rxnet->peer_hash_lock);
next_peer:
if (!rxnet->live) {
spin_unlock_bh(&rxnet->peer_hash_lock); spin_unlock_bh(&rxnet->peer_hash_lock);
goto out;
}
/* Everything in the bucket at the cursor is processed this second; the keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
* bucket at cursor + 1 goes now + 1s and so on... slot = keepalive_at - base;
*/ _debug("%02x peer %u t=%d {%pISp}",
if (hlist_empty(&rxnet->peer_keepalive[cursor])) { cursor, peer->debug_id, slot, &peer->srx.transport);
if (hlist_empty(&rxnet->peer_keepalive_new)) {
spin_unlock_bh(&rxnet->peer_hash_lock); if (keepalive_at <= base ||
goto emptied_bucket; keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
rxrpc_send_keepalive(peer);
slot = RXRPC_KEEPALIVE_TIME;
} }
hlist_move_list(&rxnet->peer_keepalive_new, /* A transmission to this peer occurred since last we examined
&rxnet->peer_keepalive[cursor]); * it so put it into the appropriate future bucket.
*/
slot += cursor;
slot &= mask;
spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
rxrpc_put_peer(peer);
} }
peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
struct rxrpc_peer, keepalive_link);
hlist_del_init(&peer->keepalive_link);
if (!rxrpc_get_peer_maybe(peer))
goto next_peer;
spin_unlock_bh(&rxnet->peer_hash_lock); spin_unlock_bh(&rxnet->peer_hash_lock);
}
_debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport); /*
* Perform keep-alive pings with VERSION packets to keep any NAT alive.
*/
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
struct rxrpc_net *rxnet =
container_of(work, struct rxrpc_net, peer_keepalive_work);
const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
time64_t base, now, delay;
u8 cursor, stop;
LIST_HEAD(collector);
recalc: now = ktime_get_seconds();
diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC); base = rxnet->peer_keepalive_base;
if (diff < -30 || diff > 30) cursor = rxnet->peer_keepalive_cursor;
goto send; /* LSW of 64-bit time probably wrapped on 32-bit */ _enter("%lld,%u", base - now, cursor);
diff += RXRPC_KEEPALIVE_TIME - 1;
if (diff < 0)
goto send;
slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff; if (!rxnet->live)
if (slot == 0) return;
goto send;
/* A transmission to this peer occurred since last we examined it so /* Remove to a temporary list all the peers that are currently lodged
* put it into the appropriate future bucket. * in expired buckets plus all new peers.
*
* Everything in the bucket at the cursor is processed this
* second; the bucket at cursor + 1 goes at now + 1s and so
* on...
*/ */
slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
spin_lock_bh(&rxnet->peer_hash_lock); spin_lock_bh(&rxnet->peer_hash_lock);
hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]); list_splice_init(&rxnet->peer_keepalive_new, &collector);
rxrpc_put_peer(peer);
goto next_peer; stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
while (base <= now && (s8)(cursor - stop) < 0) {
send: list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
rxrpc_send_keepalive(peer); &collector);
now = ktime_get_real(); base++;
goto recalc; cursor++;
}
emptied_bucket: base = now;
cursor++; spin_unlock_bh(&rxnet->peer_hash_lock);
if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
cursor = 0;
base = ktime_add_ns(base, NSEC_PER_SEC);
goto next_bucket;
resched:
rxnet->peer_keepalive_base = base; rxnet->peer_keepalive_base = base;
rxnet->peer_keepalive_cursor = cursor; rxnet->peer_keepalive_cursor = cursor;
delay = nsecs_to_jiffies(-diff) + 1; rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay); ASSERT(list_empty(&collector));
out:
/* Schedule the timer for the next occupied timeslot. */
cursor = rxnet->peer_keepalive_cursor;
stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
for (; (s8)(cursor - stop) < 0; cursor++) {
if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
break;
base++;
}
now = ktime_get_seconds();
delay = base - now;
if (delay < 1)
delay = 1;
delay *= HZ;
if (rxnet->live)
timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
_leave(""); _leave("");
} }
...@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local, ...@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
if (!peer) { if (!peer) {
peer = prealloc; peer = prealloc;
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new); list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
} }
spin_unlock(&rxnet->peer_hash_lock); spin_unlock(&rxnet->peer_hash_lock);
...@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, ...@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
if (!peer) { if (!peer) {
hash_add_rcu(rxnet->peer_hash, hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key); &candidate->hash_link, hash_key);
hlist_add_head(&candidate->keepalive_link, list_add_tail(&candidate->keepalive_link,
&rxnet->peer_keepalive_new); &rxnet->peer_keepalive_new);
} }
spin_unlock_bh(&rxnet->peer_hash_lock); spin_unlock_bh(&rxnet->peer_hash_lock);
...@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer) ...@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
spin_lock_bh(&rxnet->peer_hash_lock); spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link); hash_del_rcu(&peer->hash_link);
hlist_del_init(&peer->keepalive_link); list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock); spin_unlock_bh(&rxnet->peer_hash_lock);
kfree_rcu(peer, rcu); kfree_rcu(peer, rcu);
......
...@@ -670,7 +670,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) ...@@ -670,7 +670,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
return -EAGAIN; return -EAGAIN;
} }
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
trace_rxrpc_tx_packet(conn->debug_id, &whdr, trace_rxrpc_tx_packet(conn->debug_id, &whdr,
rxrpc_tx_point_rxkad_challenge); rxrpc_tx_point_rxkad_challenge);
_leave(" = 0"); _leave(" = 0");
...@@ -728,8 +728,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, ...@@ -728,8 +728,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
return -EAGAIN; return -EAGAIN;
} }
conn->params.peer->last_tx_at = ktime_get_real(); conn->params.peer->last_tx_at = ktime_get_seconds();
trace_rxrpc_tx_packet(0, &whdr, rxrpc_tx_point_rxkad_response);
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
} }
......
...@@ -1305,6 +1305,8 @@ static void smc_tcp_listen_work(struct work_struct *work) ...@@ -1305,6 +1305,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
sock_hold(lsk); /* sock_put in smc_listen_work */ sock_hold(lsk); /* sock_put in smc_listen_work */
INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
smc_copy_sock_settings_to_smc(new_smc); smc_copy_sock_settings_to_smc(new_smc);
new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
sock_hold(&new_smc->sk); /* sock_put in passive closing */ sock_hold(&new_smc->sk); /* sock_put in passive closing */
if (!schedule_work(&new_smc->smc_listen_work)) if (!schedule_work(&new_smc->smc_listen_work))
sock_put(&new_smc->sk); sock_put(&new_smc->sk);
...@@ -1581,8 +1583,7 @@ static int smc_shutdown(struct socket *sock, int how) ...@@ -1581,8 +1583,7 @@ static int smc_shutdown(struct socket *sock, int how)
lock_sock(sk); lock_sock(sk);
rc = -ENOTCONN; rc = -ENOTCONN;
if ((sk->sk_state != SMC_LISTEN) && if ((sk->sk_state != SMC_ACTIVE) &&
(sk->sk_state != SMC_ACTIVE) &&
(sk->sk_state != SMC_PEERCLOSEWAIT1) && (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
(sk->sk_state != SMC_PEERCLOSEWAIT2) && (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
(sk->sk_state != SMC_APPCLOSEWAIT1) && (sk->sk_state != SMC_APPCLOSEWAIT1) &&
...@@ -1706,12 +1707,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, ...@@ -1706,12 +1707,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
smc = smc_sk(sock->sk); smc = smc_sk(sock->sk);
conn = &smc->conn; conn = &smc->conn;
lock_sock(&smc->sk);
if (smc->use_fallback) { if (smc->use_fallback) {
if (!smc->clcsock) if (!smc->clcsock) {
release_sock(&smc->sk);
return -EBADF; return -EBADF;
return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); }
answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
release_sock(&smc->sk);
return answ;
} }
lock_sock(&smc->sk);
switch (cmd) { switch (cmd) {
case SIOCINQ: /* same as FIONREAD */ case SIOCINQ: /* same as FIONREAD */
if (smc->sk.sk_state == SMC_LISTEN) { if (smc->sk.sk_state == SMC_LISTEN) {
......
...@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
 {
 	struct tipc_net *tn = tipc_net(net);

-	spin_lock_bh(&tn->node_list_lock);
-	if (!tipc_own_addr(net)) {
+	if (!cmpxchg(&tn->node_addr, 0, addr)) {
 		tipc_set_node_addr(net, addr);
 		tipc_named_reinit(net);
 		tipc_sk_reinit(net);
 		tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
 				     TIPC_CLUSTER_SCOPE, 0, addr);
 	}
-	spin_unlock_bh(&tn->node_list_lock);
 }

 void tipc_net_stop(struct net *net)
......
...@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode) ...@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
return transport->shutdown(vsock_sk(sk), mode); return transport->shutdown(vsock_sk(sk), mode);
} }
void vsock_pending_work(struct work_struct *work) static void vsock_pending_work(struct work_struct *work)
{ {
struct sock *sk; struct sock *sk;
struct sock *listener; struct sock *listener;
struct vsock_sock *vsk; struct vsock_sock *vsk;
bool cleanup; bool cleanup;
vsk = container_of(work, struct vsock_sock, dwork.work); vsk = container_of(work, struct vsock_sock, pending_work.work);
sk = sk_vsock(vsk); sk = sk_vsock(vsk);
listener = vsk->listener; listener = vsk->listener;
cleanup = true; cleanup = true;
...@@ -498,7 +498,6 @@ void vsock_pending_work(struct work_struct *work) ...@@ -498,7 +498,6 @@ void vsock_pending_work(struct work_struct *work)
sock_put(sk); sock_put(sk);
sock_put(listener); sock_put(listener);
} }
EXPORT_SYMBOL_GPL(vsock_pending_work);
/**** SOCKET OPERATIONS ****/ /**** SOCKET OPERATIONS ****/
...@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) ...@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
return retval; return retval;
} }
static void vsock_connect_timeout(struct work_struct *work);
struct sock *__vsock_create(struct net *net, struct sock *__vsock_create(struct net *net,
struct socket *sock, struct socket *sock,
struct sock *parent, struct sock *parent,
...@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net, ...@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
vsk->sent_request = false; vsk->sent_request = false;
vsk->ignore_connecting_rst = false; vsk->ignore_connecting_rst = false;
vsk->peer_shutdown = 0; vsk->peer_shutdown = 0;
INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
psk = parent ? vsock_sk(parent) : NULL; psk = parent ? vsock_sk(parent) : NULL;
if (parent) { if (parent) {
...@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work) ...@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
struct vsock_sock *vsk; struct vsock_sock *vsk;
int cancel = 0; int cancel = 0;
vsk = container_of(work, struct vsock_sock, dwork.work); vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk); sk = sk_vsock(vsk);
lock_sock(sk); lock_sock(sk);
...@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, ...@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
* timeout fires. * timeout fires.
*/ */
sock_hold(sk); sock_hold(sk);
INIT_DELAYED_WORK(&vsk->dwork, schedule_delayed_work(&vsk->connect_work, timeout);
vsock_connect_timeout);
schedule_delayed_work(&vsk->dwork, timeout);
/* Skip ahead to preserve error code set above. */ /* Skip ahead to preserve error code set above. */
goto out_wait; goto out_wait;
......
...@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
 	vpending->listener = sk;
 	sock_hold(sk);
 	sock_hold(pending);
-	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-	schedule_delayed_work(&vpending->dwork, HZ);
+	schedule_delayed_work(&vpending->pending_work, HZ);

 out:
 	return err;
......