/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Checksum routines
 *
 * Copyright (C) 2023 Rivos Inc.
 */
#ifndef __ASM_RISCV_CHECKSUM_H
#define __ASM_RISCV_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>

#define ip_fast_csum ip_fast_csum

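/* The optimized implementation lives in arch/riscv/lib/csum.c. */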
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum

/* Default version is sufficient for 32 bit */
#ifndef CONFIG_32BIT
#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);
#endif

/* Define riscv versions of functions before importing asm-generic/checksum.h */
#include <asm-generic/checksum.h>

/**
 * ip_fast_csum - Quickly compute an IPv4 header checksum
 * @iph: pointer to the IP header; assumed to be word aligned, since
 *	 NET_IP_ALIGN is set to 2 on riscv, which keeps IP headers aligned
 * @ihl: the header length in 32-bit words; must be greater than or equal to 5
 *
 * Assumes IPv4 headers are always a whole number of 32-bit words.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned long csum = 0;
	int pos = 0;

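	/*
	 * Sum the header as 32-bit words.  A 64-bit accumulator cannot
	 * overflow here, but on 32-bit each addition may wrap, so the carry
	 * bit is folded straight back in to preserve the one's-complement
	 * sum.
	 */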
	do {
		csum += ((const unsigned int *)iph)[pos];
		if (IS_ENABLED(CONFIG_32BIT))
			csum += csum < ((const unsigned int *)iph)[pos];
	} while (++pos < ihl);

	/*
	 * Zbb saves only three instructions on 32-bit and five on 64-bit,
	 * so a runtime check for support is not worthwhile without the
	 * Alternatives framework.
	 */
	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
	    IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
		unsigned long fold_temp;

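		/*
		 * Alternatives patches the jump below into a nop on CPUs
		 * that implement Zbb, so execution falls through into the
		 * Zbb fold; otherwise it branches to the generic fold at
		 * no_zbb.
		 */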
		asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
					      RISCV_ISA_EXT_ZBB, 1)
		    :
		    :
		    :
		    : no_zbb);

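		/*
		 * Fold the 32-bit sum to 16 bits and complement it in one
		 * sequence: ~csum - ror32(csum, 16) leaves the finished
		 * checksum in the upper 16 bits of the register.
		 */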
		if (IS_ENABLED(CONFIG_32BIT)) {
			asm(".option push				\n\
			.option arch,+zbb				\n\
				not	%[fold_temp], %[csum]		\n\
				rori	%[csum], %[csum], 16		\n\
				sub	%[csum], %[fold_temp], %[csum]	\n\
			.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
		} else {
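			/*
			 * Fold the 64-bit sum to 32 bits by adding it to
			 * itself rotated by 32 (the upper word then holds
			 * both halves plus the carry), then apply the same
			 * fold-and-complement trick with the 32-bit "w"
			 * instructions.
			 */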
			asm(".option push				\n\
			.option arch,+zbb				\n\
				rori	%[fold_temp], %[csum], 32	\n\
				add	%[csum], %[fold_temp], %[csum]	\n\
				srli	%[csum], %[csum], 32		\n\
				not	%[fold_temp], %[csum]		\n\
				roriw	%[csum], %[csum], 16		\n\
				subw	%[csum], %[fold_temp], %[csum]	\n\
			.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
		}
		return (__force __sum16)(csum >> 16);
	}
no_zbb:
#ifndef CONFIG_32BIT
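	/*
	 * Fold the 64-bit sum to 32 bits; rotating by 32 and adding leaves
	 * the sum of both halves, plus the carry, in the upper word.
	 */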
	csum += ror64(csum, 32);
	csum >>= 32;
#endif
	return csum_fold((__force __wsum)csum);
}

#endif /* __ASM_RISCV_CHECKSUM_H */