Commit bf873a80 authored by Randy Dunlap, committed by Jakub Kicinski

net: skbuff: fix spelling errors

Correct spelling as reported by codespell.
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20231213043511.10357-1-rdunlap@infradead.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 81d56f56
@@ -1069,7 +1069,7 @@ struct sk_buff {
 	refcount_t		users;
 
 #ifdef CONFIG_SKB_EXTENSIONS
-	/* only useable after checking ->active_extensions != 0 */
+	/* only usable after checking ->active_extensions != 0 */
 	struct skb_ext		*extensions;
 #endif
 };
@@ -3311,7 +3311,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
 					     unsigned int order)
 {
 	/* This piece of code contains several assumptions.
-	 * 1. This is for device Rx, therefor a cold page is preferred.
+	 * 1. This is for device Rx, therefore a cold page is preferred.
 	 * 2. The expectation is the user wants a compound page.
 	 * 3. If requesting a order 0 page it will not be compound
 	 *    due to the check to see if order has a value in prep_new_page
@@ -4247,7 +4247,7 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
 {
 	const void *a = skb_metadata_end(skb_a);
 	const void *b = skb_metadata_end(skb_b);
-	/* Using more efficient varaiant than plain call to memcmp(). */
+	/* Using more efficient variant than plain call to memcmp(). */
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
 	u64 diffs = 0;
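For context on the comment corrected in the last hunk: it refers to comparing the two metadata areas a machine word at a time instead of calling memcmp(). Below is a minimal, user-space C sketch of that idea only; the function and variable names are hypothetical and this is not the kernel's actual __skb_metadata_differs() implementation.

/*
 * Illustrative sketch: XOR corresponding 64-bit words of two buffers and
 * OR the results into an accumulator; any difference leaves a non-zero bit.
 * Assumes the length is a multiple of 8 bytes for simplicity.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int metadata_differs_wordwise(const void *a, const void *b, size_t len)
{
	uint64_t diffs = 0;
	size_t i;

	for (i = 0; i < len; i += sizeof(uint64_t)) {
		uint64_t wa, wb;

		/* memcpy() keeps the loads well-defined regardless of alignment */
		memcpy(&wa, (const char *)a + i, sizeof(wa));
		memcpy(&wb, (const char *)b + i, sizeof(wb));
		diffs |= wa ^ wb;
	}
	return diffs != 0;
}

int main(void)
{
	char x[16] = "skb metadata AA";
	char y[16] = "skb metadata AB";

	printf("differs: %d\n", metadata_differs_wordwise(x, y, sizeof(x)));
	return 0;
}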