    btrfs: use memzero_page() instead of open coded kmap pattern · d048b9c2
    Ira Weiny authored
    There are many places where the kmap/memset/kunmap pattern occurs.

    Use the newly lifted memzero_page() to eliminate direct uses of kmap and
    leverage the new core function's use of kmap_local_page().
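    A sketch of the transformation (illustrative code, not a hunk from this
    patch; page, off and len are placeholder names, and memzero_page() comes
    from <linux/highmem.h>):

        /* Before: map the page only to zero a range of it. */
        char *kaddr = kmap(page);
        memset(kaddr + off, 0, len);
        kunmap(page);

        /* After: memzero_page() handles the mapping internally. */
        memzero_page(page, off, len);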
    
    The development of this patch was aided by the following coccinelle
    script:
    
    // <smpl>
    // SPDX-License-Identifier: GPL-2.0-only
    // Find kmap/memset/kunmap pattern and replace with memset*page calls
    //
    // NOTE: Offsets and other expressions may be more complex than what the script
    // will automatically generate.  Therefore a catchall rule is provided to find
    // the pattern which then must be evaluated by hand.
    //
    // Confidence: Low
    // Copyright: (C) 2021 Intel Corporation
    // URL: http://coccinelle.lip6.fr/
    // Comments:
    // Options:
    
    //
    // Then the memset pattern
    //
    @ memset_rule1 @
    expression page, V, L, Off;
    identifier ptr;
    type VP;
    @@
    
    (
    -VP ptr = kmap(page);
    |
    -ptr = kmap(page);
    |
    -VP ptr = kmap_atomic(page);
    |
    -ptr = kmap_atomic(page);
    )
    <+...
    (
    -memset(ptr, 0, L);
    +memzero_page(page, 0, L);
    |
    -memset(ptr + Off, 0, L);
    +memzero_page(page, Off, L);
    |
    -memset(ptr, V, L);
    +memset_page(page, V, 0, L);
    |
    -memset(ptr + Off, V, L);
    +memset_page(page, V, Off, L);
    )
    ...+>
    (
    -kunmap(page);
    |
    -kunmap_atomic(ptr);
    )
    
    // Remove any pointers left unused
    @
    depends on memset_rule1
    @
    identifier memset_rule1.ptr;
    type VP, VP1;
    @@
    
    -VP ptr;
    	... when != ptr;
    ? VP1 ptr;
    
    //
    // Catch all
    //
    @ memset_rule2 @
    expression page;
    identifier ptr;
    expression GenTo, GenSize, GenValue;
    type VP;
    @@
    
    (
    -VP ptr = kmap(page);
    |
    -ptr = kmap(page);
    |
    -VP ptr = kmap_atomic(page);
    |
    -ptr = kmap_atomic(page);
    )
    <+...
    (
    //
// Some call sites have complex expressions within the memset.
// The following are catch-alls which need to be evaluated by hand.
    //
    -memset(GenTo, 0, GenSize);
    +memzero_pageExtra(page, GenTo, GenSize);
    |
    -memset(GenTo, GenValue, GenSize);
    +memset_pageExtra(page, GenValue, GenTo, GenSize);
    )
    ...+>
    (
    -kunmap(page);
    |
    -kunmap_atomic(ptr);
    )
    
    // Remove any pointers left unused
    @
    depends on memset_rule2
    @
    identifier memset_rule2.ptr;
    type VP, VP1;
    @@
    
    -VP ptr;
    	... when != ptr;
    ? VP1 ptr;
    
    // </smpl>
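    The simple rules above only match a memset() whose destination is the
    mapped pointer (optionally plus an offset).  Anything else falls through
    to the catch-all rules, which rewrite to the non-existent *Extra names
    purely to mark the site for hand conversion.  A hypothetical example of
    such a site (names are illustrative, not from this patch):

        /* Before: the destination is derived from the mapped pointer,
         * so only the catch-all rule matches.
         */
        char *kaddr = kmap_atomic(page);
        char *start = kaddr + start_off;
        memset(start, 0, end_off - start_off);
        kunmap_atomic(kaddr);

        /* After, converted by hand: */
        memzero_page(page, start_off, end_off - start_off);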
    
    Link: https://lkml.kernel.org/r/20210309212137.2610186-4-ira.weiny@intel.com
    Signed-off-by: Ira Weiny <ira.weiny@intel.com>
    Reviewed-by: David Sterba <dsterba@suse.com>
    Cc: Alexander Viro <viro@zeniv.linux.org.uk>
    Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
    Cc: Chris Mason <clm@fb.com>
    Cc: Josef Bacik <josef@toxicpanda.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>