Commit 5d299f44 authored by Kees Cook, committed by Dave Kleikamp

jfs: Avoid field-overflowing memcpy()

In preparation for FORTIFY_SOURCE performing compile-time and run-time
field array bounds checking for memcpy(), memmove(), and memset(),
avoid intentionally writing across neighboring fields.
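
As an illustration only (not code from this patch), a minimal sketch of the
pattern such a bounds check flags: the destination is a 128-byte field, but
the copy is deliberately sized to spill into the neighboring field:

#include <string.h>

/* Hypothetical layout for illustration; these are not the JFS structures. */
struct inline_area {
        char symlink[128];      /* intended destination */
        char ea[128];           /* neighbor that absorbs the overflow */
};

static void fill(struct inline_area *a, const void *src)
{
        /*
         * Copies 256 bytes into a 128-byte field on purpose. With
         * field-granular bounds checking, memcpy() only knows about
         * sizeof(a->symlink) and reports this as an overflow.
         */
        memcpy(a->symlink, src, 256);
}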

Introduce more unions to cover the full inline data section, so that the
entire 256 bytes can be addressed by memcpy() without the copy appearing
to cross field boundaries. Additionally, adjust the dir memcpy() to use
the existing union names to get the same coverage.
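
A simplified sketch of the resulting shape (again illustrative, not the
actual dinode or jfs_inode_info layout): the neighboring fields are wrapped
in an anonymous struct that is unioned with a single array spanning the
whole region, so a full-size memcpy() gets a destination field of the right
size:

#include <string.h>

struct inline_area {
        union {
                struct {
                        char symlink[128];      /* still addressable on its own */
                        char ea[128];
                };
                char inline_all[256];           /* covers the entire inline region */
        };
};

static void fill(struct inline_area *a, const void *src)
{
        /* One destination field sized for the whole copy; no cross-field write. */
        memcpy(a->inline_all, src, sizeof(a->inline_all));
}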

diffoscope shows there are no binary differences before/after except for
the name of the initcall, which is line-number based:

$ diffoscope --exclude-directory-metadata yes before/fs after/fs
 --- before/fs
 +++ after/fs
 │   --- before/fs/jfs
 ├── +++ after/fs/jfs
 │ │   --- before/fs/jfs/super.o
 │ ├── +++ after/fs/jfs/super.o
 │ │ ├── readelf --wide --symbols {}
 │ │ │ @@ -2,15 +2,15 @@
 │ │ │  Symbol table '.symtab' contains 158 entries:
 │ │ │     Num:    Value          Size Type    Bind   Vis      Ndx Name
 ...
 │ │ │ -     5: 0000000000000000     0 NOTYPE  LOCAL  DEFAULT    6 __initcall__kmod_jfs__319_1049_init_jfs_fs6
 │ │ │ +     5: 0000000000000000     0 NOTYPE  LOCAL  DEFAULT    6 __initcall__kmod_jfs__319_1050_init_jfs_fs6
...
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
parent e15a56b7
@@ -101,7 +101,6 @@ struct dinode {
                     u8 unused[16];     /* 16: */
                     dxd_t _dxd;        /* 16: */
                     union {
-                        __le32 _rdev;  /* 4: */
                         /*
                          * The fast symlink area
                          * is expected to overflow
@@ -109,9 +108,15 @@ struct dinode {
                          * needed (which will clear
                          * INLINEEA).
                          */
-                        u8 _fastsymlink[128];
-                    } _u;
-                    u8 _inlineea[128];
+                        struct {
+                            union {
+                                __le32 _rdev;  /* 4: */
+                                u8 _fastsymlink[128];
+                            } _u;
+                            u8 _inlineea[128];
+                        };
+                        u8 _inline_all[256];
+                    };
                 } _special;
             } _u2;
         } _file;
@@ -122,6 +127,7 @@ struct dinode {
 #define di_rdev         u._file._u2._special._u._rdev
 #define di_fastsymlink  u._file._u2._special._u._fastsymlink
 #define di_inlineea     u._file._u2._special._inlineea
+#define di_inline_all   u._file._u2._special._inline_all
     } u;
 };
......
@@ -761,7 +761,7 @@ int diWrite(tid_t tid, struct inode *ip)
         lv = & dilinelock->lv[dilinelock->index];
         lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
         lv->length = 2;
-        memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
+        memcpy(&dp->di_inline_all, jfs_ip->i_inline_all, IDATASIZE);
         dilinelock->index++;
     }
     /*
@@ -3082,7 +3082,7 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
     }
 
     if (S_ISDIR(ip->i_mode)) {
-        memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384);
+        memcpy(&jfs_ip->u.dir, &dip->u._dir, 384);
     } else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
         memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
     } else
......
@@ -77,11 +77,18 @@ struct jfs_inode_info {
             unchar _unused[16];    /* 16: */
             dxd_t _dxd;            /* 16: */
             /* _inline may overflow into _inline_ea when needed */
-            unchar _inline[128];   /* 128: inline symlink */
             /* _inline_ea may overlay the last part of
              * file._xtroot if maxentry = XTROOTINITSLOT
              */
-            unchar _inline_ea[128]; /* 128: inline extended attr */
+            union {
+                struct {
+                    /* 128: inline symlink */
+                    unchar _inline[128];
+                    /* 128: inline extended attr */
+                    unchar _inline_ea[128];
+                };
+                unchar _inline_all[256];
+            };
         } link;
     } u;
 #ifdef CONFIG_QUOTA
@@ -96,6 +103,7 @@ struct jfs_inode_info {
 #define i_dtroot       u.dir._dtroot
 #define i_inline       u.link._inline
 #define i_inline_ea    u.link._inline_ea
+#define i_inline_all   u.link._inline_all
 #define IREAD_LOCK(ip, subclass) \
     down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
......
@@ -939,7 +939,8 @@ static int __init init_jfs_fs(void)
     jfs_inode_cachep =
         kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
             0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
-            offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
+            offsetof(struct jfs_inode_info, i_inline_all),
+            sizeof_field(struct jfs_inode_info, i_inline_all),
             init_once);
     if (jfs_inode_cachep == NULL)
         return -ENOMEM;
......
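
For context on the last hunk: kmem_cache_create_usercopy() takes a
(useroffset, usersize) pair naming the usercopy whitelist, the only region
of each cached object that hardened usercopy will copy to or from user
space. The change re-expresses that same region in terms of the new single
field. A hedged sketch of the pattern, with made-up names and assuming a
kernel build environment (it will not compile as userspace code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/* Hypothetical object with a user-copied inline region. */
struct demo_inode_info {
        int flags;
        unsigned char inline_all[256];
};

static struct kmem_cache *demo_cachep;

static int demo_cache_init(void)
{
        /* Whitelist exactly the inline_all field for usercopy. */
        demo_cachep = kmem_cache_create_usercopy("demo_ip",
                        sizeof(struct demo_inode_info), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
                        offsetof(struct demo_inode_info, inline_all),
                        sizeof_field(struct demo_inode_info, inline_all),
                        NULL);
        return demo_cachep ? 0 : -ENOMEM;
}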