Commit bf3964c1 authored by Ben Myers

Merge branch 'xfs-extent-list-locking-fixes' into for-next

A set of fixes that make sure we take the ilock whenever accessing the
extent list. The underlying problem was associated with "Access to block
zero" messages and may result in extent list corruption.
parents dc16b186 eef334e5
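
Every converted call site in the diff below follows the same pattern. As a
minimal illustrative sketch (not code from this series): the caller and its
name, example_read_mapping, are hypothetical, while xfs_ilock_data_map_shared(),
xfs_iunlock(), and xfs_bmapi_read() are used exactly as they appear in the
hunks that follow.

/*
 * Sketch of the locking pattern applied to extent-list readers.
 * The helper returns XFS_ILOCK_SHARED in the common case, or
 * XFS_ILOCK_EXCL when the data fork is in b-tree format and the
 * extents still have to be read in from disk.
 */
STATIC int
example_read_mapping(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	struct xfs_bmbt_irec	*imap)
{
	int			nimaps = 1;
	uint			lock_mode;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, 1, imap, &nimaps, 0);
	xfs_iunlock(ip, lock_mode);	/* unlock with the mode actually taken */

	return error;
}

The point of the pattern is that the unlock must use the mode the helper
returned, since the helper may have escalated to an exclusive lock.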
@@ -1217,7 +1217,7 @@ __xfs_get_blocks(
 		lockmode = XFS_ILOCK_EXCL;
 		xfs_ilock(ip, lockmode);
 	} else {
-		lockmode = xfs_ilock_map_shared(ip);
+		lockmode = xfs_ilock_data_map_shared(ip);
 	}
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
...
@@ -164,6 +164,7 @@ xfs_attr_get(
 {
 	int			error;
 	struct xfs_name		xname;
+	uint			lock_mode;
 
 	XFS_STATS_INC(xs_attr_get);
@@ -174,9 +175,9 @@ xfs_attr_get(
 	if (error)
 		return error;
 
-	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	lock_mode = xfs_ilock_attr_map_shared(ip);
 	error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags);
-	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	xfs_iunlock(ip, lock_mode);
 	return(error);
 }
...
@@ -507,17 +507,17 @@ xfs_attr_list_int(
 {
 	int			error;
 	xfs_inode_t		*dp = context->dp;
+	uint			lock_mode;
 
 	XFS_STATS_INC(xs_attr_list);
 
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return EIO;
 
-	xfs_ilock(dp, XFS_ILOCK_SHARED);
-
 	/*
 	 * Decide on what work routines to call based on the inode size.
 	 */
+	lock_mode = xfs_ilock_attr_map_shared(dp);
 	if (!xfs_inode_hasattr(dp)) {
 		error = 0;
 	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
@@ -527,9 +527,7 @@ xfs_attr_list_int(
 	} else {
 		error = xfs_attr_node_list(context);
 	}
-
-	xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
+	xfs_iunlock(dp, lock_mode);
 	return error;
 }
...
@@ -4013,6 +4013,7 @@ xfs_bmapi_read(
 	ASSERT(*nmap >= 1);
 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
 			   XFS_BMAPI_IGSTATE)));
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -4207,6 +4208,7 @@ xfs_bmapi_delay(
 	ASSERT(*nmap >= 1);
 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
 	ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
@@ -4500,6 +4502,7 @@ xfs_bmapi_write(
 	ASSERT(tp != NULL);
 	ASSERT(len > 0);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
 	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -5051,6 +5054,7 @@ xfs_bunmapi(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	ASSERT(len > 0);
 	ASSERT(nexts >= 0);
...
@@ -618,22 +618,27 @@ xfs_getbmap(
 		return XFS_ERROR(ENOMEM);
 
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
-		if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
+	if (whichfork == XFS_DATA_FORK) {
+		if (!(iflags & BMV_IF_DELALLOC) &&
+		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
 			error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
 			if (error)
 				goto out_unlock_iolock;
+
+			/*
+			 * Even after flushing the inode, there can still be
+			 * delalloc blocks on the inode beyond EOF due to
+			 * speculative preallocation. These are not removed
+			 * until the release function is called or the inode
+			 * is inactivated. Hence we cannot assert here that
+			 * ip->i_delayed_blks == 0.
+			 */
 		}
-		/*
-		 * even after flushing the inode, there can still be delalloc
-		 * blocks on the inode beyond EOF due to speculative
-		 * preallocation. These are not removed until the release
-		 * function is called or the inode is inactivated. Hence we
-		 * cannot assert here that ip->i_delayed_blks == 0.
-		 */
-	}
 
-	lock = xfs_ilock_map_shared(ip);
+		lock = xfs_ilock_data_map_shared(ip);
+	} else {
+		lock = xfs_ilock_attr_map_shared(ip);
+	}
 
 	/*
 	 * Don't let nex be bigger than the number of extents
@@ -738,7 +743,7 @@ xfs_getbmap(
  out_free_map:
 	kmem_free(map);
  out_unlock_ilock:
-	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, lock);
  out_unlock_iolock:
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -1169,9 +1174,15 @@ xfs_zero_remaining_bytes(
 	xfs_buf_unlock(bp);
 
 	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
+		uint lock_mode;
+
 		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 		nimap = 1;
+
+		lock_mode = xfs_ilock_data_map_shared(ip);
 		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
+		xfs_iunlock(ip, lock_mode);
+
 		if (error || nimap < 1)
 			break;
 		ASSERT(imap.br_blockcount >= 1);
...
@@ -674,6 +674,7 @@ xfs_readdir(
 {
 	int		rval;		/* return value */
 	int		v;		/* type-checking value */
+	uint		lock_mode;
 
 	trace_xfs_readdir(dp);
@@ -683,6 +684,7 @@ xfs_readdir(
 	ASSERT(S_ISDIR(dp->i_d.di_mode));
 	XFS_STATS_INC(xs_dir_getdents);
 
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
 		rval = xfs_dir2_sf_getdents(dp, ctx);
 	else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
@@ -691,5 +693,7 @@ xfs_readdir(
 		rval = xfs_dir2_block_getdents(dp, ctx);
 	else
 		rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize);
+	xfs_iunlock(dp, lock_mode);
+
 	return rval;
 }
...
@@ -469,16 +469,17 @@ xfs_qm_dqtobp(
 	struct xfs_mount	*mp = dqp->q_mount;
 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
+	uint			lock_mode;
 
 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 
-	xfs_ilock(quotip, XFS_ILOCK_SHARED);
+	lock_mode = xfs_ilock_data_map_shared(quotip);
 	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 		/*
 		 * Return if this type of quotas is turned off while we
 		 * didn't have the quota inode lock.
 		 */
-		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+		xfs_iunlock(quotip, lock_mode);
 		return ESRCH;
 	}
@@ -488,7 +489,7 @@ xfs_qm_dqtobp(
 	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 
-	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+	xfs_iunlock(quotip, lock_mode);
 	if (error)
 		return error;
...
@@ -912,7 +912,7 @@ xfs_dir_open(
 	 * If there are any blocks, read-ahead block 0 as we're almost
 	 * certain to have the next operation be a read there.
 	 */
-	mode = xfs_ilock_map_shared(ip);
+	mode = xfs_ilock_data_map_shared(ip);
 	if (ip->i_d.di_nextents > 0)
 		xfs_dir3_data_readahead(NULL, ip, 0, -1);
 	xfs_iunlock(ip, mode);
@@ -1215,7 +1215,7 @@ xfs_seek_data(
 	uint			lock;
 	int			error;
 
-	lock = xfs_ilock_map_shared(ip);
+	lock = xfs_ilock_data_map_shared(ip);
 
 	isize = i_size_read(inode);
 	if (start >= isize) {
@@ -1294,7 +1294,7 @@ xfs_seek_data(
 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 
 out_unlock:
-	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, lock);
 
 	if (error)
 		return -error;
@@ -1319,7 +1319,7 @@ xfs_seek_hole(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -XFS_ERROR(EIO);
 
-	lock = xfs_ilock_map_shared(ip);
+	lock = xfs_ilock_data_map_shared(ip);
 
 	isize = i_size_read(inode);
 	if (start >= isize) {
@@ -1402,7 +1402,7 @@ xfs_seek_hole(
 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 
 out_unlock:
-	xfs_iunlock_map_shared(ip, lock);
+	xfs_iunlock(ip, lock);
 
 	if (error)
 		return -error;
...
@@ -77,48 +77,44 @@ xfs_get_extsz_hint(
 }
 
 /*
- * This is a wrapper routine around the xfs_ilock() routine used to centralize
- * some grungy code.  It is used in places that wish to lock the inode solely
- * for reading the extents.  The reason these places can't just call
- * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the
- * extents from disk for a file in b-tree format.  If the inode is in b-tree
- * format, then we need to lock the inode exclusively until the extents are read
- * in.  Locking it exclusively all the time would limit our parallelism
- * unnecessarily, though.  What we do instead is check to see if the extents
- * have been read in yet, and only lock the inode exclusively if they have not.
+ * These two are wrapper routines around the xfs_ilock() routine used to
+ * centralize some grungy code.  They are used in places that wish to lock the
+ * inode solely for reading the extents.  The reason these places can't just
+ * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
+ * bringing in of the extents from disk for a file in b-tree format.  If the
+ * inode is in b-tree format, then we need to lock the inode exclusively until
+ * the extents are read in.  Locking it exclusively all the time would limit
+ * our parallelism unnecessarily, though.  What we do instead is check to see
+ * if the extents have been read in yet, and only lock the inode exclusively
+ * if they have not.
  *
- * The function returns a value which should be given to the corresponding
- * xfs_iunlock_map_shared().  This value is the mode in which the lock was
- * actually taken.
+ * The functions return a value which should be given to the corresponding
+ * xfs_iunlock() call.
  */
 uint
-xfs_ilock_map_shared(
-	xfs_inode_t	*ip)
+xfs_ilock_data_map_shared(
+	struct xfs_inode	*ip)
 {
-	uint	lock_mode;
+	uint			lock_mode = XFS_ILOCK_SHARED;
 
-	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
-	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
+	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
 		lock_mode = XFS_ILOCK_EXCL;
-	} else {
-		lock_mode = XFS_ILOCK_SHARED;
-	}
 	xfs_ilock(ip, lock_mode);
 	return lock_mode;
 }
 
-/*
- * This is simply the unlock routine to go with xfs_ilock_map_shared().
- * All it does is call xfs_iunlock() with the given lock_mode.
- */
-void
-xfs_iunlock_map_shared(
-	xfs_inode_t	*ip,
-	unsigned int	lock_mode)
+uint
+xfs_ilock_attr_map_shared(
+	struct xfs_inode	*ip)
 {
-	xfs_iunlock(ip, lock_mode);
+	uint			lock_mode = XFS_ILOCK_SHARED;
+
+	if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
+	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
+		lock_mode = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lock_mode);
+	return lock_mode;
 }
 
 /*
@@ -588,9 +584,9 @@ xfs_lookup(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return XFS_ERROR(EIO);
 
-	lock_mode = xfs_ilock_map_shared(dp);
+	lock_mode = xfs_ilock_data_map_shared(dp);
 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
-	xfs_iunlock_map_shared(dp, lock_mode);
+	xfs_iunlock(dp, lock_mode);
 
 	if (error)
 		goto out;
...
@@ -337,8 +337,8 @@ int		xfs_ilock_nowait(xfs_inode_t *, uint);
 void		xfs_iunlock(xfs_inode_t *, uint);
 void		xfs_ilock_demote(xfs_inode_t *, uint);
 int		xfs_isilocked(xfs_inode_t *, uint);
-uint		xfs_ilock_map_shared(xfs_inode_t *);
-void		xfs_iunlock_map_shared(xfs_inode_t *, uint);
+uint		xfs_ilock_data_map_shared(struct xfs_inode *);
+uint		xfs_ilock_attr_map_shared(struct xfs_inode *);
 int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
 			   xfs_nlink_t, xfs_dev_t, prid_t, int,
 			   struct xfs_buf **, xfs_inode_t **);
...
@@ -431,6 +431,8 @@ xfs_iread_extents(
 	xfs_ifork_t	*ifp;
 	xfs_extnum_t	nextents;
 
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
 	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
 		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
 				 ip->i_mount);
...
@@ -112,15 +112,11 @@ xfs_find_handle(
 		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
 		hsize = sizeof(xfs_fsid_t);
 	} else {
-		int		lock_mode;
-
-		lock_mode = xfs_ilock_map_shared(ip);
 		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
 					sizeof(handle.ha_fid.fid_len);
 		handle.ha_fid.fid_pad = 0;
 		handle.ha_fid.fid_gen = ip->i_d.di_gen;
 		handle.ha_fid.fid_ino = ip->i_ino;
-		xfs_iunlock_map_shared(ip, lock_mode);
 
 		hsize = XFS_HSIZE(handle);
 	}
...
@@ -1222,16 +1222,18 @@ xfs_qm_dqiterate(
 	lblkno = 0;
 	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 	do {
+		uint		lock_mode;
+
 		nmaps = XFS_DQITER_MAP_SIZE;
 		/*
 		 * We aren't changing the inode itself.  Just changing
 		 * some of its data.  No new blocks are added here, and
 		 * the inode is never added to the transaction.
 		 */
-		xfs_ilock(qip, XFS_ILOCK_SHARED);
+		lock_mode = xfs_ilock_data_map_shared(qip);
 		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
 				       map, &nmaps, 0);
-		xfs_iunlock(qip, XFS_ILOCK_SHARED);
+		xfs_iunlock(qip, lock_mode);
 		if (error)
 			break;
...