Commit ac322de6 authored by Linus Torvalds

Merge tag 'md/4.4' of git://neil.brown.name/md

Pull md updates from Neil Brown:
 "Two major components to this update.

   1) The clustered-raid1 support from SUSE is nearly complete.  There
      are a few outstanding issues being worked on.  Maybe half a dozen
      patches will bring this to a usable state.

   2) The first stage of journalled-raid5 support from Facebook makes an
      appearance.  With a journal device configured (typically NVRAM or
      SSD), the "RAID5 write hole" should be closed - a crash during
      degraded operations cannot result in data corruption.

      The next stage will be to use the journal as a write-behind cache
      so that latency can be reduced and in some cases throughput
      increased by performing more full-stripe writes."
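
The write ordering that closes the hole can be pictured with a minimal, hedged sketch (write-through journalling of one stripe; every identifier below is a placeholder for illustration, not the raid5-cache API):

/* Hedged sketch: write-through journalling of one stripe. All names are
 * placeholders for illustration, not functions from drivers/md/raid5-cache.c. */
struct stripe { void *new_data; void *new_parity; };

extern int journal_append(void *data, void *parity);    /* placeholder */
extern int journal_flush(void);                          /* placeholder */
extern int write_stripe_to_members(struct stripe *sh);   /* placeholder */

static int journalled_stripe_write(struct stripe *sh)
{
	int err;

	/* 1. Commit the new data and the recomputed parity to the log. */
	err = journal_append(sh->new_data, sh->new_parity);
	if (err)
		return err;

	/* 2. Flush so the log entry is durable before the array is touched. */
	err = journal_flush();
	if (err)
		return err;

	/* 3. Only then update the member disks; a crash at any point can be
	 *    repaired by replaying the journal, so data and parity cannot be
	 *    left inconsistent while the array is degraded. */
	return write_stripe_to_members(sh);
}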

* tag 'md/4.4' of git://neil.brown.name/md: (66 commits)
  MD: when RAID journal is missing/faulty, block RESTART_ARRAY_RW
  MD: set journal disk ->raid_disk
  MD: kick out journal disk if it's not fresh
  raid5-cache: start raid5 readonly if journal is missing
  MD: add new bit to indicate raid array with journal
  raid5-cache: IO error handling
  raid5: journal disk can't be removed
  raid5-cache: add trim support for log
  MD: fix info output for journal disk
  raid5-cache: use bio chaining
  raid5-cache: small log->seq cleanup
  raid5-cache: new helper: r5_reserve_log_entry
  raid5-cache: inline r5l_alloc_io_unit into r5l_new_meta
  raid5-cache: take rdev->data_offset into account early on
  raid5-cache: refactor bio allocation
  raid5-cache: clean up r5l_get_meta
  raid5-cache: simplify state machine when caches flushes are not needed
  raid5-cache: factor out a helper to run all stripes for an I/O unit
  raid5-cache: rename flushed_ios to finished_ios
  raid5-cache: free I/O units earlier
  ...
parents ccf21b69 339421de
@@ -17,7 +17,7 @@ dm-cache-smq-y += dm-cache-policy-smq.o
 dm-cache-cleaner-y += dm-cache-policy-cleaner.o
 dm-era-y	+= dm-era-target.o
 md-mod-y	+= md.o bitmap.o
-raid456-y	+= raid5.o
+raid456-y	+= raid5.o raid5-cache.o
 
 # Note: link order is important.  All raid personalities
 # and must come before md.o, as they each initialise
...
@@ -613,12 +613,10 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
 	write_behind = le32_to_cpu(sb->write_behind);
 	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-	/* XXX: This is a hack to ensure that we don't use clustering
-	 *  in case:
-	 *	- dm-raid is in use and
-	 *	- the nodes written in bitmap_sb is erroneous.
-	 */
-	if (!bitmap->mddev->sync_super) {
+	/* Setup nodes/clustername only if bitmap version is
+	 * cluster-compatible
+	 */
+	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
 		nodes = le32_to_cpu(sb->nodes);
 		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
 			sb->cluster_name, 64);
@@ -628,7 +626,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
 		reason = "bad magic";
 	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
-		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
+		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
 		reason = "unrecognized superblock version";
 	else if (chunksize < 512)
 		reason = "bitmap chunksize too small";
@@ -1572,7 +1570,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
 }
 EXPORT_SYMBOL(bitmap_close_sync);
 
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 {
 	sector_t s = 0;
 	sector_t blocks;
@@ -1583,7 +1581,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
 		bitmap->last_end_sync = jiffies;
 		return;
 	}
-	if (time_before(jiffies, (bitmap->last_end_sync
+	if (!force && time_before(jiffies, (bitmap->last_end_sync
 				  + bitmap->mddev->bitmap_info.daemon_sleep)))
 		return;
 	wait_event(bitmap->mddev->recovery_wait,
...
@@ -9,8 +9,10 @@
 #define BITMAP_MAJOR_LO 3
 /* version 4 insists the bitmap is in little-endian order
  * with version 3, it is host-endian which is non-portable
+ * Version 5 is currently set only for clustered devices
  */
 #define BITMAP_MAJOR_HI 4
+#define	BITMAP_MAJOR_CLUSTERED 5
 
 #define BITMAP_MAJOR_HOSTENDIAN 3
 
 /*
@@ -255,7 +257,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
 void bitmap_close_sync(struct bitmap *bitmap);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
 void bitmap_unplug(struct bitmap *bitmap);
 void bitmap_daemon_work(struct mddev *mddev);
...
This diff is collapsed.
@@ -12,15 +12,15 @@ struct md_cluster_operations {
 	int (*join)(struct mddev *mddev, int nodes);
 	int (*leave)(struct mddev *mddev);
 	int (*slot_number)(struct mddev *mddev);
-	void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
-	int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi);
-	void (*resync_finish)(struct mddev *mddev);
+	int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
 	int (*metadata_update_start)(struct mddev *mddev);
 	int (*metadata_update_finish)(struct mddev *mddev);
-	int (*metadata_update_cancel)(struct mddev *mddev);
+	void (*metadata_update_cancel)(struct mddev *mddev);
+	int (*resync_start)(struct mddev *mddev);
+	int (*resync_finish)(struct mddev *mddev);
 	int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
-	int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
-	int (*add_new_disk_finish)(struct mddev *mddev);
+	int (*add_new_disk)(struct mddev *mddev, struct md_rdev *rdev);
+	void (*add_new_disk_cancel)(struct mddev *mddev);
 	int (*new_disk_ack)(struct mddev *mddev, bool ack);
 	int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
 	int (*gather_bitmaps)(struct md_rdev *rdev);
...
This diff is collapsed.
@@ -87,10 +87,16 @@ struct md_rdev {
 					 * array and could again if we did a partial
 					 * resync from the bitmap
 					 */
+	union {
 	sector_t recovery_offset;/* If this device has been partially
 					 * recovered, this is where we were
 					 * up to.
 					 */
+	sector_t journal_tail;	/* If this device is a journal device,
+					 * this is the journal tail (journal
+					 * recovery start point)
+					 */
+	};
 
 	atomic_t	nr_pending;	/* number of pending requests.
 					 * only maintained for arrays that
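
In other words, the same 8-byte slot now records "how far recovery got" for a normal member and "where journal replay starts" for a journal device. An illustrative helper (not taken from md.c) that picks the right interpretation based on the new Journal flag:

/* Illustrative only; assumes the md_rdev union and Journal flag from this patch. */
static inline sector_t rdev_resume_point(struct md_rdev *rdev)
{
	return test_bit(Journal, &rdev->flags) ? rdev->journal_tail
					       : rdev->recovery_offset;
}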
@@ -172,6 +178,11 @@ enum flag_bits {
 				 * This device is seen locally but not
 				 * by the whole cluster
 				 */
+	Journal,		/* This device is used as journal for
+				 * raid-5/6.
+				 * Usually, this device should be faster
+				 * than other devices in the array
+				 */
 };
 
 #define BB_LEN_MASK		(0x00000000000001FFULL)
@@ -221,6 +232,8 @@ struct mddev {
 #define MD_STILL_CLOSED	4	/* If set, then array has not been opened since
 				 * md_ioctl checked on it.
 				 */
+#define MD_JOURNAL_CLEAN 5	/* A raid with journal is already clean */
+#define MD_HAS_JOURNAL	6	/* The raid array has journal feature set */
 
 	int				suspended;
 	atomic_t			active_io;
@@ -658,7 +671,7 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   struct mddev *mddev);
 extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 
-extern void md_reload_sb(struct mddev *mddev);
+extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
 extern void md_kick_rdev_from_array(struct md_rdev * rdev);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
...
@@ -90,6 +90,8 @@ static void r1bio_pool_free(void *r1_bio, void *data)
 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
 
 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
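
For scale, assuming raid1.c's pre-existing RESYNC_BLOCK_SIZE of 64 KiB and RESYNC_DEPTH of 32 (neither is shown in this hunk), the new per-node cluster window works out to 32 MiB, i.e. 65536 sectors:

/* Worked example; the first two values are assumptions taken from raid1.c. */
#define RESYNC_BLOCK_SIZE (64 * 1024)                               /* assumed        */
#define RESYNC_DEPTH 32                                             /* assumed        */
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)            /*  2 MiB         */
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)                  /* 32 MiB         */
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)  /* 65536 sectors  */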
@@ -1590,6 +1592,15 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
+	/*
+	 * find the disk ... but prefer rdev->saved_raid_disk
+	 * if possible.
+	 */
+	if (rdev->saved_raid_disk >= 0 &&
+	    rdev->saved_raid_disk >= first &&
+	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+		first = last = rdev->saved_raid_disk;
+
 	for (mirror = first; mirror <= last; mirror++) {
 		p = conf->mirrors+mirror;
 		if (!p->rdev) {
@@ -2495,6 +2506,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 		bitmap_close_sync(mddev->bitmap);
 		close_sync(conf);
 
+		if (mddev_is_clustered(mddev)) {
+			conf->cluster_sync_low = 0;
+			conf->cluster_sync_high = 0;
+		}
+
 		return 0;
 	}
@@ -2515,7 +2531,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 		return sync_blocks;
 	}
 
-	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+	/* we are incrementing sector_nr below. To be safe, we check against
+	 * sector_nr + two times RESYNC_SECTORS
+	 */
+
+	bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
 	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
 
 	raise_barrier(conf, sector_nr);
@@ -2706,6 +2727,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
  bio_full:
 	r1_bio->sectors = nr_sectors;
 
+	if (mddev_is_clustered(mddev) &&
+			conf->cluster_sync_high < sector_nr + nr_sectors) {
+		conf->cluster_sync_low = mddev->curr_resync_completed;
+		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
+		/* Send resync message */
+		md_cluster_ops->resync_info_update(mddev,
+				conf->cluster_sync_low,
+				conf->cluster_sync_high);
+	}
+
 	/* For a user-requested sync, we read all readable devices and do a
 	 * compare
 	 */
@@ -3020,9 +3051,11 @@ static int raid1_reshape(struct mddev *mddev)
 		return -EINVAL;
 	}
 
-	err = md_allow_write(mddev);
-	if (err)
-		return err;
+	if (!mddev_is_clustered(mddev)) {
+		err = md_allow_write(mddev);
+		if (err)
+			return err;
+	}
 
 	raid_disks = mddev->raid_disks + mddev->delta_disks;
...
@@ -111,6 +111,13 @@ struct r1conf {
 	 * the new thread here until we fully activate the array.
 	 */
 	struct md_thread	*thread;
+
+	/* Keep track of cluster resync window to send to other
+	 * nodes.
+	 */
+	sector_t		cluster_sync_low;
+	sector_t		cluster_sync_high;
+
 };
 
 /*
...
@@ -3149,7 +3149,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		/* resync. Schedule a read for every block at this virt offset */
 		int count = 0;
 
-		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+		bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
 
 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
 				       &sync_blocks, mddev->degraded) &&
This diff is collapsed.
This diff is collapsed.
@@ -223,6 +223,9 @@ struct stripe_head {
 	struct stripe_head	*batch_head; /* protected by stripe lock */
 	spinlock_t		batch_lock; /* only header's lock is useful */
 	struct list_head	batch_list; /* protected by head's batch lock*/
+
+	struct r5l_io_unit	*log_io;
+	struct list_head	log_list;
 	/**
 	 * struct stripe_operations
 	 * @target - STRIPE_OP_COMPUTE_BLK target
@@ -244,6 +247,7 @@ struct stripe_head {
 		struct bio	*toread, *read, *towrite, *written;
 		sector_t	sector;			/* sector of this page */
 		unsigned long	flags;
+		u32		log_checksum;
 	} dev[1]; /* allocated with extra space depending of RAID geometry */
 };
@@ -268,6 +272,7 @@ struct stripe_head_state {
 	struct bio_list return_bi;
 	struct md_rdev *blocked_rdev;
 	int handle_bad_blocks;
+	int log_failed;
 };
 
 /* Flags for struct r5dev.flags */
@@ -340,6 +345,7 @@ enum {
 	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
 				 * to batch yet.
 				 */
+	STRIPE_LOG_TRAPPED, /* trapped into log */
 };
 
 #define STRIPE_EXPAND_SYNC_FLAGS \
@@ -543,6 +549,7 @@ struct r5conf {
 	struct r5worker_group	*worker_groups;
 	int			group_cnt;
 	int			worker_cnt_per_group;
+	struct r5l_log		*log;
 };
@@ -609,4 +616,21 @@ static inline int algorithm_is_DDF(int layout)
 
 extern void md_raid5_kick_device(struct r5conf *conf);
 extern int raid5_set_cache_size(struct mddev *mddev, int size);
+extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
+extern void raid5_release_stripe(struct stripe_head *sh);
+extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+				     int previous, int *dd_idx,
+				     struct stripe_head *sh);
+extern struct stripe_head *
+raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+			int previous, int noblock, int noquiesce);
+extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
+extern void r5l_exit_log(struct r5l_log *log);
+extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
+extern void r5l_write_stripe_run(struct r5l_log *log);
+extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
+extern void r5l_stripe_write_finished(struct stripe_head *sh);
+extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
+extern void r5l_quiesce(struct r5l_log *log, int state);
+extern bool r5l_log_disk_error(struct r5conf *conf);
 #endif
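
The intended use of the new r5l_* hooks can be summarised with a simplified sketch (not a verbatim raid5.c excerpt): a stripe that is ready for disk I/O is offered to the log first and only falls through to the normal member-disk path when no journal is configured or the log declines it.

/* Simplified sketch of the dispatch idea; not the exact raid5.c logic. */
static void submit_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	/* r5l_write_stripe() returns 0 when the log has taken ownership and
	 * will push the stripe to the member disks after the log write. */
	if (r5l_write_stripe(conf->log, sh) == 0)
		return;

	/* No journal (or the log refused): fall back to the usual
	 * RAID5/6 member-disk I/O for this stripe. */
	/* ... existing raid5 I/O submission path ... */
}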
@@ -89,6 +89,12 @@
 			   * read requests will only be sent here in
 			   * dire need
 			   */
+#define MD_DISK_JOURNAL		18 /* disk is used as the write journal in RAID-5/6 */
+
+#define MD_DISK_ROLE_SPARE	0xffff
+#define MD_DISK_ROLE_FAULTY	0xfffe
+#define MD_DISK_ROLE_JOURNAL	0xfffd
+#define MD_DISK_ROLE_MAX	0xff00 /* max value of regular disk role */
 
 typedef struct mdp_device_descriptor_s {
 	__u32 number;		/* 0 Device number in the entire set */
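
A small user-space illustration of how a dev_roles[] entry would now be classified (hypothetical helper, not kernel code; the on-disk values are little-endian, so byte order is the caller's concern on big-endian hosts):

#include <stdint.h>

/* Hypothetical helper mirroring the new MD_DISK_ROLE_* constants. */
static const char *md_role_name(uint16_t role)
{
	switch (role) {
	case 0xffff: return "spare";    /* MD_DISK_ROLE_SPARE   */
	case 0xfffe: return "faulty";   /* MD_DISK_ROLE_FAULTY  */
	case 0xfffd: return "journal";  /* MD_DISK_ROLE_JOURNAL */
	default:
		/* values up to MD_DISK_ROLE_MAX are regular slot numbers */
		return role < 0xff00 ? "active data slot" : "unrecognised";
	}
}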
@@ -252,7 +258,10 @@ struct mdp_superblock_1 {
 	__le64	data_offset;	/* sector start of data, often 0 */
 	__le64	data_size;	/* sectors in this device that can be used for data */
 	__le64	super_offset;	/* sector start of this superblock */
+	union {
 	__le64	recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
+	__le64	journal_tail;/* journal tail of journal device (from data_offset) */
+	};
 	__le32	dev_number;	/* permanent identifier of this device - not role in raid */
 	__le32	cnt_corrected_read; /* number of read errors that were corrected by re-writing */
 	__u8	device_uuid[16]; /* user-space setable, ignored by kernel */
@@ -302,6 +311,8 @@ struct mdp_superblock_1 {
 #define	MD_FEATURE_RECOVERY_BITMAP	128 /* recovery that is happening
 					     * is guided by bitmap.
 					     */
+#define	MD_FEATURE_CLUSTERED		256 /* clustered MD */
+#define	MD_FEATURE_JOURNAL		512 /* support write cache */
 #define	MD_FEATURE_ALL			(MD_FEATURE_BITMAP_OFFSET	\
 					|MD_FEATURE_RECOVERY_OFFSET	\
 					|MD_FEATURE_RESHAPE_ACTIVE	\
@@ -310,6 +321,66 @@ struct mdp_superblock_1 {
 					|MD_FEATURE_RESHAPE_BACKWARDS	\
 					|MD_FEATURE_NEW_OFFSET		\
 					|MD_FEATURE_RECOVERY_BITMAP	\
+					|MD_FEATURE_CLUSTERED		\
+					|MD_FEATURE_JOURNAL		\
 					)
 
+struct r5l_payload_header {
+	__le16 type;
+	__le16 flags;
+} __attribute__ ((__packed__));
+
+enum r5l_payload_type {
+	R5LOG_PAYLOAD_DATA = 0,
+	R5LOG_PAYLOAD_PARITY = 1,
+	R5LOG_PAYLOAD_FLUSH = 2,
+};
+
+struct r5l_payload_data_parity {
+	struct r5l_payload_header header;
+	__le32 size;		/* sector. data/parity size. each 4k
+				 * has a checksum */
+	__le64 location;	/* sector. For data, it's raid sector. For
+				 * parity, it's stripe sector */
+	__le32 checksum[];
+} __attribute__ ((__packed__));
+
+enum r5l_payload_data_parity_flag {
+	R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */
+	/*
+	 * RESHAPED/RESHAPING is only set when there is reshape activity. Note,
+	 * both data/parity of a stripe should have the same flag set
+	 *
+	 * RESHAPED: reshape is running, and this stripe finished reshape
+	 * RESHAPING: reshape is running, and this stripe isn't reshaped
+	 */
+	R5LOG_PAYLOAD_FLAG_RESHAPED = 2,
+	R5LOG_PAYLOAD_FLAG_RESHAPING = 3,
+};
+
+struct r5l_payload_flush {
+	struct r5l_payload_header header;
+	__le32 size; /* flush_stripes size, bytes */
+	__le64 flush_stripes[];
+} __attribute__ ((__packed__));
+
+enum r5l_payload_flush_flag {
+	R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */
+};
+
+struct r5l_meta_block {
+	__le32 magic;
+	__le32 checksum;
+	__u8 version;
+	__u8 __zero_pading_1;
+	__le16 __zero_pading_2;
+	__le32 meta_size; /* whole size of the block */
+	__le64 seq;
+	__le64 position; /* sector, start from rdev->data_offset, current position */
+	struct r5l_payload_header payloads[];
+} __attribute__ ((__packed__));
+
+#define R5LOG_VERSION 0x1
+#define R5LOG_MAGIC 0x6433c509
+
 #endif
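
To make the on-disk layout concrete, here is a rough user-space sketch that sanity-checks one r5l_meta_block read from a journal device (it assumes a little-endian host so the __le fields can be compared directly; the real kernel additionally verifies the crc32c checksum and the expected sequence number and position):

#include <stdint.h>
#include <stddef.h>

/* User-space mirror of the structures above; little-endian host assumed. */
struct r5l_payload_header { uint16_t type; uint16_t flags; } __attribute__((packed));

struct r5l_meta_block {
	uint32_t magic;
	uint32_t checksum;
	uint8_t  version;
	uint8_t  __zero_pading_1;
	uint16_t __zero_pading_2;
	uint32_t meta_size;	/* whole size of the block */
	uint64_t seq;
	uint64_t position;	/* sector, relative to rdev->data_offset */
	struct r5l_payload_header payloads[];
} __attribute__((packed));

#define R5LOG_VERSION 0x1
#define R5LOG_MAGIC   0x6433c509

/* Returns 0 if the header looks plausible; the kernel also checks crc32c. */
static int r5l_meta_block_sane(const struct r5l_meta_block *mb, size_t buf_len)
{
	if (buf_len < sizeof(*mb))
		return -1;
	if (mb->magic != R5LOG_MAGIC || mb->version != R5LOG_VERSION)
		return -1;
	if (mb->meta_size < sizeof(*mb) || mb->meta_size > buf_len)
		return -1;
	return 0;
}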