/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

/* Device sector size vs. the 4KB page size pblk exposes to the host */
#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

/* Number of physical (512B) sectors per exposed 4KB page */
#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)

/* Default over-provisioning percentage */
#define PBLK_DEFAULT_OP (11)

57 58 59 60
enum {
	PBLK_READ		= READ,
	PBLK_WRITE		= WRITE,/* Write from write buffer */
	PBLK_WRITE_INT,			/* Internal write - no write buffer */
61
	PBLK_READ_RECOV,		/* Recovery read - errors allowed */
62 63 64
	PBLK_ERASE,
};

enum {
	/* IO Types */
	PBLK_IOTYPE_USER	= 1 << 0,
	PBLK_IOTYPE_GC		= 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY	= 1 << 2,
	PBLK_WRITTEN_DATA	= 1 << 3,
	PBLK_SUBMITTED_ENTRY	= 1 << 4,
	PBLK_WRITABLE_ENTRY	= 1 << 5,
};

/* Chunk (block) states as tracked by pblk */
enum {
	PBLK_BLK_ST_OPEN =	0x1,
	PBLK_BLK_ST_CLOSED =	0x2,
};

82 83 84 85 86
struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 4

enum {
	PBLK_RL_OFF = 0,
	PBLK_RL_WERR = 1,
	PBLK_RL_HIGH = 2,
	PBLK_RL_MID = 3,
	PBLK_RL_LOW = 4
};

/* DMA buffer sizes for per-request sector metadata and ppa lists */
#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)

103
/* write buffer completion context */
104 105 106 107 108 109 110 111 112
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */

	unsigned long *lun_bitmap;	/* Luns used on current request */
	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};

113
/* read context */
114 115
struct pblk_g_ctx {
	void *private;
116
	unsigned long start_time;
117
	u64 lba;
118 119
};

120 121 122 123 124 125 126 127 128 129
/* partial read context */
struct pblk_pr_ctx {
	struct bio *orig_bio;
	DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
	unsigned int orig_nr_secs;
	unsigned int bio_init_idx;
	void *ppa_ptr;
	dma_addr_t dma_ppa_list;
};

130 131 132 133 134 135 136
/* Pad context */
struct pblk_pad_rq {
	struct pblk *pblk;
	struct completion wait;
	struct kref ref;
};

137 138 139 140 141 142 143 144 145 146 147 148
/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
149
	u64 lba;			/* Logic addr. associated with entry */
150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181
	struct ppa_addr ppa;		/* Physic addr. associated with entry */
	int flags;			/* Write context flags */
};

/* One entry of the pblk write ring buffer */
struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};

/* Sentinel value (presumably marks an unused entry — see ring buffer code) */
#define EMPTY_ENTRY (~0U)

/* Page allocation backing the ring buffer data, kept for later freeing */
struct pblk_rb_pages {
	struct page *pages;
	int order;			/* Allocation order of 'pages' */
	struct list_head list;
};

struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
182
	unsigned int flush_point;	/* Sync point - last entry that must be
183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which l2p mapping will be updated to
					 * contain a device ppa address (instead
					 * of a cacheline
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

204
#ifdef CONFIG_NVM_PBLK_DEBUG
205
	atomic_t inflight_flush_point;	/* Not served REQ_FLUSH | REQ_FUA */
206 207 208 209 210 211 212 213 214 215 216 217 218
#endif
};

#define PBLK_RECOVERY_SECTORS 16

/* Per-LUN state */
struct pblk_lun {
	struct ppa_addr bppa;		/* Base ppa for this LUN */
	struct semaphore wr_sem;	/* Write semaphore for the LUN */
};

struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
219 220
	u64 paddr_list[NVM_MAX_VLBA];
	u64 lba_list[NVM_MAX_VLBA];
221 222 223 224 225 226
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};

struct pblk_gc {
227 228 229
	/* These states are not protected by a lock since (i) they are in the
	 * fast path, and (ii) they are not critical.
	 */
230 231 232 233 234 235
	int gc_active;
	int gc_enabled;
	int gc_forced;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
236 237 238
	struct task_struct *gc_reader_ts;

	struct workqueue_struct *gc_line_reader_wq;
239
	struct workqueue_struct *gc_reader_wq;
240

241 242
	struct timer_list gc_timer;

243
	struct semaphore gc_sem;
244 245 246 247
	atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
	atomic_t pipeline_gc;	   /* Number of lines in the GC pipeline -
				    * started reads to finished writes
				    */
248
	int w_entries;
249

250
	struct list_head w_list;
251
	struct list_head r_list;
252 253 254

	spinlock_t lock;
	spinlock_t w_lock;
255
	spinlock_t r_lock;
256 257 258 259 260 261 262 263
};

struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

264 265
#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 12% available blks */
#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */
266 267 268 269 270 271 272 273 274 275 276 277 278 279

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, there
				 * will be reserved enough space for the GC to
				 * place its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256kb.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */
280
	int rb_max_io;		/* Maximum size for an I/O giving the config */
281 282

	atomic_t rb_user_cnt;	/* User I/O buffer counter */
283
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
284
	atomic_t rb_space;	/* Space limit in case of reaching capacity */
285

286 287
	int rsv_blocks;		/* Reserved blocks for GC */

288
	int rb_user_active;
289 290
	int rb_gc_active;

291 292
	atomic_t werr_lines;	/* Number of write error lines that needs gc */

293 294 295 296
	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
297 298 299

	atomic_t free_blocks;		/* Total number of free blocks (+ OP) */
	atomic_t free_user_blocks;	/* Number of user free blocks (no OP) */
300 301 302 303 304 305 306 307 308 309 310
};

#define PBLK_LINE_EMPTY (~0U)

enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_NEW = 9,
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
	PBLK_LINEGC_WERR = 26
};

#define PBLK_MAGIC 0x70626c6b /*pblk*/

/* emeta/smeta persistent storage format versions:
 * Changes in major version requires offline migration.
 * Changes in minor version are handled automatically during
 * recovery.
 */

#define SMETA_VERSION_MAJOR (0)
#define SMETA_VERSION_MINOR (1)

#define EMETA_VERSION_MAJOR (0)
#define EMETA_VERSION_MINOR (2)

/* On-media header shared by smeta and emeta */
struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__u8 version_major;	/* version major */
	__u8 version_minor;	/* version minor */
	__le32 id;		/* line id for current line */
};

/* Start metadata written at the beginning of a line */
struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */
	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	__le32 rsvd[2];

	__le64 lun_bitmap[];
};

371

372
/*
373 374 375 376
 * Metadata layout in media:
 *	First sector:
 *		1. struct line_emeta
 *		2. bad block bitmap (u64 * window_wr_lun)
377
 *		3. write amplification counters
378 379 380 381
 *	Mid sectors (start at lbas_sector):
 *		3. nr_lbas (u64) forming lba list
 *	Last sectors (start at vsc_sector):
 *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
401 402 403 404 405 406 407 408 409
	__le64 bb_bitmap[];     /* Updated bad block bitmap for line */
};


/* Write amplification counters stored on media */
struct wa_counters {
	__le64 user;		/* Number of user written sectors */
	__le64 gc;		/* Number of sectors written by GC */
	__le64 pad;		/* Number of padded sectors */
};

/* In-memory bookkeeping for a line's end metadata (emeta) */
struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};

/* In-memory bookkeeping for a line's start metadata (smeta) */
struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};

428 429 430 431 432
struct pblk_w_err_gc {
	int has_write_err;
	__le64 *lba_list;
};

433 434 435 436 437 438 439 440 441 442 443 444 445 446
struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */

	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

447 448
	struct nvm_chk_meta *chks;	/* Chunks forming line */

449 450 451
	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End medatada */

452
	int meta_line;			/* Metadata line id */
453 454
	int meta_distance;		/* Distance between data and metadata */

455 456 457 458 459
	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

460
	atomic_t blk_in_line;		/* Number of good blocks in line */
461 462 463 464 465 466
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

467
	atomic_t left_eblks;		/* Blocks left for erasing */
468 469 470 471
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	unsigned int cur_sec;		/* Sector map pointer */
472 473 474
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */
475 476 477

	struct kref ref;		/* Write buffer L2P references */

478 479
	struct pblk_w_err_gc *w_err_gc;	/* Write error gc recovery metadata */

480 481 482
	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

/* Metadata allocation backends */
enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};

/* Sections of the emeta region on media */
enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
506
	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
507 508 509 510
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */

511 512
	struct list_head gc_werr_list;  /* Write err recovery list */

513 514 515 516 517 518 519 520
	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines close, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

521 522 523 524
	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

525 526 527 528
	/* Metadata allocation type: VMALLOC | KMALLOC */
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
529 530
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
531 532 533 534 535 536 537 538 539 540
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
541
	spinlock_t close_lock;
542 543 544 545 546
	spinlock_t gc_lock;
};

/* Geometry-derived sizes shared by all lines */
struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 *  [0]: Total
					 *  [1]: struct line_emeta +
					 *       bb_bitmap + struct wa_counters
					 *  [2]: L2P portion
					 *  [3]: vsc
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};

/* Instance lifecycle states */
enum {
	PBLK_STATE_RUNNING = 0,
	PBLK_STATE_STOPPING = 1,
	PBLK_STATE_RECOVERING = 2,
	PBLK_STATE_STOPPED = 3,
};

/* Internal format to support not power-of-2 device formats */
struct pblk_addrf {
	/* gen to dev */
	int sec_stripe;
	int ch_stripe;
	int lun_stripe;

	/* dev to gen */
	int sec_lun_stripe;
	int sec_ws_stripe;
};

597 598 599 600 601 602 603 604 605 606 607 608
struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;		/* Line array */
	struct pblk_line_mgmt l_mg;		/* Line management */
	struct pblk_line_meta lm;		/* Line metadata */

609 610
	struct nvm_addrf addrf;		/* Aligned address format */
	struct pblk_addrf uaddrf;	/* Unaligned address format */
611
	int addrf_len;
612 613 614

	struct pblk_rb rwb;

615 616
	int state;			/* pblk line state */

617 618 619 620
	int min_write_pgs; /* Minimum amount of pages required by controller */
	int max_write_pgs; /* Maximum amount of pages supported by controller */

	sector_t capacity; /* Device capacity when bad blocks are subtracted */
621 622 623

	int op;      /* Percentage of device used for over-provisioning */
	int op_blks; /* Number of blocks used for over-provisioning */
624 625 626 627

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

628
	int sec_per_write;
629 630

	unsigned char instance_uuid[16];
631 632 633 634 635 636 637 638 639 640 641

	/* Persistent write amplification counters, 4kb sector I/Os */
	atomic64_t user_wa;		/* Sectors written by user */
	atomic64_t gc_wa;		/* Sectors written by GC */
	atomic64_t pad_wa;		/* Padded sectors written */

	/* Reset values for delta write amplification measurements */
	u64 user_rst_wa;
	u64 gc_rst_wa;
	u64 pad_rst_wa;

642 643 644 645 646
	/* Counters used for calculating padding distribution */
	atomic64_t *pad_dist;		/* Padding distribution buckets */
	u64 nr_flush_rst;		/* Flushes reset value for pad dist.*/
	atomic64_t nr_flush;		/* Number of flush/fua I/O */

647
#ifdef CONFIG_NVM_PBLK_DEBUG
648
	/* Non-persistent debug counters, 4kb sector I/Os */
649 650 651 652 653 654 655
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
656
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
657 658 659 660 661 662 663 664 665 666 667 668 669 670 671
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

672 673
	atomic_t inflight_io;		/* General inflight I/O counter */

674 675 676 677 678 679 680 681 682 683 684
	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses is known by the host system, while the physical
	 * addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

685 686 687
	spinlock_t resubmit_lock;	 /* Resubmit list lock */
	struct list_head resubmit_list; /* Resubmit list for failed writes*/

688 689 690 691 692 693
	mempool_t page_bio_pool;
	mempool_t gen_ws_pool;
	mempool_t rec_pool;
	mempool_t r_rq_pool;
	mempool_t w_rq_pool;
	mempool_t e_rq_pool;
694

695 696
	struct workqueue_struct *close_wq;
	struct workqueue_struct *bb_wq;
697
	struct workqueue_struct *r_end_wq;
698

699 700 701 702 703 704 705 706 707 708 709 710
	struct timer_list wtimer;

	struct pblk_gc gc;
};

/* Generic deferred-work context carrying a pblk instance and a line */
struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;			/* Work-specific payload */
	struct work_struct ws;
};

/* Full request sizes: nvm_rq plus the per-request PDU context */
#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

/* Instance-prefixed logging helpers */
#define pblk_err(pblk, fmt, ...)			\
	pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_info(pblk, fmt, ...)			\
	pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_warn(pblk, fmt, ...)			\
	pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_debug(pblk, fmt, ...)			\
	pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)

723 724 725 726 727 728 729 730 731 732 733 734 735 736
/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
737 738
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int pos);
739
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
740
void pblk_rb_flush(struct pblk_rb *rb);
741 742

void pblk_rb_sync_l2p(struct pblk_rb *rb);
743
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
744 745
				 unsigned int pos, unsigned int nr_entries,
				 unsigned int count);
746
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
747
			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
748 749 750 751 752 753 754
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
755
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
756 757

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
758
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
759 760 761 762 763 764 765 766 767 768
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					      struct nvm_chk_meta *lp,
					      struct ppa_addr ppa);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void __pblk_pipeline_stop(struct pblk *pblk);
void __pblk_pipeline_flush(struct pblk *pblk);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
void pblk_write_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
 * pblk recovery
 */
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128	/* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk, bool graceful);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);

void pblk_rl_werr_line_in(struct pblk_rl *rl);
void pblk_rl_werr_line_out(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

/*
 * Allocate @size bytes for line metadata. PBLK_KMALLOC_META requests
 * physically contiguous memory (kmalloc); anything else falls back to
 * virtually contiguous memory (vmalloc). Free with pblk_mfree().
 */
static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	return (type == PBLK_KMALLOC_META) ? kmalloc(size, flags)
					   : vmalloc(size);
}

/*
 * Release memory obtained from pblk_malloc(); @type must match the
 * allocation type so the right allocator is used.
 */
static inline void pblk_mfree(void *ptr, int type)
{
	if (type != PBLK_KMALLOC_META) {
		vfree(ptr);
		return;
	}

	kfree(ptr);
}

/*
 * Recover the nvm_rq from its completion context: the context is laid
 * out immediately after the request, so step back by sizeof(struct
 * nvm_rq). (Relies on the GNU void-pointer arithmetic extension.)
 */
static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}

/* Bad-block bitmap: first variable-length field after the emeta header. */
static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}

/* Write-amplification counters follow the bad-block bitmap in emeta. */
static inline void *emeta_to_wa(struct pblk_line_meta *lm,
				struct line_emeta *emeta)
{
	return emeta->bb_bitmap + lm->blk_bitmap_len;
}

/* LBA list starts emeta_len[1] bytes into the emeta buffer. */
static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

/* Valid-sector-count table follows the LBA list (emeta_len[2] bytes). */
static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}

/* Valid sector count of a line; stored little-endian on media. */
static inline int pblk_line_vsc(struct pblk_line *line)
{
	return le32_to_cpu(*line->vsc);
}

/*
 * Number of sectors of padding needed to flush open write units across
 * all LUNs (cache minimum write units * LUNs * optimal write size).
 */
static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return geo->mw_cunits * geo->all_luns * geo->ws_opt;
}

/* Lines map 1:1 to block/chunk ids: the ppa's block field is the line id. */
static inline int pblk_ppa_to_line(struct ppa_addr p)
{
	return p.a.blk;
}

/* Flatten (lun, channel) into a sequential LUN position index. */
static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.a.lun * geo->num_ch + p.a.ch;
}

/*
 * Convert a line-local sector offset (@paddr) within line @line_id into
 * a device physical address, honoring the OCSSD spec version.
 */
static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		/* 1.2: decode paddr through the device's mask/offset format */
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

		ppa.ppa = 0;
		ppa.g.blk = line_id;
		ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
		ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
		ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
		ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
		ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
	} else {
		/* 2.0: peel off sector/channel/LUN stripes in order */
		struct pblk_addrf *uaddrf = &pblk->uaddrf;
		int secs, chnls, luns;

		ppa.ppa = 0;

		ppa.m.chk = line_id;

		paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
		ppa.m.sec = secs;

		paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
		ppa.m.grp = chnls;

		paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
		ppa.m.pu = luns;

		/* remaining quotient selects the write-unit stripe row */
		ppa.m.sec += uaddrf->sec_stripe * paddr;
	}

	return ppa;
}

/*
 * Inverse of addr_to_gen_ppa(): fold a device physical address back
 * into its line-local sector offset.
 */
static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
							struct ppa_addr p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	u64 paddr;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		/* 1.2: re-encode fields through the device format */
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

		paddr = (u64)p.g.ch << ppaf->ch_offset;
		paddr |= (u64)p.g.lun << ppaf->lun_offset;
		paddr |= (u64)p.g.pg << ppaf->pg_offset;
		paddr |= (u64)p.g.pl << ppaf->pln_offset;
		paddr |= (u64)p.g.sec << ppaf->sec_offset;
	} else {
		/* 2.0: recombine the stripe components */
		struct pblk_addrf *uaddrf = &pblk->uaddrf;
		u64 secs = p.m.sec;
		int sec_stripe;

		paddr = (u64)p.m.grp * uaddrf->sec_stripe;
		paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;

		secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
		paddr += secs * uaddrf->sec_ws_stripe;
		paddr += sec_stripe;
	}

	return paddr;
}

/*
 * Expand a compact 32-bit L2P entry into a 64-bit ppa. Three encodings:
 * all-ones = unmapped, top bit set = cache line (write buffer), else a
 * device address decoded per the OCSSD spec version.
 */
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		/* unmapped sentinel */
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		/* cache-resident: low 31 bits are the write-buffer entry */
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		if (geo->version == NVM_OCSSD_SPEC_12) {
			struct nvm_addrf_12 *ppaf =
					(struct nvm_addrf_12 *)&pblk->addrf;

			ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
							ppaf->ch_offset;
			ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
							ppaf->lun_offset;
			ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
							ppaf->blk_offset;
			ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
							ppaf->pg_offset;
			ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
							ppaf->pln_offset;
			ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
							ppaf->sec_offset;
		} else {
			struct nvm_addrf *lbaf = &pblk->addrf;

			ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
							lbaf->ch_offset;
			ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
							lbaf->lun_offset;
			ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
							lbaf->chk_offset;
			ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
							lbaf->sec_offset;
		}
	}

	return ppa64;
}

/*
 * Compress a 64-bit ppa into the 32-bit L2P encoding (inverse of
 * pblk_ppa32_to_ppa64); only valid when pblk->addrf_len < 32.
 */
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		/* unmapped sentinel */
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		/* cache-resident: entry index plus the cache tag bit */
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		if (geo->version == NVM_OCSSD_SPEC_12) {
			struct nvm_addrf_12 *ppaf =
					(struct nvm_addrf_12 *)&pblk->addrf;

			ppa32 |= ppa64.g.ch << ppaf->ch_offset;
			ppa32 |= ppa64.g.lun << ppaf->lun_offset;
			ppa32 |= ppa64.g.blk << ppaf->blk_offset;
			ppa32 |= ppa64.g.pg << ppaf->pg_offset;
			ppa32 |= ppa64.g.pl << ppaf->pln_offset;
			ppa32 |= ppa64.g.sec << ppaf->sec_offset;
		} else {
			struct nvm_addrf *lbaf = &pblk->addrf;

			ppa32 |= ppa64.m.grp << lbaf->ch_offset;
			ppa32 |= ppa64.m.pu << lbaf->lun_offset;
			ppa32 |= ppa64.m.chk << lbaf->chk_offset;
			ppa32 |= ppa64.m.sec << lbaf->sec_offset;
		}
	}

	return ppa32;
}

/*
 * Look up the L2P entry for @lba. The map is stored compactly as u32
 * when the device address fits in under 32 bits, otherwise as full
 * 64-bit ppa entries. Caller is responsible for trans_map locking.
 */
static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
								sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->addrf_len < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}

/*
 * Store the L2P entry for @lba, using the same compact-u32 vs full-u64
 * layout as pblk_trans_map_get(). Caller handles trans_map locking.
 */
static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
						struct ppa_addr ppa)
{
	if (pblk->addrf_len < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}

/* True when @ppa_addr holds the reserved "unmapped" sentinel. */
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return ppa_addr.ppa == ADDR_EMPTY;
}

/* Mark @ppa_addr as unmapped. */
static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

/* Compare two physical addresses for exact equality (raw 64-bit value). */
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
	return (lppa.ppa == rppa.ppa);
}

/* True when the mapped address points into the write buffer (cache). */
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

/* Write-buffer entry index of a cache-resident address. */
static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

/* Build a cache-resident address for write-buffer entry @addr. */
static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.is_cached = 1;
	p.c.line = addr;

	return p;
}

/*
 * CRC32 of a line header, skipping the leading crc field itself so the
 * stored checksum does not feed back into the calculation.
 */
static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
					    struct line_header *header)
{
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

/*
 * CRC32 of start-of-line metadata, covering everything after the line
 * header and the smeta's own crc field.
 */
static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

/*
 * CRC32 of end-of-line metadata (first emeta region, emeta_len[0]),
 * covering everything after the line header and the crc field.
 */
static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

/* True when @nr_secs is a whole multiple of the minimum write size. */
static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_PBLK_DEBUG
/*
 * Debug helper: log a physical address in the format matching where it
 * lives (write-buffer cache line, 1.2 geometry, or 2.0 geometry).
 * @msg and @error tag the log line with the caller's context.
 */
static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
			     char *msg, int error)
{
	struct nvm_geo *geo = &pblk->dev->geo;

	if (p->c.is_cached) {
		pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else if (geo->version == NVM_OCSSD_SPEC_12) {
		pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	} else {
		pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
			msg, error,
			p->m.grp, p->m.pu, p->m.chk, p->m.sec);
	}
}

/*
 * Debug helper: dump every ppa implicated in a failed request. For
 * multi-sector requests, only the ppas flagged in ppa_status (the
 * per-sector error bitmap) are printed.
 */
static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas ==  1) {
		print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
	}

	pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}

/*
 * Validate each ppa against the device geometry. Cache-resident
 * addresses are rejected (they must be translated before device I/O).
 * Returns 1 on the first out-of-range ppa (after logging it), else 0.
 */
static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
				       struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (geo->version == NVM_OCSSD_SPEC_12) {
			if (!ppa->c.is_cached &&
					ppa->g.ch < geo->num_ch &&
					ppa->g.lun < geo->num_lun &&
					ppa->g.pl < geo->num_pln &&
					ppa->g.blk < geo->num_chk &&
					ppa->g.pg < geo->num_pg &&
					ppa->g.sec < geo->ws_min)
				continue;
		} else {
			if (!ppa->c.is_cached &&
					ppa->m.grp < geo->num_ch &&
					ppa->m.pu < geo->num_lun &&
					ppa->m.chk < geo->num_chk &&
					ppa->m.sec < geo->clba)
				continue;
		}

		/* queuedata is the pblk instance; i identifies the bad ppa */
		print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);

		return 1;
	}
	return 0;
}

/*
 * Debug-only pre-submission check: every ppa must be within the device
 * geometry, and writes may only target lines in the OPEN state.
 * Returns 0 if the request looks sane, -EINVAL (with a WARN) otherwise.
 */
static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa_list;

	/* single-sector requests embed the ppa instead of a list */
	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_ppa_to_line(ppa)];

			/* line state is protected by line->lock */
			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}

	return 0;
}
#endif

/*
 * Sanity-check a line-local sector offset against the line size.
 * Returns 1 when out of bounds, 0 otherwise.
 *
 * NOTE(review): the comparison is '>' rather than '>=', so
 * paddr == sec_per_line passes even though valid offsets are
 * 0..sec_per_line-1. Confirm whether callers rely on accepting the
 * one-past-the-end value before tightening.
 */
static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}

/* Current bvec index of the bio's iterator. */
static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}

/* First 4KB logical block addressed by the bio (bi_sector is in 512B units). */
static inline sector_t pblk_get_lba(struct bio *bio)
{
	sector_t first_sec = bio->bi_iter.bi_sector;

	return first_sec / NR_PHY_IN_LOG;
}

/* Number of exposed (4KB) sectors spanned by the bio's payload. */
static inline unsigned int pblk_get_secs(struct bio *bio)
{
	unsigned int nr_bytes = bio->bi_iter.bi_size;

	return nr_bytes / PBLK_EXPOSED_PAGE_SIZE;
}

/* Generate a fresh random UUID identifying this pblk instance. */
static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	/* instance_uuid stores the raw 16-byte UUID */
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */