// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

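/*
 * Fixed tuning block patterns, as defined by the SD/eMMC specifications.
 * mmc_send_tuning() below reads a tuning block from the card and compares it
 * against the pattern that matches the current bus width.
 */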
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
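
/*
 * A minimal usage sketch (caller context assumed): poll CMD13 until the card
 * reports that it is ready for new data, e.g.
 *
 *	u32 status;
 *	int err;
 *
 *	do {
 *		err = mmc_send_status(card, &status);
 *	} while (!err && !mmc_ready_for_data(status));
 */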

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16-bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

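	/*
	 * The CMD4 argument carries the DSR value in its upper 16 bits; the
	 * lower 16 bits are stuff bits, set to all-ones below.
	 */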
	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass a DMA-capable buffer for @buf, or an
 * on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
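
/*
 * A minimal usage sketch (caller context assumed), mirroring
 * mmc_read_bkops_status() further down: the caller owns the returned buffer
 * and must kfree() it when done, e.g.
 *
 *	u8 *ext_csd;
 *	int err;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		... inspect the bytes of interest ...
 *		kfree(ext_csd);
 *	}
 */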

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool send_status, bool retry_crc_err,
			       enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	/*
	 * If we are not allowed to poll by using CMD13 and the host isn't
	 * capable of polling by using ->card_busy(), rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
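		/*
		 * The sleep interval doubles after each busy poll, from 32 us
		 * up to the udelay_max cap of 32768 us.
		 */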
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors occur while polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
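	/*
	 * CMD6 argument layout: bits [25:24] select the access mode (write
	 * byte here), [23:16] the EXT_CSD byte index, [15:8] the value to
	 * write, and [2:0] the command set.
	 */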
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a tuning
	 * command, but the SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fits the needed timeout for HPI.
	 * In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}

/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt and poll the card status until it is
 *	out of the prg-state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 *	mmc_run_bkops - Run BKOPS for supported cards
 *	@card: MMC card to run BKOPS for
 *
 *	Run background operations synchronously for cards that have manual
 *	BKOPS enabled, in case the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);

int mmc_sanitize(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			 1, MMC_SANITIZE_TIMEOUT_MS);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);