// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for a device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
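
/*
 * Usage sketch (illustrative only, compiled out; not part of the
 * original file): a typical way a driver might map BAR 0 at probe
 * time.  "my_probe" and "regs" are hypothetical names.
 */
#if 0
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		return -ENOMEM;

	/* ... access the device with readl(regs + ...) ... */
	return 0;
}
#endif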

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev:    the PCI device to test
 * @path:   string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev:    the PCI device to test
 * @p:      string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 *  (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
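
/*
 * Usage sketch (illustrative only, compiled out): scanning a
 * ';'-separated list such as "pci:8086:1521;0000:00:1f.0/0.0" against
 * a device.  "my_param_matches" and "list" are hypothetical names.
 */
#if 0
static bool my_param_matches(struct pci_dev *dev, const char *list)
{
	const char *p = list;
	int ret;

	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1)
			return true;	/* this entry matches the device */
		if (ret < 0 || *p != ';')
			break;		/* parse error or end of list */
		p++;			/* skip ';' and try the next entry */
	}
	return false;
}
#endif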

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
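
/*
 * Usage sketch (illustrative only, compiled out): locating the Power
 * Management capability and reading its PMC register.  "my_read_pmc"
 * is a hypothetical name.
 */
#if 0
static void my_read_pmc(struct pci_dev *pdev)
{
	int pos;
	u16 pmc;

	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (!pos)
		return;		/* device has no PM capability */

	pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
	/* ... inspect pmc ... */
}
#endif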

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
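
/*
 * Usage sketch (illustrative only, compiled out): walking every
 * instance of a capability that can occur several times, e.g. the
 * Vendor-Specific Extended Capability.  "my_count_vsec" is a
 * hypothetical name.
 */
#if 0
static int my_count_vsec(struct pci_dev *pdev)
{
	int pos = 0;
	int count = 0;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR)))
		count++;

	return count;
}
#endif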

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
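
/*
 * Usage sketch (illustrative only, compiled out): iterating over all
 * Hypertransport capabilities of one type with a TTL-style guard, as
 * the comment above recommends.  "my_count_ht_msi" is a hypothetical
 * name.
 */
#if 0
static int my_count_ht_msi(struct pci_dev *pdev)
{
	int pos, guard = PCI_FIND_CAP_TTL;
	int count = 0;

	pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && guard--) {
		count++;
		pos = pci_find_next_ht_capability(pdev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return count;
}
#endif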

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of the parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
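
/*
 * Usage sketch (illustrative only, compiled out): waiting for the PCIe
 * Transaction Pending bit to clear, one plausible use of this helper.
 * "my_wait_for_transaction" is a hypothetical name.
 */
#if 0
static int my_wait_for_transaction(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev))
		return 1;

	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}
#endif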

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state: we can enter D0 from any state, but
	 * we can only go deeper into sleep if we're already in a low
	 * power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		pci_info(dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
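
/*
 * Usage sketch (illustrative only, compiled out): powering a device
 * down to D3hot once it goes idle.  "my_go_idle" is a hypothetical
 * name.
 */
#if 0
static void my_go_idle(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
#endif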

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
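
/*
 * Usage sketch (illustrative only, compiled out): a legacy suspend
 * hook that lets pci_choose_state() pick the target D-state from the
 * system sleep message.  "my_suspend" is a hypothetical name.
 */
#if 0
static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
#endif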

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
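
/*
 * Usage sketch (illustrative only, compiled out): the resume-side
 * counterpart of a legacy suspend hook, returning the device to D0 and
 * restoring the config space saved earlier.  "my_resume" is a
 * hypothetical name.
 */
#if 0
static int my_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
#endif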

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
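
/*
 * Usage sketch (illustrative only, compiled out): stashing saved state
 * across an operation that clobbers config space and reloading it
 * afterwards.  "my_reset_with_state" is a hypothetical name.
 */
#if 0
static int my_reset_with_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);
	if (!state)
		return -ENOMEM;

	/* ... reset the device here ... */

	pci_load_and_free_saved_state(pdev, &state);
	pci_restore_state(pdev);
	return 0;
}
#endif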

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 *  Note this function is a backend of pci_default_resume and is not supposed
 *  to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip SR-IOV related resources */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
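
/*
 * Usage sketch (illustrative only, compiled out): the usual
 * enable/request/bus-master sequence at the top of a probe routine.
 * "my_probe_enable" and the "my_driver" region name are hypothetical.
 */
#if 0
static int my_probe_enable(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "my_driver");
	if (rc) {
		pci_disable_device(pdev);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
#endif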

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);
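
/*
 * Example (illustrative sketch): with the managed interface the enable is
 * undone automatically on driver detach, so no remove-side cleanup is
 * needed; pcim_pin_device() opts out of that automatic disable.  The "foo"
 * names, including foo_must_stay_enabled(), are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		if (foo_must_stay_enabled(pdev))
 *			pcim_pin_device(pdev);
 *		return 0;
 *	}
 */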

/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If the bridge is in a low-power state, the
			 * configuration space of subordinate devices
			 * may not be accessible.
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 *  always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges can only signal wakeup on behalf of subordinate devices,
	 * but that is set up elsewhere, so skip them.
	 */
	if (pci_has_subordinate(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);
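
/*
 * Example (illustrative sketch): the common driver pattern is to arm
 * wake-up exactly when user space has allowed it, e.g. from a suspend
 * hook:
 *
 *	pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
 *
 * Passing 'false' also tears down any PME# and platform wakeup
 * configuration left over from an earlier call.
 */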

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not allowed to
 * wake up the system from sleep or it is not capable of generating PME# from
 * both D3_hot and D3_cold and the platform is unable to enable wake-up power
 * for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

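/*
 * Example (illustrative sketch): a WoL-capable network driver could arm
 * wake-up from D3 in its suspend path with:
 *
 *	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 *
 * letting this helper pick D3cold when the device can signal PME# from
 * there and D3hot otherwise.
 */
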
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

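/*
 * Example (illustrative sketch): a legacy-PM driver might use these helpers
 * as the final/first steps of its suspend/resume callbacks (the "foo" names
 * are hypothetical):
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_back_from_sleep(pdev);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */
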
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
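
/*
 * Example (illustrative sketch, loosely modeled on network drivers; details
 * vary per driver): some drivers gate runtime PM on this check, keeping the
 * device awake when it cannot signal run-time wake-up:
 *
 *	if (pci_dev_run_wake(pdev))
 *		pm_runtime_put_noidle(&pdev->dev);
 */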

/**
 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is runtime-suspended, doesn't have to be
 * reconfigured due to wakeup settings differences between system and runtime
 * suspend, and its current power state is suitable for the upcoming
 * (system) transition.
 *
 * If the device is not configured for system wakeup, disable PME for it before
 * returning 'true' to prevent it from waking up the system unnecessarily.
 */
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	bool wakeup = device_may_wakeup(dev);

	if (!pm_runtime_suspended(dev)
	    || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
	    || platform_pci_need_resume(pci_dev))
		return false;

	/*
	 * At this point the device is good to go unless it's been configured
	 * to generate PME at the runtime suspend time, but it is not supposed
	 * to wake up the system.  In that case, simply disable PME for it
	 * (it will have to be re-enabled on exit from system resume).
	 *
	 * If the device's power state is D3cold and the platform check above
	 * hasn't triggered, the device's configuration is suitable and we don't
	 * need to manipulate it at all.
	 */
	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
	    !wakeup)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
	return true;
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug interrupts cannot be delivered if the link is down,
		 * so parents of a hotplug port must stay awake. In addition,
		 * hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 * For simplicity, disallow in general for now.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (pci_bridge_d3_force)
			return true;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
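
/*
 * Example (illustrative sketch): a driver working around hardware that does
 * not survive D3cold could disable it at probe time and restore the default
 * on remove:
 *
 *	pci_d3cold_disable(pdev);	(in probe)
 *	pci_d3cold_enable(pdev);	(in remove)
 */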

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}

/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
		goto out;

	bei = (dw0 & PCI_EA_BEI) >> 4;
	prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	if (!res) {
		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	if (end < start) {
		pci_err(dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	if (ent_size != ent_offset - offset) {
		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
			   res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
	else
		pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);

out:
	return offset + ent_size;
}

/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
	int ea;
	u8 num_ent;
	int offset;
	int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
					&num_ent);
	num_ent &= PCI_EA_NUM_ENT_MASK;

	offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
}

static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *                            capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
				    bool extended, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	if (extended)
		pos = pci_find_ext_capability(dev, cap);
	else
		pos = pci_find_capability(dev, cap);

	if (!pos)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.cap_extended = extended;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, false, size);
}

int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, true, size);
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		pci_err(dev, "unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		pci_err(dev, "unable to preallocate PCI-X save buffer\n");

	pci_allocate_vc_save_buffers(dev);
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
		kfree(tmp);
}

/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
{
	u32 cap;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 1;
	} else {
		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 0;
	}
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * Only for devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int pos;
	u16 cap, ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return false;

	/*
	 * Except for egress control, capabilities are either required
	 * or only required if controllable.  Features missing from the
	 * capability field can therefore be assumed as hard-wired enabled.
	 */
	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
	return (ctrl & acs_flags) == acs_flags;
}

/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int ret;

	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	/*
	 * Conventional PCI and PCI-X devices never support ACS, either
	 * effectively or actually.  The shared bus topology implies that
	 * any device on the bus can receive or snoop DMA.
	 */
	if (!pci_is_pcie(pdev))
		return false;

	switch (pci_pcie_type(pdev)) {
	/*
	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
	 * but since their primary interface is PCI/X, we conservatively
	 * handle them as we would a non-PCIe device.
	 */
	case PCI_EXP_TYPE_PCIE_BRIDGE:
	/*
	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
	 * applicable... must never implement an ACS Extended Capability...".
	 * This seems arbitrary, but we take a conservative interpretation
	 * of this statement.
	 */
	case PCI_EXP_TYPE_PCI_BRIDGE:
	case PCI_EXP_TYPE_RC_EC:
		return false;
	/*
	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
	 * implement ACS in order to indicate their peer-to-peer capabilities,
	 * regardless of whether they are single- or multi-function devices.
	 */
	case PCI_EXP_TYPE_DOWNSTREAM:
	case PCI_EXP_TYPE_ROOT_PORT:
		return pci_acs_flags_enabled(pdev, acs_flags);
	/*
	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
	 * implemented by the remaining PCIe types to indicate peer-to-peer
	 * capabilities, but only when they are part of a multifunction
	 * device.  The footnote for section 6.12 indicates the specific
	 * PCIe types included here.
	 */
	case PCI_EXP_TYPE_ENDPOINT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_LEG_END:
	case PCI_EXP_TYPE_RC_END:
		if (!pdev->multifunction)
			break;

		return pci_acs_flags_enabled(pdev, acs_flags);
	}

	/*
	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
	 * to single function devices with the exception of downstream ports.
	 */
	return true;
}

/**
 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags)
{
	struct pci_dev *pdev, *parent = start;

	do {
		pdev = parent;

		if (!pci_acs_enabled(pdev, acs_flags))
			return false;

		if (pci_is_root_bus(pdev->bus))
			return (end == NULL);

		parent = pdev->bus->self;
	} while (pdev != end);

	return true;
}
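
/*
 * Example (illustrative sketch): an IOMMU layer can use this to decide
 * whether peer-to-peer DMA between a device and the root is isolated, e.g.:
 *
 *	bool isolated = pci_acs_path_enabled(pdev, NULL,
 *			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 *
 * A false result means some bridge on the path may allow unfiltered
 * peer-to-peer traffic.
 */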

/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return -ENOTSUPP;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		int bar_idx;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		if (bar_idx == bar)
			return pos;
	}

	return -ENOENT;
}

/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
 */
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
{
	int pos;
	u32 cap;

	pos = pci_rebar_find_pos(pdev, bar);
	if (pos < 0)
		return 0;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
	return (cap & PCI_REBAR_CAP_SIZES) >> 4;
}

/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
{
	int pos;
	u32 ctrl;

	pos = pci_rebar_find_pos(pdev, bar);
	if (pos < 0)
		return pos;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
}

/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
{
	int pos;
	u32 ctrl;

	pos = pci_rebar_find_pos(pdev, bar);
	if (pos < 0)
		return pos;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
	ctrl |= size << 8;
	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	return 0;
}
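
/*
 * Example (illustrative sketch): a GPU driver resizing BAR 0 to the largest
 * size the device advertises.  Real callers must first release and later
 * reassign the BAR's resource; that bookkeeping is omitted here.
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *	if (sizes)
 *		pci_rebar_set_size(pdev, 0, __fls(sizes));
 */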

/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
{
	struct pci_bus *bus = dev->bus;
	struct pci_dev *bridge;
	u32 cap, ctl2;

	if (!pci_is_pcie(dev))
		return -EINVAL;

	/*
	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
	 * AtomicOp requesters.  For now, we only support endpoints as
	 * requesters and root ports as completers.  No endpoints as
	 * completers, and no peer-to-peer.
	 */

	switch (pci_pcie_type(dev)) {
	case PCI_EXP_TYPE_ENDPOINT:
	case PCI_EXP_TYPE_LEG_END:
	case PCI_EXP_TYPE_RC_END:
		break;
	default:
		return -EINVAL;
	}

	while (bus->parent) {
		bridge = bus->self;

		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);

		switch (pci_pcie_type(bridge)) {
		/* Ensure switch ports support AtomicOp routing */
		case PCI_EXP_TYPE_UPSTREAM:
		case PCI_EXP_TYPE_DOWNSTREAM:
			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
				return -EINVAL;
			break;

		/* Ensure root port supports all the sizes we care about */
		case PCI_EXP_TYPE_ROOT_PORT:
			if ((cap & cap_mask) != cap_mask)
				return -EINVAL;
			break;
		}

		/* Ensure upstream ports don't block AtomicOps on egress */
		if (!bridge->has_secondary_link) {
			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
						   &ctl2);
			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
				return -EINVAL;
		}

		bus = bus->parent;
	}

	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
	return 0;
}
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
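
/*
 * Example (illustrative sketch): an endpoint driver that wants to issue
 * 64-bit AtomicOps to host memory can probe for routing/completer support
 * and fall back gracefully:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps not available\n");
 */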

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
3426 3427
}
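
/*
 * Worked example (illustrative only): a device in slot 2 asserting INTB
 * (pin 2) behind one bridge yields (((2 - 1) + 2) % 4) + 1 = 4, i.e. the
 * interrupt appears as INTD on the upstream bus.
 */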

/**
 * pci_get_interrupt_pin - identify the INTx pin seen at the root bus
 * @dev: the PCI device
 * @bridge: set to the device whose bus is the root bus (the last bridge
 *	crossed, or @dev itself if it already sits on the root bus)
 *
 * Walk up from @dev towards the root bus, swizzling the INTx pin at each
 * PCI-to-PCI bridge.  Returns the resulting pin number (1=INTA .. 4=INTD),
 * or -1 if the device does not use INTx interrupts.
 */
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_common_swizzle);

/**
 *	pci_release_region - Release a PCI BAR
 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
 *	@bar: BAR to release
 *
 *	Releases the PCI I/O and memory resources previously reserved by a
 *	successful call to pci_request_region.  Call this function only
 *	after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);

/**
 *	__pci_request_region - Reserve PCI I/O and memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
 *	@res_name: Name to be associated with resource.
 *	@exclusive: whether the region access is exclusive or not
 *
 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	If @exclusive is set, then the region is marked so that userspace
 *	is explicitly not allowed to map the resource via /dev/mem or
 *	sysfs MMIO access.
 *
 *	Returns 0 on success, or %EBUSY on error.  A warning
 *	message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar,
				const char *res_name, int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 *	pci_request_region - Reserve PCI I/O and memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
 *	@res_name: Name to be associated with resource
 *
 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
EXPORT_SYMBOL(pci_request_region);
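
/*
 * Usage sketch (illustrative only; "foo" is a made-up driver name): a
 * typical probe() reserves a BAR before touching it, and the matching
 * remove() calls pci_release_region():
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_region(pdev, 0, "foo");
 *	if (err) {
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */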

/**
 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
 *	@pdev: PCI device whose resources are to be reserved
 *	@bar: BAR to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %EBUSY on error.  A warning
 *	message is also printed on failure.
 *
 *	The key difference of _exclusive is that userspace is
 *	explicitly not allowed to map the resource via /dev/mem or
 *	sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
				 const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_region_exclusive);

/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}
EXPORT_SYMBOL(pci_release_selected_regions);

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
					  const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);

int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
					   const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);

/**
 *	pci_release_regions - Release reserved PCI I/O and memory resources
 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 *	Releases all PCI I/O and memory resources previously reserved by a
 *	successful call to pci_request_regions.  Call this function only
 *	after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
EXPORT_SYMBOL(pci_release_regions);

/**
 *	pci_request_regions - Reserve PCI I/O and memory resources
 *	@pdev: PCI device whose resources are to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark all PCI regions associated with PCI device @pdev as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	Returns 0 on success, or %EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions);

/**
 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 *	@pdev: PCI device whose resources are to be reserved
 *	@res_name: Name to be associated with resource.
 *
 *	Mark all PCI regions associated with PCI device @pdev as
 *	being reserved by owner @res_name.  Do not access any
 *	address inside the PCI regions unless this call returns
 *	successfully.
 *
 *	pci_request_regions_exclusive() will mark the region so that
 *	/dev/mem and the sysfs MMIO access will not be allowed.
 *
 *	Returns 0 on success, or %EBUSY on error.  A warning
 *	message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);

/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
 */
int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
			resource_size_t	size)
{
	int ret = 0;
#ifdef PCI_IOBASE
	struct logic_pio_hwaddr *range;

	if (!size || addr + size < addr)
		return -EINVAL;

	range = kzalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	range->fwnode = fwnode;
	range->size = size;
	range->hw_start = addr;
	range->flags = LOGIC_PIO_CPU_MMIO;

	ret = logic_pio_register_range(range);
	if (ret)
		kfree(range);
#endif

	return ret;
}

phys_addr_t pci_pio_to_address(unsigned long pio)
{
	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;

#ifdef PCI_IOBASE
	if (pio >= MMIO_UPPER_LIMIT)
		return address;

	address = logic_pio_to_hwaddr(pio);
#endif

	return address;
}

unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
#ifdef PCI_IOBASE
	return logic_pio_trans_cpuaddr(address);
#else
	if (address > IO_SPACE_LIMIT)
		return (unsigned long)-1;

	return (unsigned long) address;
#endif
}

/**
 *	pci_remap_iospace - Remap the memory mapped I/O space
 *	@res: Resource describing the I/O space
 *	@phys_addr: physical address of range to be mapped
 *
 *	Remap the memory mapped I/O space described by the @res
 *	and the CPU physical address @phys_addr into virtual address space.
 *	Only architectures that have memory mapped IO functions defined
 *	(and the PCI_IOBASE value defined) should call this function.
 */
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
				  pgprot_device(PAGE_KERNEL));
#else
	/* this architecture does not have memory mapped I/O space,
	   so this function should never be called */
	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
	return -ENODEV;
#endif
}
EXPORT_SYMBOL(pci_remap_iospace);
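
/*
 * Usage sketch (illustrative only): a host bridge driver that has parsed
 * an I/O resource @res with CPU physical base @phys_addr would map it
 * with:
 *
 *	err = pci_remap_iospace(res, phys_addr);
 *	if (err)
 *		dev_warn(dev, "error %d: failed to map I/O %pR\n", err, res);
 */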

/**
 *	pci_unmap_iospace - Unmap the memory mapped I/O space
 *	@res: resource to be unmapped
 *
 *	Unmap the CPU virtual address @res from virtual address space.
 *	Only architectures that have memory mapped IO functions defined
 *	(and the PCI_IOBASE value defined) should call this function.
 */
void pci_unmap_iospace(struct resource *res)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	unmap_kernel_range(vaddr, resource_size(res));
#endif
}
EXPORT_SYMBOL(pci_unmap_iospace);

/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
				      resource_size_t offset,
				      resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = pci_remap_cfgspace(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_pci_remap_cfgspace);

/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
					  struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		pci_dbg(dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
{
	return str;
}

/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;

	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}
EXPORT_SYMBOL(pci_set_master);

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
EXPORT_SYMBOL(pci_clear_master);

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
		   pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_mwi(struct pci_dev *dev)
{
#ifdef PCI_DISABLE_MWI
	return 0;
#else
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
#endif
}
EXPORT_SYMBOL(pci_set_mwi);

/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pcim_set_mwi(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (!dr)
		return -ENOMEM;

	dr->mwi = 1;
	return pci_set_mwi(dev);
}
EXPORT_SYMBOL(pcim_set_mwi);

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
#ifdef PCI_DISABLE_MWI
	return 0;
#else
	return pci_set_mwi(dev);
#endif
}
EXPORT_SYMBOL(pci_try_set_mwi);
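
/*
 * Usage sketch (illustrative only): a driver for which MWI is purely an
 * optimization can simply ignore the result, in contrast to pci_set_mwi():
 *
 *	pci_try_set_mwi(pdev);
 */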

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void pci_clear_mwi(struct pci_dev *dev)
{
#ifndef PCI_DISABLE_MWI
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
#endif
}
EXPORT_SYMBOL(pci_clear_mwi);

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}
EXPORT_SYMBOL_GPL(pci_intx);

static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}

/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case. False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);

/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true. False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
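
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical): the
 * hard-IRQ half of a threaded handler on a shared INTx line can use
 * pci_check_and_mask_intx() to claim and silence the interrupt, with the
 * thread unmasking it again once the device has been serviced:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *foo = data;
 *
 *		if (!pci_check_and_mask_intx(foo->pdev))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 */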

/**
 * pci_wait_for_pending_transaction - waits for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if transaction is pending, 1 otherwise.
 */
int pci_wait_for_pending_transaction(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev))
		return 1;

	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
				    PCI_EXP_DEVSTA_TRPND);
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev:	device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
static bool pcie_has_flr(struct pci_dev *dev)
{
	u32 cap;

	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
		return false;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
	return cap & PCI_EXP_DEVCAP_FLR;
}

/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev:	device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
int pcie_flr(struct pci_dev *dev)
{
	if (!pci_wait_for_pending_transaction(dev))
		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");

	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
	msleep(100);

	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pcie_flr);
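
/*
 * Usage sketch (illustrative only), following the rule in the kernel-doc
 * above that callers check for FLR support first:
 *
 *	if (pcie_has_flr(dev)) {
 *		rc = pcie_flr(dev);
 *		if (rc != -ENOTTY)
 *			return rc;
 *	}
 */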

static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int pos;
	u8 cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */
	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
				 PCI_AF_STATUS_TP << 8))
		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");

	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);

	/*
	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
	 * updated 27 July 2006; a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
	msleep(100);

	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
}

/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
/**
 * pcie_wait_for_link - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 *
 * Use this to wait till link becomes active or inactive.
 */
bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
{
	int timeout = 1000;
	bool ret;
	u16 lnk_status;

	for (;;) {
		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
		if (ret == active)
			return true;
		if (timeout <= 0)
			break;
		msleep(10);
		timeout -= 10;
	}

	pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
		 active ? "set" : "cleared");

	return false;
}
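
/*
 * Usage sketch (illustrative only): a hotplug or reset path can combine
 * this with a secondary bus reset to wait for the link to retrain:
 *
 *	pci_reset_secondary_bus(bridge);
 *	if (!pcie_wait_for_link(bridge, true))
 *		return -ETIMEDOUT;
 */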

void pci_reset_secondary_bus(struct pci_dev *dev)
{
	u16 ctrl;

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */
	msleep(2);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized.  PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */
	ssleep(1);
}

void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	pci_reset_secondary_bus(dev);
}

/**
 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
{
	pcibios_reset_secondary_bus(dev);

	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);

static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_reset_bridge_secondary_bus(dev->bus->self);

	return 0;
}

static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
{
	int rc = -ENOTTY;

	if (!hotplug || !try_module_get(hotplug->ops->owner))
		return rc;

	if (hotplug->ops->reset_slot)
		rc = hotplug->ops->reset_slot(hotplug, probe);

	module_put(hotplug->ops->owner);

	return rc;
}

static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
	struct pci_dev *pdev;

	if (dev->subordinate || !dev->slot ||
	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev && pdev->slot == dev->slot)
			return -ENOTTY;

	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}

static void pci_dev_lock(struct pci_dev *dev)
{
	pci_cfg_access_lock(dev);
	/* block PM suspend, driver probe, etc. */
	device_lock(&dev->dev);
}

/* Return 1 on successful lock, 0 on contention */
static int pci_dev_trylock(struct pci_dev *dev)
{
	if (pci_cfg_access_trylock(dev)) {
		if (device_trylock(&dev->dev))
			return 1;
		pci_cfg_access_unlock(dev);
	}

	return 0;
}

static void pci_dev_unlock(struct pci_dev *dev)
{
	device_unlock(&dev->dev);
	pci_cfg_access_unlock(dev);
}

static void pci_dev_save_and_disable(struct pci_dev *dev)
{
	const struct pci_error_handlers *err_handler =
			dev->driver ? dev->driver->err_handler : NULL;

	/*
	 * dev->driver->err_handler->reset_prepare() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */
	if (err_handler && err_handler->reset_prepare)
		err_handler->reset_prepare(dev);

	/*
	 * Wake-up device prior to save.  PM registers default to D0 after
	 * reset and a simple register restore doesn't reliably return
	 * to a non-D0 state anyway.
	 */
	pci_set_power_state(dev, PCI_D0);

	pci_save_state(dev);
	/*
	 * Disable the device by clearing the Command register, except for
	 * INTx-disable which is set.  This not only disables MMIO and I/O port
	 * BARs, but also prevents the device from being Bus Master, preventing
	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
	 * compliant devices, INTx-disable prevents legacy interrupts.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}

static void pci_dev_restore(struct pci_dev *dev)
{
	const struct pci_error_handlers *err_handler =
			dev->driver ? dev->driver->err_handler : NULL;

	pci_restore_state(dev);

	/*
	 * dev->driver->err_handler->reset_done() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */
	if (err_handler && err_handler->reset_done)
		err_handler->reset_done(dev);
}

/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	int rc;

	might_sleep();

	/*
	 * A reset method returns -ENOTTY if it doesn't support this device
	 * and we should try the next method.
	 *
	 * If it returns 0 (success), we're finished.  If it returns any
	 * other error, we're also finished: this indicates that further
	 * reset mechanisms might be broken on the device.
	 */
	rc = pci_dev_specific_reset(dev, 0);
	if (rc != -ENOTTY)
		return rc;
	if (pcie_has_flr(dev)) {
		rc = pcie_flr(dev);
		if (rc != -ENOTTY)
			return rc;
	}
	rc = pci_af_flr(dev, 0);
	if (rc != -ENOTTY)
		return rc;
	rc = pci_pm_reset(dev, 0);
	if (rc != -ENOTTY)
		return rc;
	rc = pci_dev_reset_slot_function(dev, 0);
	if (rc != -ENOTTY)
		return rc;
	return pci_parent_bus_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);

/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	int rc;

	might_sleep();

	rc = pci_dev_specific_reset(dev, 1);
	if (rc != -ENOTTY)
		return rc;
	if (pcie_has_flr(dev))
		return 0;
	rc = pci_af_flr(dev, 1);
	if (rc != -ENOTTY)
		return rc;
	rc = pci_pm_reset(dev, 1);
	if (rc != -ENOTTY)
		return rc;
	rc = pci_dev_reset_slot_function(dev, 1);
	if (rc != -ENOTTY)
		return rc;

	return pci_parent_bus_reset(dev, 1);
}

/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	if (!dev->reset_fn)
		return -ENOTTY;

	pci_dev_lock(dev);
	pci_dev_save_and_disable(dev);

	rc = __pci_reset_function_locked(dev);

	pci_dev_restore(dev);
	pci_dev_unlock(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
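
/*
 * Usage sketch (illustrative only): a caller that must hand a quiesced
 * function to a new owner might do:
 *
 *	rc = pci_reset_function(dev);
 *	if (rc)
 *		pci_warn(dev, "function reset failed: %d\n", rc);
 */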

/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset.  It also differs from pci_reset_function() in that it
 * requires the PCI device lock to be held.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function_locked(struct pci_dev *dev)
{
	int rc;

	if (!dev->reset_fn)
		return -ENOTTY;

	pci_dev_save_and_disable(dev);

	rc = __pci_reset_function_locked(dev);

	pci_dev_restore(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function_locked);

/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
int pci_try_reset_function(struct pci_dev *dev)
{
	int rc;

	if (!dev->reset_fn)
		return -ENOTTY;

	if (!pci_dev_trylock(dev))
		return -EAGAIN;

	pci_dev_save_and_disable(dev);
	rc = __pci_reset_function_locked(dev);
	pci_dev_restore(dev);
	pci_dev_unlock(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);

/* Do any devices on or below this bus prevent a bus reset? */
static bool pci_bus_resetable(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
		return false;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
			return false;
	}

	return true;
}

/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_lock(dev);
		if (dev->subordinate)
			pci_bus_lock(dev->subordinate);
	}
}

/* Unlock devices from the bottom of the tree up */
static void pci_bus_unlock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
}

/* Return 1 on successful lock, 0 on contention */
static int pci_bus_trylock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!pci_dev_trylock(dev))
			goto unlock;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				pci_dev_unlock(dev);
				goto unlock;
			}
		}
	}
	return 1;

unlock:
	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
	return 0;
}

/* Do any devices on or below this slot prevent a bus reset? */
static bool pci_slot_resetable(struct pci_slot *slot)
{
	struct pci_dev *dev;

	if (slot->bus->self &&
	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
		return false;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
			return false;
	}

	return true;
}

/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_lock(dev);
		if (dev->subordinate)
			pci_bus_lock(dev->subordinate);
	}
}

/* Unlock devices from the bottom of the tree up */
static void pci_slot_unlock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
}

/* Return 1 on successful lock, 0 on contention */
static int pci_slot_trylock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (!pci_dev_trylock(dev))
			goto unlock;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				pci_dev_unlock(dev);
				goto unlock;
			}
		}
	}
	return 1;

unlock:
	list_for_each_entry_continue_reverse(dev,
					     &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
	return 0;
}

/* Save and disable devices from the top of the tree down */
static void pci_bus_save_and_disable(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_lock(dev);
		pci_dev_save_and_disable(dev);
		pci_dev_unlock(dev);
		if (dev->subordinate)
			pci_bus_save_and_disable(dev->subordinate);
	}
}

/*
 * Restore devices from top of the tree down - parent bridges need to be
 * restored before we can get to subordinate devices.
 */
static void pci_bus_restore(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_lock(dev);
		pci_dev_restore(dev);
		pci_dev_unlock(dev);
		if (dev->subordinate)
			pci_bus_restore(dev->subordinate);
	}
}

/* Save and disable devices from the top of the tree down */
static void pci_slot_save_and_disable(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_save_and_disable(dev);
		if (dev->subordinate)
			pci_bus_save_and_disable(dev->subordinate);
	}
}

/*
 * Restore devices from top of the tree down - parent bridges need to be
 * restored before we can get to subordinate devices.
 */
static void pci_slot_restore(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_lock(dev);
		pci_dev_restore(dev);
		pci_dev_unlock(dev);
		if (dev->subordinate)
			pci_bus_restore(dev->subordinate);
	}
}

static int pci_slot_reset(struct pci_slot *slot, int probe)
{
	int rc;

	if (!slot || !pci_slot_resetable(slot))
		return -ENOTTY;

	if (!probe)
		pci_slot_lock(slot);

	might_sleep();

	rc = pci_reset_hotplug_slot(slot->hotplug, probe);

	if (!probe)
		pci_slot_unlock(slot);

	return rc;
}

/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
int pci_probe_reset_slot(struct pci_slot *slot)
{
	return pci_slot_reset(slot, 1);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);

/**
 * pci_reset_slot - reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * function of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Return 0 on success, non-zero on error.
 */
int pci_reset_slot(struct pci_slot *slot)
{
	int rc;

	rc = pci_slot_reset(slot, 1);
	if (rc)
		return rc;

	pci_slot_save_and_disable(slot);

	rc = pci_slot_reset(slot, 0);

	pci_slot_restore(slot);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_slot);

/**
 * pci_try_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * Same as above except return -EAGAIN if the slot cannot be locked
 */
int pci_try_reset_slot(struct pci_slot *slot)
{
	int rc;

	rc = pci_slot_reset(slot, 1);
	if (rc)
		return rc;

	pci_slot_save_and_disable(slot);

	if (pci_slot_trylock(slot)) {
		might_sleep();
		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
		pci_slot_unlock(slot);
	} else
		rc = -EAGAIN;

	pci_slot_restore(slot);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_slot);

static int pci_bus_reset(struct pci_bus *bus, int probe)
{
	if (!bus->self || !pci_bus_resetable(bus))
		return -ENOTTY;

	if (probe)
		return 0;

	pci_bus_lock(bus);

	might_sleep();

	pci_reset_bridge_secondary_bus(bus->self);

	pci_bus_unlock(bus);

	return 0;
}

/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
int pci_probe_reset_bus(struct pci_bus *bus)
{
	return pci_bus_reset(bus, 1);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);

/**
 * pci_reset_bus - reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Do a bus reset on the given bus and any subordinate buses, saving
 * and restoring state of all devices.
 *
 * Return 0 on success, non-zero on error.
 */
int pci_reset_bus(struct pci_bus *bus)
{
	int rc;

	rc = pci_bus_reset(bus, 1);
	if (rc)
		return rc;

	pci_bus_save_and_disable(bus);

	rc = pci_bus_reset(bus, 0);

	pci_bus_restore(bus);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
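
/*
 * Illustrative sketch, not part of the original file: VFIO-style users
 * first probe, then either block until the bus can be locked or bail
 * out via pci_try_reset_bus() below.  pci_demo_reset_bus is a
 * hypothetical name used only for this example.
 */
static inline int pci_demo_reset_bus(struct pci_bus *bus)
{
	if (pci_probe_reset_bus(bus))
		return -ENOTTY;

	/* Blocking variant; use pci_try_reset_bus() to avoid waiting */
	return pci_reset_bus(bus);
}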

/**
 * pci_try_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
int pci_try_reset_bus(struct pci_bus *bus)
{
	int rc;

	rc = pci_bus_reset(bus, 1);
	if (rc)
		return rc;

	pci_bus_save_and_disable(bus);

	if (pci_bus_trylock(bus)) {
		might_sleep();
		pci_reset_bridge_secondary_bus(bus->self);
		pci_bus_unlock(bus);
	} else
		rc = -EAGAIN;

	pci_bus_restore(bus);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_bus);

/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
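
/*
 * Worked example for the decode above (added for illustration): the
 * PCI-X MAX_READ field occupies bits 22:21 of the status register, so
 * (stat & PCI_X_STATUS_MAX_READ) >> 21 yields 0..3, and 512 << field
 * maps that onto 512, 1024, 2048 or 4096 bytes.  A field value of 2
 * therefore decodes to 512 << 2 = 2048 bytes.
 */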

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count; some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
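
/*
 * Worked example for the encoding above (added for illustration): for
 * mmrbc = 2048, ffs(2048) = 12, so v = 12 - 10 = 2; shifted left by 2
 * it lands in the MAX_READ field of PCI_X_CMD (bits 3:2).  This is the
 * inverse of the "512 << field" decode used by pcix_get_mmrbc().
 */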

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);
}
EXPORT_SYMBOL(pcie_set_readrq);
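
/*
 * Worked example (added for illustration): for rq = 512,
 * ffs(512) - 8 = 10 - 8 = 2, and 2 << 12 = 0x2000, i.e. the value 2
 * in the READRQ field (bits 14:12 of the Device Control register).
 * pcie_get_readrq() inverts this as 128 << 2 = 512 bytes.  A driver
 * wanting a smaller read request size might do, hypothetically:
 *
 *	pcie_set_readrq(pdev, 256);
 */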

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);
}
EXPORT_SYMBOL(pcie_set_mps);

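/*
 * Worked example (added for illustration): for mps = 256,
 * ffs(256) - 8 = 9 - 8 = 1; the check against dev->pcie_mpss rejects
 * payloads beyond the device's advertised Max_Payload_Size Supported,
 * and 1 << 5 = 0x20 places the value in the PAYLOAD field (bits 7:5
 * of the Device Control register).  pcie_get_mps() decodes it back as
 * 128 << 1 = 256 bytes.
 */
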
/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
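
/*
 * Worked example (added for illustration; figures assume the
 * PCIE_SPEED2MBS_ENC() encodings used here): a device on an x4 link at
 * 8.0 GT/s contributes 4 * 7877 Mb/s = 31508 Mb/s, i.e. about 31.5 Gb/s
 * of raw bandwidth after 128b/130b encoding overhead.  If an upstream
 * switch port only runs x2 at 5.0 GT/s (2 * 4000 Mb/s), that port is
 * returned as the limiting device.
 */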

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported, falling
	 * back to Max Link Speed in Link Capabilities otherwise.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
			return PCIE_SPEED_16_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			return PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			return PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			return PCIE_SPEED_2_5GT;
		return PCI_SPEED_UNKNOWN;
	}

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap) {
		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
			return PCIE_SPEED_16_0GT;
		else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
			return PCIE_SPEED_8_0GT;
		else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			return PCIE_SPEED_5_0GT;
		else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			return PCIE_SPEED_2_5GT;
	}

	return PCI_SPEED_UNKNOWN;
}

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	return PCIE_LNK_WIDTH_UNKNOWN;
}

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.  If this is less than the
 * device is capable of, report the device's maximum possible bandwidth and
 * the upstream link that limits its performance to less than that.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 PCIE_SPEED2STR(speed_cap), width_cap);
	else
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 PCIE_SPEED2STR(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 PCIE_SPEED2STR(speed_cap), width_cap);
}
EXPORT_SYMBOL(pcie_print_link_status);
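
/*
 * Illustrative sketch, not part of the original file: drivers that care
 * about throughput typically call this once from their probe routine,
 * e.g.:
 *
 *	pcie_print_link_status(pdev);
 *
 * where pdev is the device being probed (hypothetical usage).
 */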

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);

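/*
 * Illustrative sketch, not part of the original file: pci_select_bars()
 * is commonly paired with pci_request_selected_regions(), e.g. a
 * hypothetical driver claiming only its memory BARs:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "demo_drv");
 */
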
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

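/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * GPU switching driver handing the legacy VGA ranges to one device
 * could enable decoding on it and on its upstream bridges:
 *
 *	pci_set_vga_state(pdev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */
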
/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn: alias slot and function
 *
 * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
 * It should be called early, preferably as PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
					      sizeof(long), GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	set_bit(devfn, dev->dma_alias_mask);
	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
		 PCI_SLOT(devfn), PCI_FUNC(devfn));
}

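/*
 * Illustrative sketch, not part of the original file: quirks typically
 * register aliases from a header fixup, e.g. a hypothetical device
 * (IDs below are placeholders) whose DMA is issued from function 0:
 *
 *	static void demo_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, demo_dma_alias);
 */
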
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask));
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the "ignore hotplug" setting to the parent bridge. */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!*p && !align)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}

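/*
 * Illustrative examples of the parameter parsed above (added for
 * clarity): the leading number is an alignment ORDER, i.e.
 * align = 1 << order, and the device part is matched by
 * pci_dev_str_match().  Without an "order@" prefix, PAGE_SIZE
 * alignment is used.  For example (IDs are placeholders):
 *
 *	pci=resource_alignment=20@0000:01:00.0	(1 MiB for that device)
 *	pci=resource_alignment=pci:8086:9c22	(page alignment by ID)
 */
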
static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and we use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * We can't influence their alignment here.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);
	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);