Commit d47bde70 authored by Philipp Stanner, committed by Krzysztof Wilczyński

PCI: Add managed pcim_request_region()

These existing functions:

  pci_request_region()
  pci_request_selected_regions()
  pci_request_selected_regions_exclusive()

are "hybrid" functions built on __pci_request_region() and are managed if
pcim_enable_device() has been called, but unmanaged otherwise.

Add these new functions:

  pcim_request_region()
  pcim_request_region_exclusive()

These are *always* managed and use the new pcim_addr_devres tracking
infrastructure instead of find_pci_dr() and struct pci_devres.region_mask.

Implement the hybrid functions using the new "pure" functions and remove
struct pci_devres.region_mask, which is no longer needed.

Link: https://lore.kernel.org/r/20240613115032.29098-6-pstanner@redhat.com
Signed-off-by: Philipp Stanner <pstanner@redhat.com>
Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
[bhelgaas: commit log]
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
parent e354bb84
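
As a rough illustration of the "hybrid" behavior described in the commit message (not part of the patch itself): a driver that enables its device with pcim_enable_device() and then calls pci_request_region() gets automatic cleanup on detach; after this change that cleanup is routed through the new pcim_ infrastructure rather than struct pci_devres.region_mask. The driver name, vendor/device IDs, and BAR index below are made up for illustration only.

/*
 * Hypothetical driver sketch: managed ("hybrid") path.
 * pcim_enable_device() marks the device as managed, so the
 * pci_request_region() call below is released automatically on
 * driver detach; no .remove() cleanup is required for it.
 */
#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_BAR 0                      /* assumed BAR index */

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        ret = pcim_enable_device(pdev); /* device becomes "managed" */
        if (ret)
                return ret;

        /* Managed because of pcim_enable_device() above. */
        ret = pci_request_region(pdev, DEMO_BAR, "demo");
        if (ret)
                return ret;

        /* ... map and use the BAR ... */
        return 0;
}

static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* made-up IDs */
        { }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
        .name           = "demo",
        .id_table       = demo_ids,
        .probe          = demo_probe,
        /* no .remove() needed for the managed resources above */
};
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");

Had the same probe used pci_enable_device() instead, the very same pci_request_region() call would be unmanaged; that "sometimes managed, sometimes not" behavior is what the commit message calls hybrid.
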
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -24,18 +24,16 @@
  *
  * Consequently, in the new API, region requests performed by the pcim_
  * functions are automatically cleaned up through the devres callback
- * pcim_addr_resource_release(), while requests performed by
- * pcim_enable_device() + pci_*region*() are automatically cleaned up
- * through the for-loop in pcim_release().
+ * pcim_addr_resource_release().
  *
- * TODO 1:
+ * Users of pcim_enable_device() + pci_*region*() are redirected in
+ * pci.c to the managed functions here in this file. This isn't exactly
+ * perfect, but the only alternative way would be to port ALL drivers
+ * using said combination to pcim_ functions.
+ *
+ * TODO:
  * Remove the legacy table entirely once all calls to pcim_iomap_table() in
  * the kernel have been removed.
- *
- * TODO 2:
- * Port everyone calling pcim_enable_device() + pci_*region*() to using the
- * pcim_ functions. Then, remove all devres functionality from pci_*region*()
- * functions and remove the associated cleanups described above in point #2.
  */
 
 /*
@@ -402,22 +400,6 @@ static void pcim_release(struct device *gendev, void *res)
 {
         struct pci_dev *dev = to_pci_dev(gendev);
         struct pci_devres *this = res;
-        int i;
-
-        /*
-         * This is legacy code.
-         *
-         * All regions requested by a pcim_ function do get released through
-         * pcim_addr_resource_release(). Thanks to the hybrid nature of the pci_
-         * region-request functions, this for-loop has to release the regions
-         * if they have been requested by such a function.
-         *
-         * TODO: Remove this once all users of pcim_enable_device() PLUS
-         * pci-region-request-functions have been ported to pcim_ functions.
-         */
-        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
-                if (mask_contains_bar(this->region_mask, i))
-                        pci_release_region(dev, i);
 
         if (this->mwi)
                 pci_clear_mwi(dev);
@@ -826,11 +808,29 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
  * The region will automatically be released on driver detach. If desired,
  * release manually only with pcim_release_region().
  */
-static int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
 {
         return _pcim_request_region(pdev, bar, name, 0);
 }
 
+/**
+ * pcim_request_region_exclusive - Request a PCI BAR exclusively
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name associated with the request
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar exclusively.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
+{
+        return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
+}
+
 /**
  * pcim_release_region - Release a PCI BAR
  * @pdev: PCI device to operate on
@@ -839,7 +839,7 @@ static int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
  * Release a region manually that was previously requested by
  * pcim_request_region().
  */
-static void pcim_release_region(struct pci_dev *pdev, int bar)
+void pcim_release_region(struct pci_dev *pdev, int bar)
 {
         struct pcim_addr_devres res_searched;
 
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3872,7 +3872,15 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
  */
 void pci_release_region(struct pci_dev *pdev, int bar)
 {
-        struct pci_devres *dr;
-
+        /*
+         * This is done for backwards compatibility, because the old PCI devres
+         * API had a mode in which the function became managed if it had been
+         * enabled with pcim_enable_device() instead of pci_enable_device().
+         */
+        if (pci_is_managed(pdev)) {
+                pcim_release_region(pdev, bar);
+                return;
+        }
+
         if (pci_resource_len(pdev, bar) == 0)
                 return;
@@ -3882,21 +3890,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
         else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                 release_mem_region(pci_resource_start(pdev, bar),
                                    pci_resource_len(pdev, bar));
-
-        /*
-         * This devres utility makes this function sometimes managed
-         * (when pcim_enable_device() has been called before).
-         *
-         * This is bad because it conflicts with the pcim_ functions being
-         * exclusively responsible for managed PCI. Its "sometimes yes,
-         * sometimes no" nature can cause bugs.
-         *
-         * TODO: Remove this once all users that use pcim_enable_device() PLUS
-         * a region request function have been ported to using pcim_ functions.
-         */
-        dr = find_pci_dr(pdev);
-        if (dr)
-                dr->region_mask &= ~(1 << bar);
 }
 EXPORT_SYMBOL(pci_release_region);
 
@@ -3922,7 +3915,12 @@ EXPORT_SYMBOL(pci_release_region);
 static int __pci_request_region(struct pci_dev *pdev, int bar,
                                 const char *res_name, int exclusive)
 {
-        struct pci_devres *dr;
+        if (pci_is_managed(pdev)) {
+                if (exclusive == IORESOURCE_EXCLUSIVE)
+                        return pcim_request_region_exclusive(pdev, bar, res_name);
+
+                return pcim_request_region(pdev, bar, res_name);
+        }
 
         if (pci_resource_len(pdev, bar) == 0)
                 return 0;
@@ -3938,21 +3936,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
                         goto err_out;
         }
 
-        /*
-         * This devres utility makes this function sometimes managed
-         * (when pcim_enable_device() has been called before).
-         *
-         * This is bad because it conflicts with the pcim_ functions being
-         * exclusively responsible for managed pci. Its "sometimes yes,
-         * sometimes no" nature can cause bugs.
-         *
-         * TODO: Remove this once all users that use pcim_enable_device() PLUS
-         * a region request function have been ported to using pcim_ functions.
-         */
-        dr = find_pci_dr(pdev);
-        if (dr)
-                dr->region_mask |= 1 << bar;
-
         return 0;
 
 err_out:
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -826,16 +826,15 @@ struct pci_devres {
         unsigned int orig_intx:1;
         unsigned int restore_intx:1;
         unsigned int mwi:1;
-
-        /*
-         * TODO: remove the region_mask once everyone calling
-         * pcim_enable_device() + pci_*region*() is ported to pcim_ functions.
-         */
-        u32 region_mask;
 };
 
 struct pci_devres *find_pci_dr(struct pci_dev *pdev);
 
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
+                                  const char *name);
+void pcim_release_region(struct pci_dev *pdev, int bar);
+
 /*
  * Config Address for PCI Configuration Mechanism #1
  *
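
For contrast with the pci_is_managed() checks added in pci.c above, here is a sketch of the unmanaged path, which this commit leaves behaviorally unchanged: with plain pci_enable_device(), the driver itself remains responsible for releasing the region and disabling the device in its remove() callback. Function names and the BAR index are hypothetical.

/*
 * Hypothetical unmanaged counterpart to the earlier sketch: nothing here
 * is tracked by devres, so remove() must undo probe() by hand.
 */
#include <linux/pci.h>

#define DEMO_BAR 0                      /* assumed BAR index */

static int demo_probe_unmanaged(struct pci_dev *pdev,
                                const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(pdev);  /* unmanaged enable */
        if (ret)
                return ret;

        ret = pci_request_region(pdev, DEMO_BAR, "demo");
        if (ret) {
                pci_disable_device(pdev);
                return ret;
        }

        return 0;
}

static void demo_remove_unmanaged(struct pci_dev *pdev)
{
        /* Manual cleanup, mirroring probe() in reverse order. */
        pci_release_region(pdev, DEMO_BAR);
        pci_disable_device(pdev);
}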