Commit b06f58ad authored by Linus Torvalds

Merge tag 'driver-core-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
 "Here is the set of driver core updates for 6.7-rc1. Nothing major in
  here at all, just a small number of changes including:

   - minor cleanups and updates from Andy Shevchenko

   - __counted_by addition

   - firmware_loader update for aborting loads more cleanly

   - other minor changes, details in the shortlog

   - documentation update

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'driver-core-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (21 commits)
  firmware_loader: Abort all upcoming firmware load request once reboot triggered
  firmware_loader: Refactor kill_pending_fw_fallback_reqs()
  Documentation: security-bugs.rst: linux-distros relaxed their rules
  driver core: Release all resources during unbind before updating device links
  driver core: class: remove boilerplate code
  driver core: platform: Annotate struct irq_affinity_devres with __counted_by
  resource: Constify resource crosscheck APIs
  resource: Unify next_resource() and next_resource_skip_children()
  resource: Reuse for_each_resource() macro
  PCI: Implement custom llseek for sysfs resource entries
  kernfs: sysfs: support custom llseek method for sysfs entries
  debugfs: Fix __rcu type comparison warning
  device property: Replace custom implementation of COUNT_ARGS()
  drivers: base: test: Make property entry API test modular
  driver core: Add missing parameter description to __fwnode_link_add()
  device property: Clarify usage scope of some struct fwnode_handle members
  devres: rename the first parameter of devm_add_action(_or_reset)
  driver core: platform: Unify the firmware node type check
  driver core: platform: Use temporary variable in platform_device_add()
  driver core: platform: Refactor error path in a couple places
  ...
parents d99b91a9 effd7c70
@@ -66,15 +66,32 @@ lifted, in perpetuity.
 Coordination with other groups
 ------------------------------
 
-The kernel security team strongly recommends that reporters of potential
-security issues NEVER contact the "linux-distros" mailing list until
-AFTER discussing it with the kernel security team.  Do not Cc: both
-lists at once.  You may contact the linux-distros mailing list after a
-fix has been agreed on and you fully understand the requirements that
-doing so will impose on you and the kernel community.
+While the kernel security team solely focuses on getting bugs fixed,
+other groups focus on fixing issues in distros and coordinating
+disclosure between operating system vendors.  Coordination is usually
+handled by the "linux-distros" mailing list and disclosure by the
+public "oss-security" mailing list, both of which are closely related
+and presented in the linux-distros wiki:
+<https://oss-security.openwall.org/wiki/mailing-lists/distros>
 
-The different lists have different goals and the linux-distros rules do
-not contribute to actually fixing any potential security problems.
+Please note that the respective policies and rules are different since
+the 3 lists pursue different goals.  Coordinating between the kernel
+security team and other teams is difficult since for the kernel security
+team occasional embargoes (as subject to a maximum allowed number of
+days) start from the availability of a fix, while for "linux-distros"
+they start from the initial post to the list regardless of the
+availability of a fix.
+
+As such, the kernel security team strongly recommends that as a reporter
+of a potential security issue you DO NOT contact the "linux-distros"
+mailing list UNTIL a fix is accepted by the affected code's maintainers
+and you have read the distros wiki page above and you fully understand
+the requirements that contacting "linux-distros" will impose on you and
+the kernel community.  This also means that in general it doesn't make
+sense to Cc: both lists at once, except maybe for coordination if and
+while an accepted fix has not yet been merged.  In other words, until a
+fix is accepted do not Cc: "linux-distros", and after it's merged do not
+Cc: the kernel security team.
 
 CVE assignment
 --------------
...
@@ -193,10 +193,8 @@ int class_register(const struct class *cls)
 	lockdep_register_key(key);
 	__mutex_init(&cp->mutex, "subsys mutex", key);
 	error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
-	if (error) {
-		kfree(cp);
-		return error;
-	}
+	if (error)
+		goto err_out;
 
 	cp->subsys.kobj.kset = class_kset;
 	cp->subsys.kobj.ktype = &class_ktype;
...
@@ -49,6 +49,7 @@ static bool fw_devlink_best_effort;
  * __fwnode_link_add - Create a link between two fwnode_handles.
  * @con: Consumer end of the link.
  * @sup: Supplier end of the link.
+ * @flags: Link flags.
  *
  * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
  * represents the detail that the firmware lists @sup fwnode as supplying a
...
@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 	if (dev->bus && dev->bus->dma_cleanup)
 		dev->bus->dma_cleanup(dev);
 
-	device_links_driver_cleanup(dev);
 	device_unbind_cleanup(dev);
+	device_links_driver_cleanup(dev);
 
 	klist_remove(&dev->p->knode_driver);
 	device_pm_check_callbacks(dev);
...
@@ -46,7 +46,7 @@ static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
 
 static LIST_HEAD(pending_fw_head);
 
-void kill_pending_fw_fallback_reqs(bool only_kill_custom)
+void kill_pending_fw_fallback_reqs(bool kill_all)
 {
 	struct fw_priv *fw_priv;
 	struct fw_priv *next;
@@ -54,9 +54,13 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
 
 	mutex_lock(&fw_lock);
 	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
 				 pending_list) {
-		if (!fw_priv->need_uevent || !only_kill_custom)
+		if (kill_all || !fw_priv->need_uevent)
 			__fw_load_abort(fw_priv);
 	}
+
+	if (kill_all)
+		fw_load_abort_all = true;
+
 	mutex_unlock(&fw_lock);
 }
@@ -86,7 +90,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
 	}
 
 	mutex_lock(&fw_lock);
-	if (fw_state_is_aborted(fw_priv)) {
+	if (fw_load_abort_all || fw_state_is_aborted(fw_priv)) {
 		mutex_unlock(&fw_lock);
 		retval = -EINTR;
 		goto out;
...
@@ -13,7 +13,7 @@ int firmware_fallback_sysfs(struct firmware *fw, const char *name,
 			    struct device *device,
 			    u32 opt_flags,
 			    int ret);
-void kill_pending_fw_fallback_reqs(bool only_kill_custom);
+void kill_pending_fw_fallback_reqs(bool kill_all);
 void fw_fallback_set_cache_timeout(void);
 void fw_fallback_set_default_timeout(void);
 
@@ -28,7 +28,7 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
 	return ret;
 }
 
-static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline void kill_pending_fw_fallback_reqs(bool kill_all) { }
 static inline void fw_fallback_set_cache_timeout(void) { }
 static inline void fw_fallback_set_default_timeout(void) { }
 #endif /* CONFIG_FW_LOADER_USER_HELPER */
...
@@ -86,6 +86,7 @@ struct fw_priv {
 
 extern struct mutex fw_lock;
 extern struct firmware_cache fw_cache;
+extern bool fw_load_abort_all;
 
 static inline bool __fw_state_check(struct fw_priv *fw_priv,
 				    enum fw_status status)
...
@@ -93,6 +93,7 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
 DEFINE_MUTEX(fw_lock);
 
 struct firmware_cache fw_cache;
+bool fw_load_abort_all;
 
 void fw_state_init(struct fw_priv *fw_priv)
 {
@@ -1524,10 +1525,10 @@ static int fw_pm_notify(struct notifier_block *notify_block,
 	case PM_SUSPEND_PREPARE:
 	case PM_RESTORE_PREPARE:
 		/*
-		 * kill pending fallback requests with a custom fallback
-		 * to avoid stalling suspend.
+		 * Here, kill pending fallback requests will only kill
+		 * non-uevent firmware request to avoid stalling suspend.
 		 */
-		kill_pending_fw_fallback_reqs(true);
+		kill_pending_fw_fallback_reqs(false);
 		device_cache_fw_images();
 		break;
@@ -1612,7 +1613,7 @@ static int fw_shutdown_notify(struct notifier_block *unused1,
 	 * Kill all pending fallback requests to avoid both stalling shutdown,
 	 * and avoid a deadlock with the usermode_lock.
 	 */
-	kill_pending_fw_fallback_reqs(false);
+	kill_pending_fw_fallback_reqs(true);
 	return NOTIFY_DONE;
 }
...
@@ -178,18 +178,19 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
 	ret = dev->archdata.irqs[num];
 	goto out;
 #else
+	struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
 	struct resource *r;
 
-	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
-		ret = of_irq_get(dev->dev.of_node, num);
+	if (is_of_node(fwnode)) {
+		ret = of_irq_get(to_of_node(fwnode), num);
 		if (ret > 0 || ret == -EPROBE_DEFER)
 			goto out;
 	}
 
 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
-	if (has_acpi_companion(&dev->dev)) {
+	if (is_acpi_device_node(fwnode)) {
 		if (r && r->flags & IORESOURCE_DISABLED) {
-			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+			ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r);
 			if (ret)
 				goto out;
 		}
@@ -222,8 +223,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
 	 * the device will only expose one IRQ, and this fallback
 	 * allows a common code path across either kind of resource.
 	 */
-	if (num == 0 && has_acpi_companion(&dev->dev)) {
-		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+	if (num == 0 && is_acpi_device_node(fwnode)) {
+		ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num);
 		/* Our callers expect -ENXIO for missing IRQs. */
 		if (ret >= 0 || ret == -EPROBE_DEFER)
 			goto out;
@@ -291,7 +292,7 @@ EXPORT_SYMBOL_GPL(platform_irq_count);
 
 struct irq_affinity_devres {
 	unsigned int count;
-	unsigned int irq[];
+	unsigned int irq[] __counted_by(count);
 };
 
 static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
@@ -312,7 +313,7 @@ static void devm_platform_get_irqs_affinity_release(struct device *dev,
 	for (i = 0; i < ptr->count; i++) {
 		irq_dispose_mapping(ptr->irq[i]);
 
-		if (has_acpi_companion(dev))
+		if (is_acpi_device_node(dev_fwnode(dev)))
 			platform_disable_acpi_irq(to_platform_device(dev), i);
 	}
 }
@@ -655,23 +656,21 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
  */
 int platform_device_add(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
 	u32 i;
 	int ret;
 
-	if (!pdev)
-		return -EINVAL;
-
-	if (!pdev->dev.parent)
-		pdev->dev.parent = &platform_bus;
+	if (!dev->parent)
+		dev->parent = &platform_bus;
 
-	pdev->dev.bus = &platform_bus_type;
+	dev->bus = &platform_bus_type;
 
 	switch (pdev->id) {
 	default:
-		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+		dev_set_name(dev, "%s.%d", pdev->name, pdev->id);
 		break;
 	case PLATFORM_DEVID_NONE:
-		dev_set_name(&pdev->dev, "%s", pdev->name);
+		dev_set_name(dev, "%s", pdev->name);
 		break;
 	case PLATFORM_DEVID_AUTO:
 		/*
@@ -681,10 +680,10 @@ int platform_device_add(struct platform_device *pdev)
 		 */
 		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
 		if (ret < 0)
-			goto err_out;
+			return ret;
 		pdev->id = ret;
 		pdev->id_auto = true;
-		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
+		dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id);
 		break;
 	}
@@ -692,7 +691,7 @@ int platform_device_add(struct platform_device *pdev)
 		struct resource *p, *r = &pdev->resource[i];
 
 		if (r->name == NULL)
-			r->name = dev_name(&pdev->dev);
+			r->name = dev_name(dev);
 
 		p = r->parent;
 		if (!p) {
@@ -705,18 +704,20 @@ int platform_device_add(struct platform_device *pdev)
 		if (p) {
 			ret = insert_resource(p, r);
 			if (ret) {
-				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+				dev_err(dev, "failed to claim resource %d: %pR\n", i, r);
 				goto failed;
 			}
 		}
 	}
 
-	pr_debug("Registering platform device '%s'. Parent at %s\n",
-		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
+	pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(dev),
+		 dev_name(dev->parent));
 
-	ret = device_add(&pdev->dev);
-	if (ret == 0)
-		return ret;
+	ret = device_add(dev);
+	if (ret)
+		goto failed;
+
+	return 0;
 
  failed:
 	if (pdev->id_auto) {
@@ -730,7 +731,6 @@ int platform_device_add(struct platform_device *pdev)
 			release_resource(r);
 	}
 
- err_out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(platform_device_add);
@@ -1447,21 +1447,22 @@ static void platform_shutdown(struct device *_dev)
 static int platform_dma_configure(struct device *dev)
 {
 	struct platform_driver *drv = to_platform_driver(dev->driver);
+	struct fwnode_handle *fwnode = dev_fwnode(dev);
 	enum dev_dma_attr attr;
 	int ret = 0;
 
-	if (dev->of_node) {
-		ret = of_dma_configure(dev, dev->of_node, true);
-	} else if (has_acpi_companion(dev)) {
-		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+	if (is_of_node(fwnode)) {
+		ret = of_dma_configure(dev, to_of_node(fwnode), true);
+	} else if (is_acpi_device_node(fwnode)) {
+		attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
 		ret = acpi_dma_configure(dev, attr);
 	}
 
-	if (!ret && !drv->driver_managed_dma) {
-		ret = iommu_device_use_default_domain(dev);
-		if (ret)
-			arch_teardown_dma_ops(dev);
-	}
+	if (ret || drv->driver_managed_dma)
+		return ret;
+
+	ret = iommu_device_use_default_domain(dev);
+	if (ret)
+		arch_teardown_dma_ops(dev);
 
 	return ret;
 }
...
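For context on the __counted_by() annotation added to struct irq_affinity_devres above: tying a flexible array member to its length field lets CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS check accesses against that counter. A minimal, hypothetical sketch of the pattern (the foo_* names are invented for illustration, not part of this merge):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical counted flexible array, mirroring irq_affinity_devres. */
struct foo_vector {
	unsigned int count;
	unsigned int irq[] __counted_by(count);
};

static struct foo_vector *foo_vector_alloc(unsigned int n)
{
	struct foo_vector *v;

	/* struct_size() computes sizeof(*v) + n * sizeof(v->irq[0]) safely. */
	v = kzalloc(struct_size(v, irq, n), GFP_KERNEL);
	if (!v)
		return NULL;

	/* The counter must be assigned before irq[] is indexed. */
	v->count = n;
	return v;
}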
@@ -14,6 +14,6 @@ config DM_KUNIT_TEST
 	depends on KUNIT
 
 config DRIVER_PE_KUNIT_TEST
-	bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
-	depends on KUNIT=y
+	tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
+	depends on KUNIT
 	default KUNIT_ALL_TESTS
@@ -506,3 +506,7 @@ static struct kunit_suite property_entry_test_suite = {
 };
 
 kunit_test_suite(property_entry_test_suite);
+
+MODULE_DESCRIPTION("Test module for the property entry API");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
@@ -831,6 +831,19 @@ static const struct attribute_group pci_dev_config_attr_group = {
 	.is_bin_visible = pci_dev_config_attr_is_visible,
 };
 
+/*
+ * llseek operation for mmappable PCI resources.
+ * May be left unused if the arch doesn't provide them.
+ */
+static __maybe_unused loff_t
+pci_llseek_resource(struct file *filep,
+		    struct kobject *kobj __always_unused,
+		    struct bin_attribute *attr,
+		    loff_t offset, int whence)
+{
+	return fixed_size_llseek(filep, offset, whence, attr->size);
+}
+
 #ifdef HAVE_PCI_LEGACY
 
 /**
  * pci_read_legacy_io - read byte(s) from legacy I/O port space
@@ -963,6 +976,8 @@ void pci_create_legacy_files(struct pci_bus *b)
 	b->legacy_io->attr.mode = 0600;
 	b->legacy_io->read = pci_read_legacy_io;
 	b->legacy_io->write = pci_write_legacy_io;
+	/* See pci_create_attr() for motivation */
+	b->legacy_io->llseek = pci_llseek_resource;
 	b->legacy_io->mmap = pci_mmap_legacy_io;
 	b->legacy_io->f_mapping = iomem_get_mapping;
 	pci_adjust_legacy_attr(b, pci_mmap_io);
@@ -977,6 +992,8 @@ void pci_create_legacy_files(struct pci_bus *b)
 	b->legacy_mem->size = 1024*1024;
 	b->legacy_mem->attr.mode = 0600;
 	b->legacy_mem->mmap = pci_mmap_legacy_mem;
+	/* See pci_create_attr() for motivation */
+	b->legacy_mem->llseek = pci_llseek_resource;
 	b->legacy_mem->f_mapping = iomem_get_mapping;
 	pci_adjust_legacy_attr(b, pci_mmap_mem);
 	error = device_create_bin_file(&b->dev, b->legacy_mem);
@@ -1195,8 +1212,15 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
 			res_attr->mmap = pci_mmap_resource_uc;
 		}
 	}
-	if (res_attr->mmap)
+	if (res_attr->mmap) {
 		res_attr->f_mapping = iomem_get_mapping;
+		/*
+		 * generic_file_llseek() consults f_mapping->host to determine
+		 * the file size. As iomem_inode knows nothing about the
+		 * attribute, it's not going to work, so override it as well.
+		 */
+		res_attr->llseek = pci_llseek_resource;
+	}
 	res_attr->attr.name = res_attr_name;
 	res_attr->attr.mode = 0600;
 	res_attr->size = pci_resource_len(pdev, num);
...
@@ -939,7 +939,7 @@ static ssize_t debugfs_write_file_str(struct file *file, const char __user *user
 	new[pos + count] = '\0';
 	strim(new);
 
-	rcu_assign_pointer(*(char **)file->private_data, new);
+	rcu_assign_pointer(*(char __rcu **)file->private_data, new);
 	synchronize_rcu();
 	kfree(old);
 
...
@@ -854,6 +854,33 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
 	return ret;
 }
 
+static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
+{
+	struct kernfs_open_file *of = kernfs_of(file);
+	const struct kernfs_ops *ops;
+	loff_t ret;
+
+	/*
+	 * @of->mutex nests outside active ref and is primarily to ensure that
+	 * the ops aren't called concurrently for the same open file.
+	 */
+	mutex_lock(&of->mutex);
+	if (!kernfs_get_active(of->kn)) {
+		mutex_unlock(&of->mutex);
+		return -ENODEV;
+	}
+
+	ops = kernfs_ops(of->kn);
+	if (ops->llseek)
+		ret = ops->llseek(of, offset, whence);
+	else
+		ret = generic_file_llseek(file, offset, whence);
+
+	kernfs_put_active(of->kn);
+	mutex_unlock(&of->mutex);
+	return ret;
+}
+
 static void kernfs_notify_workfn(struct work_struct *work)
 {
 	struct kernfs_node *kn;
@@ -956,7 +983,7 @@ EXPORT_SYMBOL_GPL(kernfs_notify);
 const struct file_operations kernfs_file_fops = {
 	.read_iter	= kernfs_fop_read_iter,
 	.write_iter	= kernfs_fop_write_iter,
-	.llseek		= generic_file_llseek,
+	.llseek		= kernfs_fop_llseek,
 	.mmap		= kernfs_fop_mmap,
 	.open		= kernfs_fop_open,
 	.release	= kernfs_fop_release,
...
@@ -167,6 +167,18 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
 	return battr->mmap(of->file, kobj, battr, vma);
 }
 
+static loff_t sysfs_kf_bin_llseek(struct kernfs_open_file *of, loff_t offset,
+				  int whence)
+{
+	struct bin_attribute *battr = of->kn->priv;
+	struct kobject *kobj = of->kn->parent->priv;
+
+	if (battr->llseek)
+		return battr->llseek(of->file, kobj, battr, offset, whence);
+	else
+		return generic_file_llseek(of->file, offset, whence);
+}
+
 static int sysfs_kf_bin_open(struct kernfs_open_file *of)
 {
 	struct bin_attribute *battr = of->kn->priv;
@@ -249,6 +261,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = {
 	.write		= sysfs_kf_bin_write,
 	.mmap		= sysfs_kf_bin_mmap,
 	.open		= sysfs_kf_bin_open,
+	.llseek		= sysfs_kf_bin_llseek,
 };
 
 int sysfs_add_file_mode_ns(struct kernfs_node *parent,
...
@@ -389,8 +389,8 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
 void devm_release_action(struct device *dev, void (*action)(void *), void *data);
 
 int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(release, action, data) \
-	__devm_add_action(release, action, data, #action)
+#define devm_add_action(dev, action, data) \
+	__devm_add_action(dev, action, data, #action)
 
 static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
 					     void *data, const char *name)
@@ -403,8 +403,8 @@ static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(
 	return ret;
 }
 
-#define devm_add_action_or_reset(release, action, data) \
-	__devm_add_action_or_reset(release, action, data, #action)
+#define devm_add_action_or_reset(dev, action, data) \
+	__devm_add_action_or_reset(dev, action, data, #action)
 
 /**
  * devm_alloc_percpu - Resource-managed alloc_percpu
...
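As a usage note on the parameter rename above: the first argument to devm_add_action(_or_reset) has always been the struct device whose devres list owns the action, and the macro parameter now says so. A minimal, hypothetical probe sketch (the foo_* names are invented for illustration, not part of this merge):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical per-device state used only for this illustration. */
struct foo_ctx {
	struct device *dev;
};

static void foo_disable_hw(void *data)
{
	struct foo_ctx *ctx = data;

	/* Runs automatically on driver unbind or on probe failure. */
	dev_info(ctx->dev, "undoing hardware setup\n");
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_ctx *ctx;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->dev = dev;

	/*
	 * First argument is the device owning the devres list, exactly what
	 * the renamed macro parameter ("dev" instead of "release") conveys.
	 */
	return devm_add_action_or_reset(dev, foo_disable_hw, ctx);
}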
@@ -41,6 +41,8 @@ struct device;
 struct fwnode_handle {
 	struct fwnode_handle *secondary;
 	const struct fwnode_operations *ops;
+
+	/* The below is used solely by device links, don't use otherwise */
 	struct device *dev;
 	struct list_head suppliers;
 	struct list_head consumers;
...
@@ -229,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
 	return res->flags & IORESOURCE_EXT_TYPE_BITS;
 }
 /* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
 {
 	if (resource_type(r1) != resource_type(r2))
 		return false;
@@ -239,13 +239,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
 }
 
 /* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
 {
 	return r1->start <= r2->end && r1->end >= r2->start;
 }
 
-static inline bool
-resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+					 struct resource *r)
 {
 	if (!resource_overlaps(r1, r2))
 		return false;
@@ -254,8 +254,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource
 	return true;
 }
 
-static inline bool
-resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+				  struct resource *r)
 {
 	if (!resource_overlaps(r1, r2))
 		return false;
...
@@ -316,6 +316,7 @@ struct kernfs_ops {
 			 struct poll_table_struct *pt);
 
 	int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+	loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
 };
 
 /*
...
@@ -10,6 +10,7 @@
 #ifndef _LINUX_PROPERTY_H_
 #define _LINUX_PROPERTY_H_
 
+#include <linux/args.h>
 #include <linux/bits.h>
 #include <linux/fwnode.h>
 #include <linux/stddef.h>
@@ -288,7 +289,7 @@ struct software_node_ref_args {
 #define SOFTWARE_NODE_REFERENCE(_ref_, ...)			\
 (const struct software_node_ref_args) {				\
 	.node = _ref_,						\
-	.nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1,	\
+	.nargs = COUNT_ARGS(__VA_ARGS__),			\
 	.args = { __VA_ARGS__ },				\
 }
...
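The SOFTWARE_NODE_REFERENCE() change above replaces the open-coded ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1 idiom with COUNT_ARGS() from <linux/args.h>. A standalone, simplified stand-in for the counting trick (not the kernel's exact definition, capped at 8 arguments here, and relying on the GNU ##__VA_ARGS__ extension just as the kernel does):

#include <stdio.h>

/*
 * The variadic arguments push a descending number list to the right, so the
 * value that lands in the _n slot equals the number of arguments passed.
 */
#define PICK_COUNT(_0, _1, _2, _3, _4, _5, _6, _7, _n, ...) _n
#define COUNT_ARGS_DEMO(...) \
	PICK_COUNT(, ##__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)

int main(void)
{
	printf("%d\n", COUNT_ARGS_DEMO());          /* prints 0 */
	printf("%d\n", COUNT_ARGS_DEMO(10));        /* prints 1 */
	printf("%d\n", COUNT_ARGS_DEMO(1, 2, 3));   /* prints 3 */
	return 0;
}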
@@ -181,6 +181,8 @@ struct bin_attribute {
 			char *, loff_t, size_t);
 	ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
 			 char *, loff_t, size_t);
+	loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
+			 loff_t, int);
 	int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
 		    struct vm_area_struct *vma);
 };
...
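With the new llseek hook in struct bin_attribute and the kernfs/sysfs plumbing above, a driver exposing a fixed-size binary attribute can make it seekable the same way the PCI resource files do. A minimal, hypothetical sketch (foo_* names invented for illustration) using fixed_size_llseek() against the attribute size:

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

/* Hypothetical fixed-size binary attribute: 4 KiB of device data. */
#define FOO_BLOB_SIZE	4096

static loff_t foo_blob_llseek(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      loff_t offset, int whence)
{
	/* Same approach as the PCI resource files: seek within attr->size. */
	return fixed_size_llseek(filp, offset, whence, attr->size);
}

static ssize_t foo_blob_read(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *attr,
			     char *buf, loff_t off, size_t count)
{
	/* Illustration only: report the blob as zero-filled. */
	memset(buf, 0, count);
	return count;
}

static const struct bin_attribute foo_blob_attr = {
	.attr	= { .name = "blob", .mode = 0400 },
	.size	= FOO_BLOB_SIZE,
	.read	= foo_blob_read,
	.llseek	= foo_blob_llseek,
};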
@@ -56,33 +56,17 @@ struct resource_constraint {
 
 static DEFINE_RWLOCK(resource_lock);
 
-static struct resource *next_resource(struct resource *p)
+static struct resource *next_resource(struct resource *p, bool skip_children)
 {
-	if (p->child)
+	if (!skip_children && p->child)
 		return p->child;
 	while (!p->sibling && p->parent)
 		p = p->parent;
 	return p->sibling;
 }
 
-static struct resource *next_resource_skip_children(struct resource *p)
-{
-	while (!p->sibling && p->parent)
-		p = p->parent;
-	return p->sibling;
-}
-
 #define for_each_resource(_root, _p, _skip_children) \
-	for ((_p) = (_root)->child; (_p); \
-	     (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
-				       next_resource(_p))
-
-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	struct resource *p = v;
-
-	(*pos)++;
-
-	return (void *)next_resource(p);
-}
+	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
 
 #ifdef CONFIG_PROC_FS
@@ -91,14 +75,28 @@ enum { MAX_IORES_LEVEL = 5 };
 static void *r_start(struct seq_file *m, loff_t *pos)
 	__acquires(resource_lock)
 {
-	struct resource *p = pde_data(file_inode(m->file));
-	loff_t l = 0;
+	struct resource *root = pde_data(file_inode(m->file));
+	struct resource *p;
+	loff_t l = *pos;
+
 	read_lock(&resource_lock);
-	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
-		;
+
+	for_each_resource(root, p, false) {
+		if (l-- == 0)
+			break;
+	}
+
 	return p;
 }
 
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct resource *p = v;
+
+	(*pos)++;
+
+	return (void *)next_resource(p, false);
+}
+
 static void r_stop(struct seq_file *m, void *v)
 	__releases(resource_lock)
 {
@@ -336,7 +334,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 
 	read_lock(&resource_lock);
-	for (p = iomem_resource.child; p; p = next_resource(p)) {
+	for_each_resource(&iomem_resource, p, false) {
 		/* If we passed the resource we are looking for, stop */
 		if (p->start > end) {
 			p = NULL;
@@ -1641,13 +1639,12 @@ __setup("reserve=", reserve_setup);
  */
 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 {
-	struct resource *p = &iomem_resource;
 	resource_size_t end = addr + size - 1;
+	struct resource *p;
 	int err = 0;
-	loff_t l;
 
 	read_lock(&resource_lock);
-	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+	for_each_resource(&iomem_resource, p, false) {
 		/*
 		 * We can probably skip the resources without
 		 * IORESOURCE_IO attribute?
...