Commit c8107ed9 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:

 - A couple of bug fixes: memory management, perf, cio, dasd and
   scm_blk.

 - A larger change in regard to the CPU topology to improve performance
   for systems running under z/VM or KVM.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/topology: enable / disable topology dynamically
  s390/topology: alternative topology for topology-less machines
  s390/mm: fix write access check in gup_huge_pmd()
  s390/mm: make pmdp_invalidate() do invalidation only
  s390/cio: recover from bad paths
  s390/scm_blk: consistently use blk_status_t as error type
  s390/dasd: fix race during dasd initialization
  s390/perf: fix bug when creating per-thread event
parents 7a6d0071 51dce386

arch/s390/include/asm/pgtable.h
@@ -1507,7 +1507,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
 				   unsigned long addr, pmd_t *pmdp)
 {
-	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+
+	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
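
pmdp_invalidate() now marks the existing entry invalid instead of swapping in an empty entry, so code that inspects the pmd after the invalidation still sees its original contents. A plain-integer sketch of the difference; 0x20 stands in for _SEGMENT_ENTRY_INVALID and the other values are invented:

#include <stdio.h>

#define INVALID_BIT	0x20UL	/* stand-in for _SEGMENT_ENTRY_INVALID */

int main(void)
{
	unsigned long pmd = 0x12345000UL | 0x800UL;	/* origin plus a software bit */

	/* Old behaviour: the entry was exchanged with an empty entry, so the
	 * origin and software bits were lost. */
	unsigned long old_entry = INVALID_BIT;		/* stand-in for _SEGMENT_ENTRY_EMPTY */

	/* New behaviour: only the invalid bit is or-ed in; everything else
	 * survives for later inspection. */
	unsigned long new_entry = pmd | INVALID_BIT;

	printf("old: %#lx, new: %#lx\n", old_entry, new_entry);
	return 0;
}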

arch/s390/kernel/early.c
@@ -404,18 +404,6 @@ static inline void save_vector_registers(void)
 #endif
 }
 
-static int __init topology_setup(char *str)
-{
-	bool enabled;
-	int rc;
-
-	rc = kstrtobool(str, &enabled);
-	if (!rc && !enabled)
-		S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY;
-	return rc;
-}
-early_param("topology", topology_setup);
-
 static int __init disable_vector_extension(char *str)
 {
 	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;

arch/s390/kernel/perf_cpum_sf.c
@@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
 	}
 
 	/* Check online status of the CPU to which the event is pinned */
-	if ((unsigned int)event->cpu >= nr_cpumask_bits ||
-	    (event->cpu >= 0 && !cpu_online(event->cpu)))
-		return -ENODEV;
+	if (event->cpu >= 0) {
+		if ((unsigned int)event->cpu >= nr_cpumask_bits)
+			return -ENODEV;
+		if (!cpu_online(event->cpu))
+			return -ENODEV;
+	}
 
 	/* Force reset of idle/hv excludes regardless of what the
 	 * user requested.
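
Per-thread events are created with event->cpu == -1. The old combined check promoted that value to unsigned int before comparing, so it always exceeded nr_cpumask_bits and the event was rejected. A minimal stand-alone C sketch of the promotion pitfall (nr_cpumask_bits is replaced by an arbitrary stand-in value):

#include <stdio.h>

int main(void)
{
	int cpu = -1;			/* perf's "per-thread, any CPU" value */
	unsigned int nr_bits = 256;	/* stand-in for nr_cpumask_bits */

	/* Old combined check: -1 becomes UINT_MAX, so every per-thread
	 * event was refused with -ENODEV. */
	printf("old check rejects: %d\n", (unsigned int)cpu >= nr_bits);

	/* New code only performs the range/online checks for cpu >= 0. */
	printf("new check rejects: %d\n", cpu >= 0 && (unsigned int)cpu >= nr_bits);
	return 0;
}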

arch/s390/kernel/topology.c
@@ -8,6 +8,8 @@
 #include <linux/workqueue.h>
 #include <linux/bootmem.h>
+#include <linux/uaccess.h>
+#include <linux/sysctl.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
@@ -29,12 +31,20 @@
 #define PTF_VERTICAL	(1UL)
 #define PTF_CHECK	(2UL)
 
+enum {
+	TOPOLOGY_MODE_HW,
+	TOPOLOGY_MODE_SINGLE,
+	TOPOLOGY_MODE_PACKAGE,
+	TOPOLOGY_MODE_UNINITIALIZED
+};
+
 struct mask_info {
 	struct mask_info *next;
 	unsigned char id;
 	cpumask_t mask;
 };
 
+static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
 static void set_topology_timer(void);
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
@@ -59,11 +69,26 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 	cpumask_t mask;
 
 	cpumask_copy(&mask, cpumask_of(cpu));
-	if (!MACHINE_HAS_TOPOLOGY)
-		return mask;
-	for (; info; info = info->next) {
-		if (cpumask_test_cpu(cpu, &info->mask))
-			return info->mask;
+	switch (topology_mode) {
+	case TOPOLOGY_MODE_HW:
+		while (info) {
+			if (cpumask_test_cpu(cpu, &info->mask)) {
+				mask = info->mask;
+				break;
+			}
+			info = info->next;
+		}
+		if (cpumask_empty(&mask))
+			cpumask_copy(&mask, cpumask_of(cpu));
+		break;
+	case TOPOLOGY_MODE_PACKAGE:
+		cpumask_copy(&mask, cpu_present_mask);
+		break;
+	default:
+		/* fallthrough */
+	case TOPOLOGY_MODE_SINGLE:
+		cpumask_copy(&mask, cpumask_of(cpu));
+		break;
 	}
 	return mask;
 }
@@ -74,7 +99,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
 	int i;
 
 	cpumask_copy(&mask, cpumask_of(cpu));
-	if (!MACHINE_HAS_TOPOLOGY)
+	if (topology_mode != TOPOLOGY_MODE_HW)
 		return mask;
 	cpu -= cpu % (smp_cpu_mtid + 1);
 	for (i = 0; i <= smp_cpu_mtid; i++)
@@ -184,10 +209,8 @@ static void topology_update_polarization_simple(void)
 {
 	int cpu;
 
-	mutex_lock(&smp_cpu_state_mutex);
 	for_each_possible_cpu(cpu)
 		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
-	mutex_unlock(&smp_cpu_state_mutex);
 }
 
 static int ptf(unsigned long fc)
@@ -223,7 +246,7 @@ int topology_set_cpu_management(int fc)
 static void update_cpu_masks(void)
 {
 	struct cpu_topology_s390 *topo;
-	int cpu;
+	int cpu, id;
 
 	for_each_possible_cpu(cpu) {
 		topo = &cpu_topology[cpu];
@@ -231,12 +254,13 @@ static void update_cpu_masks(void)
 		topo->core_mask = cpu_group_map(&socket_info, cpu);
 		topo->book_mask = cpu_group_map(&book_info, cpu);
 		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
-		if (!MACHINE_HAS_TOPOLOGY) {
+		if (topology_mode != TOPOLOGY_MODE_HW) {
+			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
 			topo->thread_id = cpu;
 			topo->core_id = cpu;
-			topo->socket_id = cpu;
-			topo->book_id = cpu;
-			topo->drawer_id = cpu;
+			topo->socket_id = id;
+			topo->book_id = id;
+			topo->drawer_id = id;
 			if (cpu_present(cpu))
 				cpumask_set_cpu(cpu, &cpus_with_topology);
 		}
@@ -254,6 +278,7 @@ static int __arch_update_cpu_topology(void)
 	struct sysinfo_15_1_x *info = tl_info;
 	int rc = 0;
 
+	mutex_lock(&smp_cpu_state_mutex);
 	cpumask_clear(&cpus_with_topology);
 	if (MACHINE_HAS_TOPOLOGY) {
 		rc = 1;
@@ -263,6 +288,7 @@ static int __arch_update_cpu_topology(void)
 	update_cpu_masks();
 	if (!MACHINE_HAS_TOPOLOGY)
 		topology_update_polarization_simple();
+	mutex_unlock(&smp_cpu_state_mutex);
 	return rc;
 }
@@ -289,6 +315,11 @@ void topology_schedule_update(void)
 	schedule_work(&topology_work);
 }
 
+static void topology_flush_work(void)
+{
+	flush_work(&topology_work);
+}
+
 static void topology_timer_fn(unsigned long ignored)
 {
 	if (ptf(PTF_CHECK))
@@ -459,6 +490,12 @@ void __init topology_init_early(void)
 	struct sysinfo_15_1_x *info;
 
 	set_sched_topology(s390_topology);
+	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
+		if (MACHINE_HAS_TOPOLOGY)
+			topology_mode = TOPOLOGY_MODE_HW;
+		else
+			topology_mode = TOPOLOGY_MODE_SINGLE;
+	}
 	if (!MACHINE_HAS_TOPOLOGY)
 		goto out;
 	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
@@ -474,12 +511,97 @@ void __init topology_init_early(void)
 	__arch_update_cpu_topology();
 }
 
+static inline int topology_get_mode(int enabled)
+{
+	if (!enabled)
+		return TOPOLOGY_MODE_SINGLE;
+	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
+}
+
+static inline int topology_is_enabled(void)
+{
+	return topology_mode != TOPOLOGY_MODE_SINGLE;
+}
+
+static int __init topology_setup(char *str)
+{
+	bool enabled;
+	int rc;
+
+	rc = kstrtobool(str, &enabled);
+	if (rc)
+		return rc;
+	topology_mode = topology_get_mode(enabled);
+	return 0;
+}
+early_param("topology", topology_setup);
+
+static int topology_ctl_handler(struct ctl_table *ctl, int write,
+				void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	unsigned int len;
+	int new_mode;
+	char buf[2];
+
+	if (!*lenp || *ppos) {
+		*lenp = 0;
+		return 0;
+	}
+	if (!write) {
+		strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
+			ARRAY_SIZE(buf));
+		len = strnlen(buf, ARRAY_SIZE(buf));
+		if (len > *lenp)
+			len = *lenp;
+		if (copy_to_user(buffer, buf, len))
+			return -EFAULT;
+		goto out;
+	}
+	len = *lenp;
+	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
+		return -EFAULT;
+	if (buf[0] != '0' && buf[0] != '1')
+		return -EINVAL;
+	mutex_lock(&smp_cpu_state_mutex);
+	new_mode = topology_get_mode(buf[0] == '1');
+	if (topology_mode != new_mode) {
+		topology_mode = new_mode;
+		topology_schedule_update();
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	topology_flush_work();
+out:
+	*lenp = len;
+	*ppos += len;
+	return 0;
+}
+
+static struct ctl_table topology_ctl_table[] = {
+	{
+		.procname	= "topology",
+		.mode		= 0644,
+		.proc_handler	= topology_ctl_handler,
+	},
+	{ },
+};
+
+static struct ctl_table topology_dir_table[] = {
+	{
+		.procname	= "s390",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= topology_ctl_table,
+	},
+	{ },
+};
+
 static int __init topology_init(void)
 {
 	if (MACHINE_HAS_TOPOLOGY)
 		set_topology_timer();
 	else
 		topology_update_polarization_simple();
+	register_sysctl_table(topology_dir_table);
 	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
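
To illustrate the mode selection added above, here is a stand-alone mirror of topology_get_mode() showing which mode each configuration ends up in; the enum values are local stand-ins, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

enum { MODE_HW, MODE_SINGLE, MODE_PACKAGE };

static int get_mode(bool machine_has_topology, bool enabled)
{
	if (!enabled)
		return MODE_SINGLE;
	return machine_has_topology ? MODE_HW : MODE_PACKAGE;
}

int main(void)
{
	/* LPAR with real topology data and topology enabled. */
	printf("%d\n", get_mode(true, true));	/* MODE_HW */
	/* Topology-less guest, e.g. under z/VM or KVM: the new fake package
	 * topology spans all present CPUs. */
	printf("%d\n", get_mode(false, true));	/* MODE_PACKAGE */
	/* topology=off on the command line, or 0 written to the sysctl. */
	printf("%d\n", get_mode(false, false));	/* MODE_SINGLE */
	return 0;
}

At runtime the same decision can be flipped through the new s390/topology sysctl; the handler only schedules a topology update when the mode actually changes.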

arch/s390/mm/gup.c
@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
-	unsigned long mask, result;
 	struct page *head, *page;
+	unsigned long mask;
 	int refs;
 
-	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
-	mask = result | _SEGMENT_ENTRY_INVALID;
-	if ((pmd_val(pmd) & mask) != result)
+	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
+	if ((pmd_val(pmd) & mask) != 0)
 		return 0;
 	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
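
The old test had its sense inverted: a write fast-path must be refused when _SEGMENT_ENTRY_PROTECT is set, while a read must not require the bit to be set. A small stand-alone comparison of the two checks; the bit values are stand-ins chosen to match the s390 definitions, and only their distinctness matters here:

#include <stdio.h>

#define _SEGMENT_ENTRY_INVALID	0x20UL
#define _SEGMENT_ENTRY_PROTECT	0x200UL

static int old_check(unsigned long pmd, int write)
{
	unsigned long result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
	unsigned long mask = result | _SEGMENT_ENTRY_INVALID;

	return (pmd & mask) == result;	/* 1 = fast path allowed */
}

static int new_check(unsigned long pmd, int write)
{
	unsigned long mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) |
			     _SEGMENT_ENTRY_INVALID;

	return (pmd & mask) == 0;	/* 1 = fast path allowed */
}

int main(void)
{
	unsigned long prot = _SEGMENT_ENTRY_PROTECT;	/* read-only huge pmd */
	unsigned long rw = 0;				/* writable huge pmd   */

	/* Old: write wrongly allowed on the protected pmd, read wrongly
	 * refused on the writable one. */
	printf("old: write(prot)=%d read(rw)=%d\n", old_check(prot, 1), old_check(rw, 0));
	/* New: write refused on the protected pmd, read allowed. */
	printf("new: write(prot)=%d read(rw)=%d\n", new_check(prot, 1), new_check(rw, 0));
	return 0;
}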

drivers/s390/block/dasd.c
@@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 	dasd_schedule_device_bh(device);
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
-		blk_mq_run_hw_queues(device->block->request_queue, true);
+		if (device->block->request_queue)
+			blk_mq_run_hw_queues(device->block->request_queue,
+					     true);
 	}
 }
 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
 	dasd_schedule_device_bh(device);
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
-		blk_mq_run_hw_queues(device->block->request_queue, true);
+		if (device->block->request_queue)
+			blk_mq_run_hw_queues(device->block->request_queue,
+					     true);
 	}
 
 	if (!device->stopped)
@@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
-		blk_mq_run_hw_queues(device->block->request_queue, true);
+		if (device->block->request_queue)
+			blk_mq_run_hw_queues(device->block->request_queue,
+					     true);
 	}
 
 	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);

drivers/s390/block/scm_blk.c
@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq)
 static void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
-	int *error;
+	blk_status_t *error;
 	int i;
 
 	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 static void scm_blk_request_done(struct request *req)
 {
-	int *error = blk_mq_rq_to_pdu(req);
+	blk_status_t *error = blk_mq_rq_to_pdu(req);
 
 	blk_mq_end_request(req, *error);
 }
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	atomic_set(&bdev->queued_reqs, 0);
 
 	bdev->tag_set.ops = &scm_mq_ops;
-	bdev->tag_set.cmd_size = sizeof(int);
+	bdev->tag_set.cmd_size = sizeof(blk_status_t);
 	bdev->tag_set.nr_hw_queues = nr_requests;
 	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
 	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

drivers/s390/cio/device.c
@@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev)
 static int recovery_check(struct device *dev, void *data)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
+	struct subchannel *sch;
 	int *redo = data;
 
 	spin_lock_irq(cdev->ccwlock);
 	switch (cdev->private->state) {
+	case DEV_STATE_ONLINE:
+		sch = to_subchannel(cdev->dev.parent);
+		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
+			break;
+		/* fall through */
 	case DEV_STATE_DISCONNECTED:
 		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
 			      cdev->private->dev_id.ssid,
@@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused)
 		}
 		spin_unlock_irq(&recovery_lock);
 	} else
-		CIO_MSG_EVENT(4, "recovery: end\n");
+		CIO_MSG_EVENT(3, "recovery: end\n");
 }
 
 static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data)
 	schedule_work(&recovery_work);
 }
 
-static void ccw_device_schedule_recovery(void)
+void ccw_device_schedule_recovery(void)
 {
 	unsigned long flags;
 
-	CIO_MSG_EVENT(4, "recovery: schedule\n");
+	CIO_MSG_EVENT(3, "recovery: schedule\n");
 	spin_lock_irqsave(&recovery_lock, flags);
 	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
 		recovery_phase = 0;

drivers/s390/cio/device.h
@@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev);
 void ccw_device_set_notoper(struct ccw_device *cdev);
 void ccw_device_set_timeout(struct ccw_device *, int);
+void ccw_device_schedule_recovery(void);
 
 /* Channel measurement facility related */
 void retry_set_schib(struct ccw_device *cdev);

drivers/s390/cio/device_fsm.c
@@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type)
 	}
 }
 
+static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
+
+	if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
+		ccw_device_schedule_recovery();
+
+	cdev->private->path_broken_mask = broken_paths;
+}
+
 void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
 	struct subchannel *sch;
@@ -508,6 +519,7 @@ void ccw_device_verify_done(struct ccw_device *cdev, int err)
 			memset(&cdev->private->irb, 0, sizeof(struct irb));
 		}
 		ccw_device_report_path_events(cdev);
+		ccw_device_handle_broken_paths(cdev);
 		break;
 	case -ETIME:
 	case -EUSERS:
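
In the new helper, (pam & opm) is the set of paths that ought to be usable and vpm the set that actually verified, so their XOR yields the paths considered broken; recovery is only scheduled when that set is non-empty and differs from the last recorded one. A stand-alone sketch with hypothetical path masks:

#include <stdio.h>

int main(void)
{
	/* One bit per channel path; the values are made up for illustration. */
	unsigned char pam = 0xC0;	/* paths available on the subchannel */
	unsigned char opm = 0xC0;	/* paths usable from the OS view     */
	unsigned char vpm = 0x80;	/* paths that actually verified OK   */
	unsigned char broken = (pam & opm) ^ vpm;

	/* Prints 0x40: the second path should be usable but did not verify,
	 * so recovery would be scheduled for it. */
	printf("broken paths: 0x%02x\n", broken);
	return 0;
}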

drivers/s390/cio/io_sch.h
@@ -131,6 +131,8 @@ struct ccw_device_private {
 					   not operable */
 	u8 path_gone_mask;	/* mask of paths, that became unavailable */
 	u8 path_new_mask;	/* mask of paths, that became available */
+	u8 path_broken_mask;	/* mask of paths, which were found to be
+				   unusable */
 	struct {
 		unsigned int fast:1;	/* post with "channel end" */
 		unsigned int repall:1;	/* report every interrupt status */