Commit 85736168 authored by Linus Torvalds

Merge tag 'char-misc-5.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "Here are some small char and misc driver fixes for 5.15-rc3.

  Nothing huge in here, just fixes for a number of small issues that
  have been reported. These include:

   - habanalabs race conditions and other bugs fixed

   - binder driver fixes

   - fpga driver fixes

   - coresight build warning fix

   - nvmem driver fix

   - comedi memory leak fix

   - bcm-vk tty race fix

   - other tiny driver fixes

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-5.15-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (21 commits)
  comedi: Fix memory leak in compat_insnlist()
  nvmem: NVMEM_NINTENDO_OTP should depend on WII
  misc: bcm-vk: fix tty registration race
  fpga: dfl: Avoid reads to AFU CSRs during enumeration
  fpga: machxo2-spi: Fix missing error code in machxo2_write_complete()
  fpga: machxo2-spi: Return an error on failure
  habanalabs: expose a single cs seq in staged submissions
  habanalabs: fix wait offset handling
  habanalabs: rate limit multi CS completion errors
  habanalabs/gaudi: fix LBW RR configuration
  habanalabs: Fix spelling mistake "FEADBACK" -> "FEEDBACK"
  habanalabs: fail collective wait when not supported
  habanalabs/gaudi: use direct MSI in single mode
  habanalabs: fix kernel OOPs related to staged cs
  habanalabs: fix potential race in interrupt wait ioctl
  mcb: fix error handling in mcb_alloc_bus()
  misc: genwqe: Fixes DMA mask setting
  coresight: syscfg: Fix compiler warning
  nvmem: core: Add stubs for nvmem_cell_read_variable_le_u32/64 if !CONFIG_NVMEM
  binder: make sure fd closes complete
  ...
parents 9cbef308 bb509a6f
...@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd) ...@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
} }
static void binder_transaction_buffer_release(struct binder_proc *proc, static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer, struct binder_buffer *buffer,
binder_size_t failed_at, binder_size_t failed_at,
bool is_failure) bool is_failure)
...@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, ...@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
&proc->alloc, &fd, buffer, &proc->alloc, &fd, buffer,
offset, sizeof(fd)); offset, sizeof(fd));
WARN_ON(err); WARN_ON(err);
if (!err) if (!err) {
binder_deferred_fd_close(fd); binder_deferred_fd_close(fd);
/*
* Need to make sure the thread goes
* back to userspace to complete the
* deferred close
*/
if (thread)
thread->looper_need_return = true;
}
} }
} break; } break;
default: default:
...@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) { if (reply) {
binder_enqueue_thread_work(thread, tcomplete); binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc); binder_inner_proc_lock(target_proc);
if (target_thread->is_dead || target_proc->is_frozen) { if (target_thread->is_dead) {
return_error = target_thread->is_dead ? return_error = BR_DEAD_REPLY;
BR_DEAD_REPLY : BR_FROZEN_REPLY;
binder_inner_proc_unlock(target_proc); binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread; goto err_dead_proc_or_thread;
} }
...@@ -3105,7 +3113,7 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -3105,7 +3113,7 @@ static void binder_transaction(struct binder_proc *proc,
err_copy_data_failed: err_copy_data_failed:
binder_free_txn_fixups(t); binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer); trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, binder_transaction_buffer_release(target_proc, NULL, t->buffer,
buffer_offset, true); buffer_offset, true);
if (target_node) if (target_node)
binder_dec_node_tmpref(target_node); binder_dec_node_tmpref(target_node);
...@@ -3184,7 +3192,9 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -3184,7 +3192,9 @@ static void binder_transaction(struct binder_proc *proc,
* Cleanup buffer and free it. * Cleanup buffer and free it.
*/ */
static void static void
binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) binder_free_buf(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer)
{ {
binder_inner_proc_lock(proc); binder_inner_proc_lock(proc);
if (buffer->transaction) { if (buffer->transaction) {
...@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) ...@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_node_inner_unlock(buf_node); binder_node_inner_unlock(buf_node);
} }
trace_binder_transaction_buffer_release(buffer); trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, 0, false); binder_transaction_buffer_release(proc, thread, buffer, 0, false);
binder_alloc_free_buf(&proc->alloc, buffer); binder_alloc_free_buf(&proc->alloc, buffer);
} }
...@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc, ...@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr, proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id, buffer->debug_id,
buffer->transaction ? "active" : "finished"); buffer->transaction ? "active" : "finished");
binder_free_buf(proc, buffer); binder_free_buf(proc, thread, buffer);
break; break;
} }
...@@ -4107,7 +4117,7 @@ static int binder_thread_read(struct binder_proc *proc, ...@@ -4107,7 +4117,7 @@ static int binder_thread_read(struct binder_proc *proc,
buffer->transaction = NULL; buffer->transaction = NULL;
binder_cleanup_transaction(t, "fd fixups failed", binder_cleanup_transaction(t, "fd fixups failed",
BR_FAILED_REPLY); BR_FAILED_REPLY);
binder_free_buf(proc, buffer); binder_free_buf(proc, thread, buffer);
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
proc->pid, thread->pid, proc->pid, thread->pid,
...@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, ...@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
return 0; return 0;
} }
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
struct rb_node *n;
struct binder_thread *thread;
if (proc->outstanding_txns > 0)
return true;
for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
thread = rb_entry(n, struct binder_thread, rb_node);
if (thread->transaction_stack)
return true;
}
return false;
}
static int binder_ioctl_freeze(struct binder_freeze_info *info, static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc) struct binder_proc *target_proc)
{ {
...@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info, ...@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
(!target_proc->outstanding_txns), (!target_proc->outstanding_txns),
msecs_to_jiffies(info->timeout_ms)); msecs_to_jiffies(info->timeout_ms));
if (!ret && target_proc->outstanding_txns) /* Check pending transactions that wait for reply */
if (ret >= 0) {
binder_inner_proc_lock(target_proc);
if (binder_txns_pending_ilocked(target_proc))
ret = -EAGAIN; ret = -EAGAIN;
binder_inner_proc_unlock(target_proc);
}
if (ret < 0) { if (ret < 0) {
binder_inner_proc_lock(target_proc); binder_inner_proc_lock(target_proc);
...@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info( ...@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
{ {
struct binder_proc *target_proc; struct binder_proc *target_proc;
bool found = false; bool found = false;
__u32 txns_pending;
info->sync_recv = 0; info->sync_recv = 0;
info->async_recv = 0; info->async_recv = 0;
...@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info( ...@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
if (target_proc->pid == info->pid) { if (target_proc->pid == info->pid) {
found = true; found = true;
binder_inner_proc_lock(target_proc); binder_inner_proc_lock(target_proc);
info->sync_recv |= target_proc->sync_recv; txns_pending = binder_txns_pending_ilocked(target_proc);
info->sync_recv |= target_proc->sync_recv |
(txns_pending << 1);
info->async_recv |= target_proc->async_recv; info->async_recv |= target_proc->async_recv;
binder_inner_proc_unlock(target_proc); binder_inner_proc_unlock(target_proc);
} }
......
...@@ -378,6 +378,8 @@ struct binder_ref { ...@@ -378,6 +378,8 @@ struct binder_ref {
* binder transactions * binder transactions
* (protected by @inner_lock) * (protected by @inner_lock)
* @sync_recv: process received sync transactions since last frozen * @sync_recv: process received sync transactions since last frozen
* bit 0: received sync transaction after being frozen
* bit 1: new pending sync transaction during freezing
* (protected by @inner_lock) * (protected by @inner_lock)
* @async_recv: process received async transactions since last frozen * @async_recv: process received async transactions since last frozen
* (protected by @inner_lock) * (protected by @inner_lock)
......
...@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg) ...@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
mutex_lock(&dev->mutex); mutex_lock(&dev->mutex);
rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file); rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
mutex_unlock(&dev->mutex); mutex_unlock(&dev->mutex);
kfree(insns);
return rc; return rc;
} }
......
...@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo, ...@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
{ {
unsigned int irq_base, nr_irqs; unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo; struct dfl_feature_info *finfo;
u8 revision = 0;
int ret; int ret;
u8 revision;
u64 v; u64 v;
if (fid != FEATURE_ID_AFU) {
v = readq(binfo->ioaddr + ofst); v = readq(binfo->ioaddr + ofst);
revision = FIELD_GET(DFH_REVISION, v); revision = FIELD_GET(DFH_REVISION, v);
/* read feature size and id if inputs are invalid */ /* read feature size and id if inputs are invalid */
size = size ? size : feature_size(v); size = size ? size : feature_size(v);
fid = fid ? fid : feature_id(v); fid = fid ? fid : feature_id(v);
}
if (binfo->len - ofst < size) if (binfo->len - ofst < size)
return -EINVAL; return -EINVAL;
......
...@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr, ...@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
goto fail; goto fail;
get_status(spi, &status); get_status(spi, &status);
if (test_bit(FAIL, &status)) if (test_bit(FAIL, &status)) {
ret = -EINVAL;
goto fail; goto fail;
}
dump_status_reg(&status); dump_status_reg(&status);
spi_message_init(&msg); spi_message_init(&msg);
...@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr, ...@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
dump_status_reg(&status); dump_status_reg(&status);
if (!test_bit(DONE, &status)) { if (!test_bit(DONE, &status)) {
machxo2_cleanup(mgr); machxo2_cleanup(mgr);
ret = -EINVAL;
goto fail; goto fail;
} }
...@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr, ...@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
break; break;
if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) { if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
machxo2_cleanup(mgr); machxo2_cleanup(mgr);
ret = -EINVAL;
goto fail; goto fail;
} }
} while (1); } while (1);
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
*/ */
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/slab.h>
#include "coresight-config.h" #include "coresight-config.h"
#include "coresight-etm-perf.h" #include "coresight-etm-perf.h"
......
...@@ -275,8 +275,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier) ...@@ -275,8 +275,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
if (bus_nr < 0) { if (bus_nr < 0) {
rc = bus_nr; kfree(bus);
goto err_free; return ERR_PTR(bus_nr);
} }
bus->bus_nr = bus_nr; bus->bus_nr = bus_nr;
...@@ -291,12 +291,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier) ...@@ -291,12 +291,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
dev_set_name(&bus->dev, "mcb:%d", bus_nr); dev_set_name(&bus->dev, "mcb:%d", bus_nr);
rc = device_add(&bus->dev); rc = device_add(&bus->dev);
if (rc) if (rc)
goto err_free; goto err_put;
return bus; return bus;
err_free:
put_device(carrier); err_put:
kfree(bus); put_device(&bus->dev);
return ERR_PTR(rc); return ERR_PTR(rc);
} }
EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB); EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
......
...@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name) ...@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
struct device *tty_dev; struct device *tty_dev;
tty_port_init(&vk->tty[i].port); tty_port_init(&vk->tty[i].port);
tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv, tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
i, dev); tty_drv, i, dev, vk,
NULL);
if (IS_ERR(tty_dev)) { if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev); err = PTR_ERR(tty_dev);
goto unwind; goto unwind;
} }
dev_set_drvdata(tty_dev, vk);
vk->tty[i].is_opened = false; vk->tty[i].is_opened = false;
} }
......
...@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) ...@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/* check for 64-bit DMA address supported (DAC) */ /* check for 64-bit DMA address supported (DAC) */
/* check for 32-bit DMA address supported (SAC) */ /* check for 32-bit DMA address supported (SAC) */
if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) || if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
dev_err(&pci_dev->dev, dev_err(&pci_dev->dev,
"err: neither DMA32 nor DMA64 supported\n"); "err: neither DMA32 nor DMA64 supported\n");
......
...@@ -405,7 +405,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) ...@@ -405,7 +405,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{ {
bool next_entry_found = false; bool next_entry_found = false;
struct hl_cs *next; struct hl_cs *next, *first_cs;
if (!cs_needs_timeout(cs)) if (!cs_needs_timeout(cs))
return; return;
...@@ -415,9 +415,16 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) ...@@ -415,9 +415,16 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
/* We need to handle tdr only once for the complete staged submission. /* We need to handle tdr only once for the complete staged submission.
* Hence, we choose the CS that reaches this function first which is * Hence, we choose the CS that reaches this function first which is
* the CS marked as 'staged_last'. * the CS marked as 'staged_last'.
* In case single staged cs was submitted which has both first and last
* indications, then "cs_find_first" below will return NULL, since we
* removed the cs node from the list before getting here,
* in such cases just continue with the cs to cancel it's TDR work.
*/ */
if (cs->staged_cs && cs->staged_last) if (cs->staged_cs && cs->staged_last) {
cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
if (first_cs)
cs = first_cs;
}
spin_unlock(&hdev->cs_mirror_lock); spin_unlock(&hdev->cs_mirror_lock);
...@@ -1288,6 +1295,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, ...@@ -1288,6 +1295,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
if (rc) if (rc)
goto free_cs_object; goto free_cs_object;
/* If this is a staged submission we must return the staged sequence
* rather than the internal CS sequence
*/
if (cs->staged_cs)
*cs_seq = cs->staged_sequence;
/* Validate ALL the CS chunks before submitting the CS */ /* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) { for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i]; struct hl_cs_chunk *chunk = &cs_chunk_array[i];
...@@ -1988,6 +2001,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, ...@@ -1988,6 +2001,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_chunk_array; goto free_cs_chunk_array;
} }
if (!hdev->nic_ports_mask) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Collective operations not supported when NIC ports are disabled");
rc = -EINVAL;
goto free_cs_chunk_array;
}
collective_engine_id = chunk->collective_engine_id; collective_engine_id = chunk->collective_engine_id;
} }
...@@ -2026,9 +2048,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, ...@@ -2026,9 +2048,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
spin_unlock(&ctx->sig_mgr.lock); spin_unlock(&ctx->sig_mgr.lock);
if (!handle_found) { if (!handle_found) {
dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n", /* treat as signal CS already finished */
dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
signal_seq); signal_seq);
rc = -EINVAL; rc = 0;
goto free_cs_chunk_array; goto free_cs_chunk_array;
} }
...@@ -2613,7 +2636,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) ...@@ -2613,7 +2636,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* completed after the poll function. * completed after the poll function.
*/ */
if (!mcs_data.completion_bitmap) { if (!mcs_data.completion_bitmap) {
dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n"); dev_warn_ratelimited(hdev->dev,
"Multi-CS got completion on wait but no CS completed\n");
rc = -EFAULT; rc = -EFAULT;
} }
} }
...@@ -2740,10 +2764,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, ...@@ -2740,10 +2764,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else else
interrupt = &hdev->user_interrupt[interrupt_offset]; interrupt = &hdev->user_interrupt[interrupt_offset];
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
/* We check for completion value as interrupt could have been received
* before we added the node to the wait list
*/
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) { if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n"); dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT; rc = -EFAULT;
goto free_fence; goto remove_pending_user_interrupt;
} }
if (completion_value >= target_value) if (completion_value >= target_value)
...@@ -2752,14 +2786,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, ...@@ -2752,14 +2786,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
*status = CS_WAIT_STATUS_BUSY; *status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED)) if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
goto free_fence; goto remove_pending_user_interrupt;
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
wait_again: wait_again:
/* Wait for interrupt handler to signal completion */ /* Wait for interrupt handler to signal completion */
...@@ -2770,6 +2797,15 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, ...@@ -2770,6 +2797,15 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* If comparison fails, keep waiting until timeout expires * If comparison fails, keep waiting until timeout expires
*/ */
if (completion_rc > 0) { if (completion_rc > 0) {
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
/* reinit_completion must be called before we check for user
* completion value, otherwise, if interrupt is received after
* the comparison and before the next wait_for_completion,
* we will reach timeout and fail
*/
reinit_completion(&pend->fence.completion);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) { if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n"); dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT; rc = -EFAULT;
...@@ -2780,11 +2816,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, ...@@ -2780,11 +2816,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
if (completion_value >= target_value) { if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED; *status = CS_WAIT_STATUS_COMPLETED;
} else { } else {
spin_lock_irqsave(&interrupt->wait_list_lock, flags);
reinit_completion(&pend->fence.completion);
timeout = completion_rc; timeout = completion_rc;
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again; goto wait_again;
} }
} else if (completion_rc == -ERESTARTSYS) { } else if (completion_rc == -ERESTARTSYS) {
...@@ -2802,7 +2834,6 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, ...@@ -2802,7 +2834,6 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
list_del(&pend->wait_list_node); list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
free_fence:
kfree(pend); kfree(pend);
hl_ctx_put(ctx); hl_ctx_put(ctx);
......
...@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev, ...@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs_compl *cs_cmpl) struct hl_cs_compl *cs_cmpl)
{ {
struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl; struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
u32 offset = 0;
cs_cmpl->hw_sob = handle->hw_sob; cs_cmpl->hw_sob = handle->hw_sob;
...@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev, ...@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
* set offset 1 for example he mean to wait only for the first * set offset 1 for example he mean to wait only for the first
* signal only, which will be pre_sob_val, and if he set offset 2 * signal only, which will be pre_sob_val, and if he set offset 2
* then the value required is (pre_sob_val + 1) and so on... * then the value required is (pre_sob_val + 1) and so on...
* if user set wait offset to 0, then treat it as legacy wait cs,
* wait for the next signal.
*/ */
cs_cmpl->sob_val = handle->pre_sob_val + if (job->encaps_sig_wait_offset)
(job->encaps_sig_wait_offset - 1); offset = job->encaps_sig_wait_offset - 1;
cs_cmpl->sob_val = handle->pre_sob_val + offset;
} }
static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs, static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
......
...@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = { ...@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = { static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
{ .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" }, { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
{ .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" }, { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
{ .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" }, { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
{ .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" }, { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
{ .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" }, { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
...@@ -5802,6 +5802,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, ...@@ -5802,6 +5802,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
{ {
struct gaudi_device *gaudi = hdev->asic_specific; struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt; struct packet_msg_prot *cq_pkt;
u64 msi_addr;
u32 tmp; u32 tmp;
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
...@@ -5823,10 +5824,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev, ...@@ -5823,10 +5824,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
cq_pkt->ctl = cpu_to_le32(tmp); cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1); cq_pkt->value = cpu_to_le32(1);
if (!gaudi->multi_msi_mode) if (gaudi->multi_msi_mode)
msi_vec = 0; msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
else
msi_addr = mmPCIE_CORE_MSI_REQ;
cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4); cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
} }
static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val) static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
......
...@@ -308,6 +308,8 @@ ...@@ -308,6 +308,8 @@
#define mmPCIE_AUX_FLR_CTRL 0xC07394 #define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490 #define mmPCIE_AUX_DBI 0xC07490
#define mmPCIE_CORE_MSI_REQ 0xC04100
#define mmPSOC_PCI_PLL_NR 0xC72100 #define mmPSOC_PCI_PLL_NR 0xC72100
#define mmSRAM_W_PLL_NR 0x4C8100 #define mmSRAM_W_PLL_NR 0x4C8100
#define mmPSOC_HBM_PLL_NR 0xC74100 #define mmPSOC_HBM_PLL_NR 0xC74100
......
...@@ -109,6 +109,7 @@ config MTK_EFUSE ...@@ -109,6 +109,7 @@ config MTK_EFUSE
config NVMEM_NINTENDO_OTP config NVMEM_NINTENDO_OTP
tristate "Nintendo Wii and Wii U OTP Support" tristate "Nintendo Wii and Wii U OTP Support"
depends on WII || COMPILE_TEST
help help
This is a driver exposing the OTP of a Nintendo Wii or Wii U console. This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
......
...@@ -150,6 +150,20 @@ static inline int nvmem_cell_read_u64(struct device *dev, ...@@ -150,6 +150,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
const char *cell_id,
u32 *val)
{
return -EOPNOTSUPP;
}
static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
const char *cell_id,
u64 *val)
{
return -EOPNOTSUPP;
}
static inline struct nvmem_device *nvmem_device_get(struct device *dev, static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name) const char *name)
{ {
......
...@@ -225,7 +225,14 @@ struct binder_freeze_info { ...@@ -225,7 +225,14 @@ struct binder_freeze_info {
struct binder_frozen_status_info { struct binder_frozen_status_info {
__u32 pid; __u32 pid;
/* process received sync transactions since last frozen
* bit 0: received sync transaction after being frozen
* bit 1: new pending sync transaction during freezing
*/
__u32 sync_recv; __u32 sync_recv;
/* process received async transactions since last frozen */
__u32 async_recv; __u32 async_recv;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment