Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
This is the 4.14.118 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlzVnmsACgkQONu9yGCS
aT6XoBAAzvBXcFwl1t1ELq5YJ9qsChTdnjF15ohgVFaWdM4SDKIM/Unv07P4x8kN
BLqK+5u9mjj8jc51WT4GMGFNa2u4bwGtk4yTSEiRWKvZMZlihKBu3CSKD+EL38RF
h0J8Z8tPHrfuaByggSI+xd0Pwkf0Ofc2JMoRb8u8MlsQzHMMFbY4brqKxgfl8Zi7
izd25V38rioKs2cSWqQVDjelPRQtOWkKclvwT2WHRQXqC3L2mZ/FsFiHNj/CoptD
bQLnycDD9aPx/9kgcVhodPA4xBCuV93EUi9cFb/9SFWPB/JaHCkO1GV0v7DL9YrD
RZ7rATAOgXYpTGrM3YK388a7hF4u98G+cGW6v4lCe5uOKFQcUMZVCys3b4rmqgQS
c920wwmyUJL+i+48Z+LuK4yLHzfL/YH+3AVYUQ2CSjXFibXi0agKOFfvIYnXZCNI
9ut9N/cNkALib0IwWcadx0uCaq8u1meVoTJ2SQtcIhWU4MKNtqPLWA9vMH98U5M2
lP0lVaBePXU6XqrlGC/RnR1fH2iqmqosmmwi+XAMHqqxMfHIg5blxoD2nTEUeGcr
ow54Tp6KSo9TsoHOyzDcB7d7iMaTEcLIKFGbpRkcMj/n3itdpdq7XxYQpGzDpqjq
/V/8cfSqIwfPQ1rdByXITxfl8pYy0+ybYFVsE032ZAyzUkA+vsw=
=OrBm
-----END PGP SIGNATURE-----

Merge 4.14.118 into android-4.14

Changes in 4.14.118
	scsi: libsas: fix a race condition when smp task timeout
	Drivers: hv: vmbus: Remove the undesired put_cpu_ptr() in hv_synic_cleanup()
	ubsan: Fix nasty -Wbuiltin-declaration-mismatch GCC-9 warnings
	staging: greybus: power_supply: fix prop-descriptor request size
	ASoC: hdmi-codec: fix S/PDIF DAI
	ASoC:soc-pcm:fix a codec fixup issue in TDM case
	ASoC: nau8824: fix the issue of the widget with prefix name
	ASoC: nau8810: fix the issue of widget with prefixed name
	ASoC: samsung: odroid: Fix clock configuration for 44100 sample rate
	ASoC: wm_adsp: Add locking to wm_adsp2_bus_error
	ASoC: cs4270: Set auto-increment bit for register writes
	IB/hfi1: Eliminate opcode tests on mr deref
	MIPS: KGDB: fix kgdb support for SMP platforms.
	ASoC: tlv320aic32x4: Fix Common Pins
	drm/mediatek: Fix an error code in mtk_hdmi_dt_parse_pdata()
	perf/x86/intel: Fix handling of wakeup_events for multi-entry PEBS
	perf/x86/intel: Initialize TFA MSR
	linux/kernel.h: Use parentheses around argument in u64_to_user_ptr()
	ASoC: rockchip: pdm: fix regmap_ops hang issue
	slab: fix a crash by reading /proc/slab_allocators
	virtio_pci: fix a NULL pointer reference in vp_del_vqs
	RDMA/vmw_pvrdma: Fix memory leak on pvrdma_pci_remove
	scsi: csiostor: fix missing data copy in csio_scsi_err_handler()
	drm/mediatek: fix possible object reference leak
	ASoC: Intel: kbl: fix wrong number of channels
	virtio-blk: limit number of hw queues by nr_cpu_ids
	platform/x86: pmc_atom: Drop __initconst on dmi table
	iommu/amd: Set exclusion range correctly
	genirq: Prevent use-after-free and work list corruption
	usb: dwc3: Fix default lpm_nyet_threshold value
	USB: serial: f81232: fix interrupt worker not stop
	USB: cdc-acm: fix unthrottle races
	usb-storage: Set virt_boundary_mask to avoid SG overflows
	intel_th: pci: Add Comet Lake support
	scsi: qla2xxx: Fix incorrect region-size setting in optrom SYSFS routines
	Bluetooth: hidp: fix buffer overflow
	Bluetooth: Align minimum encryption key size for LE and BR/EDR connections
	UAS: fix alignment of scatter/gather segments
	ASoC: Intel: avoid Oops if DMA setup fails
	locking/futex: Allow low-level atomic operations to return -EAGAIN
	arm64: futex: Bound number of LDXR/STXR loops in FUTEX_WAKE_OP
	Linux 4.14.118

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
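The last two entries in the changelog above (the futex -EAGAIN plumbing and the bounded LDXR/STXR loop on arm64) are the core of this release; the hunks further down show the kernel-side implementation. As rough orientation only, here is a minimal user-space C11 sketch of the same bounded-retry idea — the function name and the use of generic atomics instead of the arm64 LL/SC assembly are illustrative assumptions, not the kernel's code; only the FUTEX_MAX_LOOPS value echoes the arm64 hunk below:

    #include <errno.h>
    #include <stdatomic.h>

    #define FUTEX_MAX_LOOPS 128 /* same bound the arm64 header below introduces */

    /*
     * Bounded compare-and-swap: rather than spinning indefinitely while
     * another thread keeps writing *uaddr, give up after a fixed number
     * of attempts and return -EAGAIN so the caller can reschedule and retry.
     */
    static int bounded_cmpxchg(_Atomic unsigned int *uaddr,
                               unsigned int oldval, unsigned int newval)
    {
        unsigned int expected = oldval;
        int loops = FUTEX_MAX_LOOPS;

        while (!atomic_compare_exchange_weak(uaddr, &expected, newval)) {
            if (expected != oldval)
                return -EAGAIN; /* value changed under us: caller retries */
            if (--loops == 0)
                return -EAGAIN; /* contention bound hit */
            expected = oldval;  /* spurious failure: try again */
        }
        return 0;
    }

In the real patches the bound lives in the arm64 __futex_atomic_op() and futex_atomic_cmpxchg_inatomic() assembly, and kernel/futex.c is taught to treat -EAGAIN from those helpers as "drop the locks, cond_resched(), retry", which is what the futex.c hunks below do.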
This commit is contained in: commit db393694d6

Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 117
+SUBLEVEL = 118
 EXTRAVERSION =
 NAME = Petit Gorille

@ -23,26 +23,34 @@
|
||||
|
||||
#include <asm/errno.h>
|
||||
|
||||
#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
|
||||
|
||||
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
|
||||
do { \
|
||||
unsigned int loops = FUTEX_MAX_LOOPS; \
|
||||
\
|
||||
uaccess_enable(); \
|
||||
asm volatile( \
|
||||
" prfm pstl1strm, %2\n" \
|
||||
"1: ldxr %w1, %2\n" \
|
||||
insn "\n" \
|
||||
"2: stlxr %w0, %w3, %2\n" \
|
||||
" cbnz %w0, 1b\n" \
|
||||
" dmb ish\n" \
|
||||
" cbz %w0, 3f\n" \
|
||||
" sub %w4, %w4, %w0\n" \
|
||||
" cbnz %w4, 1b\n" \
|
||||
" mov %w0, %w7\n" \
|
||||
"3:\n" \
|
||||
" dmb ish\n" \
|
||||
" .pushsection .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
"4: mov %w0, %w5\n" \
|
||||
"4: mov %w0, %w6\n" \
|
||||
" b 3b\n" \
|
||||
" .popsection\n" \
|
||||
_ASM_EXTABLE(1b, 4b) \
|
||||
_ASM_EXTABLE(2b, 4b) \
|
||||
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
|
||||
: "r" (oparg), "Ir" (-EFAULT) \
|
||||
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
|
||||
"+r" (loops) \
|
||||
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
|
||||
: "memory"); \
|
||||
uaccess_disable(); \
|
||||
} while (0)
|
||||
@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
|
||||
|
||||
switch (op) {
|
||||
case FUTEX_OP_SET:
|
||||
__futex_atomic_op("mov %w3, %w4",
|
||||
__futex_atomic_op("mov %w3, %w5",
|
||||
ret, oldval, uaddr, tmp, oparg);
|
||||
break;
|
||||
case FUTEX_OP_ADD:
|
||||
__futex_atomic_op("add %w3, %w1, %w4",
|
||||
__futex_atomic_op("add %w3, %w1, %w5",
|
||||
ret, oldval, uaddr, tmp, oparg);
|
||||
break;
|
||||
case FUTEX_OP_OR:
|
||||
__futex_atomic_op("orr %w3, %w1, %w4",
|
||||
__futex_atomic_op("orr %w3, %w1, %w5",
|
||||
ret, oldval, uaddr, tmp, oparg);
|
||||
break;
|
||||
case FUTEX_OP_ANDN:
|
||||
__futex_atomic_op("and %w3, %w1, %w4",
|
||||
__futex_atomic_op("and %w3, %w1, %w5",
|
||||
ret, oldval, uaddr, tmp, ~oparg);
|
||||
break;
|
||||
case FUTEX_OP_XOR:
|
||||
__futex_atomic_op("eor %w3, %w1, %w4",
|
||||
__futex_atomic_op("eor %w3, %w1, %w5",
|
||||
ret, oldval, uaddr, tmp, oparg);
|
||||
break;
|
||||
default:
|
||||
@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
|
||||
u32 oldval, u32 newval)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned int loops = FUTEX_MAX_LOOPS;
|
||||
u32 val, tmp;
|
||||
u32 __user *uaddr;
|
||||
|
||||
@ -104,20 +113,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
|
||||
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldxr %w1, %2\n"
|
||||
" sub %w3, %w1, %w4\n"
|
||||
" cbnz %w3, 3f\n"
|
||||
"2: stlxr %w3, %w5, %2\n"
|
||||
" cbnz %w3, 1b\n"
|
||||
" dmb ish\n"
|
||||
" sub %w3, %w1, %w5\n"
|
||||
" cbnz %w3, 4f\n"
|
||||
"2: stlxr %w3, %w6, %2\n"
|
||||
" cbz %w3, 3f\n"
|
||||
" sub %w4, %w4, %w3\n"
|
||||
" cbnz %w4, 1b\n"
|
||||
" mov %w0, %w8\n"
|
||||
"3:\n"
|
||||
" dmb ish\n"
|
||||
"4:\n"
|
||||
" .pushsection .fixup,\"ax\"\n"
|
||||
"4: mov %w0, %w6\n"
|
||||
" b 3b\n"
|
||||
"5: mov %w0, %w7\n"
|
||||
" b 4b\n"
|
||||
" .popsection\n"
|
||||
_ASM_EXTABLE(1b, 4b)
|
||||
_ASM_EXTABLE(2b, 4b)
|
||||
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
|
||||
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
|
||||
_ASM_EXTABLE(1b, 5b)
|
||||
_ASM_EXTABLE(2b, 5b)
|
||||
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
|
||||
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
|
||||
: "memory");
|
||||
uaccess_disable();
|
||||
|
||||
|
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <linux/uaccess.h>
+#include <asm/irq_regs.h>

 static struct hard_trap_info {
     unsigned char tt;   /* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ static void kgdb_call_nmi_hook(void *ignored)
     old_fs = get_fs();
     set_fs(get_ds());

-    kgdb_nmicallback(raw_smp_processor_id(), NULL);
+    kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());

     set_fs(old_fs);
 }

@@ -3051,7 +3051,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
         return ret;

     if (event->attr.precise_ip) {
-        if (!event->attr.freq) {
+        if (!(event->attr.freq || event->attr.wakeup_events)) {
             event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
             if (!(event->attr.sample_type &
                   ~intel_pmu_free_running_flags(event)))
@@ -3427,6 +3427,12 @@ static void intel_pmu_cpu_starting(int cpu)

     cpuc->lbr_sel = NULL;

+    if (x86_pmu.flags & PMU_FL_TFA) {
+        WARN_ON_ONCE(cpuc->tfa_shadow);
+        cpuc->tfa_shadow = ~0ULL;
+        intel_set_tfa(cpuc, false);
+    }
+
     if (x86_pmu.version > 1)
         flip_smm_bit(&x86_pmu.attr_freeze_on_smi);

@@ -437,6 +437,8 @@ static int init_vq(struct virtio_blk *vblk)
     if (err)
         num_vqs = 1;

+    num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
     vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
     if (!vblk->vqs)
         return -ENOMEM;

@@ -1473,7 +1473,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
     if (IS_ERR(regmap))
         ret = PTR_ERR(regmap);
     if (ret) {
-        ret = PTR_ERR(regmap);
         dev_err(dev,
             "Failed to get system configuration registers: %d\n",
             ret);
@@ -1509,6 +1508,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
     of_node_put(remote);

     hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+    of_node_put(i2c_np);
     if (!hdmi->ddc_adpt) {
         dev_err(dev, "Failed to get ddc i2c adapter by node\n");
         return -EINVAL;

@@ -356,7 +356,6 @@ int hv_synic_cleanup(unsigned int cpu)

         clockevents_unbind_device(hv_cpu->clk_evt, cpu);
         hv_ce_shutdown(hv_cpu->clk_evt);
-        put_cpu_ptr(hv_cpu);
     }

     hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,

@@ -173,6 +173,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
         PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
         .driver_data = (kernel_ulong_t)&intel_th_2x,
     },
+    {
+        /* Comet Lake */
+        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+        .driver_data = (kernel_ulong_t)&intel_th_2x,
+    },
     { 0 },
 };

@@ -2309,7 +2309,7 @@ send_last:
             update_ack_queue(qp, next);
         }
         e = &qp->s_ack_queue[qp->r_head_ack_queue];
-        if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+        if (e->rdma_sge.mr) {
             rvt_put_mr(e->rdma_sge.mr);
             e->rdma_sge.mr = NULL;
         }
@@ -2383,7 +2383,7 @@ send_last:
             update_ack_queue(qp, next);
         }
         e = &qp->s_ack_queue[qp->r_head_ack_queue];
-        if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+        if (e->rdma_sge.mr) {
             rvt_put_mr(e->rdma_sge.mr);
             e->rdma_sge.mr = NULL;
         }

@@ -1055,6 +1055,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
     pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
     pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
     pvrdma_free_slots(dev);
+    dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+              dev->dsrbase);

     iounmap(dev->regs);
     kfree(dev->sgid_tbl);

@@ -355,7 +355,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
     u64 start = iommu->exclusion_start & PAGE_MASK;
-    u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+    u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
     u64 entry;

     if (!iommu->exclusion_start)

@@ -426,7 +426,7 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
  * Some systems need one or more of their pmc_plt_clks to be
  * marked as critical.
  */
-static const struct dmi_system_id critclk_systems[] __initconst = {
+static const struct dmi_system_id critclk_systems[] = {
     {
         .ident = "MPL CEC1x",
         .matches = {

@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
     }

 out:
-    if (req->nsge > 0)
+    if (req->nsge > 0) {
         scsi_dma_unmap(cmnd);
+        if (req->dcopy && (host_status == DID_OK))
+            host_status = csio_scsi_copy_to_sgl(hw, req);
+    }

     cmnd->result = (((host_status) << 16) | scsi_status);
     cmnd->scsi_done(cmnd);

@@ -47,17 +47,16 @@ static void smp_task_timedout(unsigned long _task)
     unsigned long flags;

     spin_lock_irqsave(&task->task_state_lock, flags);
-    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+    if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
         task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+        complete(&task->slow_task->completion);
+    }
     spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-    complete(&task->slow_task->completion);
 }

 static void smp_task_done(struct sas_task *task)
 {
-    if (!del_timer(&task->slow_task->timer))
-        return;
+    del_timer(&task->slow_task->timer);
     complete(&task->slow_task->completion);
 }

@@ -345,7 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
         }

         ha->optrom_region_start = start;
-        ha->optrom_region_size = start + size;
+        ha->optrom_region_size = size;

         ha->optrom_state = QLA_SREADING;
         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -418,7 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
         }

         ha->optrom_region_start = start;
-        ha->optrom_region_size = start + size;
+        ha->optrom_region_size = size;

         ha->optrom_state = QLA_SWRITING;
         ha->optrom_buffer = vmalloc(ha->optrom_region_size);

@@ -521,7 +521,7 @@ static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)

     op = gb_operation_create(connection,
                  GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
-                 sizeof(req), sizeof(*resp) + props_count *
+                 sizeof(*req), sizeof(*resp) + props_count *
                  sizeof(struct gb_power_supply_props_desc),
                  GFP_KERNEL);
     if (!op)

@ -482,12 +482,12 @@ static void acm_read_bulk_callback(struct urb *urb)
|
||||
struct acm *acm = rb->instance;
|
||||
unsigned long flags;
|
||||
int status = urb->status;
|
||||
bool stopped = false;
|
||||
bool stalled = false;
|
||||
|
||||
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
|
||||
rb->index, urb->actual_length, status);
|
||||
|
||||
set_bit(rb->index, &acm->read_urbs_free);
|
||||
|
||||
if (!acm->dev) {
|
||||
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
|
||||
return;
|
||||
@ -500,15 +500,16 @@ static void acm_read_bulk_callback(struct urb *urb)
|
||||
break;
|
||||
case -EPIPE:
|
||||
set_bit(EVENT_RX_STALL, &acm->flags);
|
||||
schedule_work(&acm->work);
|
||||
return;
|
||||
stalled = true;
|
||||
break;
|
||||
case -ENOENT:
|
||||
case -ECONNRESET:
|
||||
case -ESHUTDOWN:
|
||||
dev_dbg(&acm->data->dev,
|
||||
"%s - urb shutting down with status: %d\n",
|
||||
__func__, status);
|
||||
return;
|
||||
stopped = true;
|
||||
break;
|
||||
default:
|
||||
dev_dbg(&acm->data->dev,
|
||||
"%s - nonzero urb status received: %d\n",
|
||||
@ -517,10 +518,24 @@ static void acm_read_bulk_callback(struct urb *urb)
|
||||
}
|
||||
|
||||
/*
|
||||
* Unthrottle may run on another CPU which needs to see events
|
||||
* in the same order. Submission has an implict barrier
|
||||
* Make sure URB processing is done before marking as free to avoid
|
||||
* racing with unthrottle() on another CPU. Matches the barriers
|
||||
* implied by the test_and_clear_bit() in acm_submit_read_urb().
|
||||
*/
|
||||
smp_mb__before_atomic();
|
||||
set_bit(rb->index, &acm->read_urbs_free);
|
||||
/*
|
||||
* Make sure URB is marked as free before checking the throttled flag
|
||||
* to avoid racing with unthrottle() on another CPU. Matches the
|
||||
* smp_mb() in unthrottle().
|
||||
*/
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (stopped || stalled) {
|
||||
if (stalled)
|
||||
schedule_work(&acm->work);
|
||||
return;
|
||||
}
|
||||
|
||||
/* throttle device if requested by tty */
|
||||
spin_lock_irqsave(&acm->read_lock, flags);
|
||||
@ -854,6 +869,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
|
||||
acm->throttle_req = 0;
|
||||
spin_unlock_irq(&acm->read_lock);
|
||||
|
||||
/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
|
||||
smp_mb();
|
||||
|
||||
if (was_throttled)
|
||||
acm_submit_read_urbs(acm, GFP_KERNEL);
|
||||
}
|
||||
|
@@ -1042,7 +1042,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
     u8 hird_threshold;

     /* default to highest possible threshold */
-    lpm_nyet_threshold = 0xff;
+    lpm_nyet_threshold = 0xf;

     /* default to -3.5dB de-emphasis */
     tx_de_emphasis = 1;

@ -560,9 +560,12 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
|
||||
|
||||
static void f81232_close(struct usb_serial_port *port)
|
||||
{
|
||||
struct f81232_private *port_priv = usb_get_serial_port_data(port);
|
||||
|
||||
f81232_port_disable(port);
|
||||
usb_serial_generic_close(port);
|
||||
usb_kill_urb(port->interrupt_in_urb);
|
||||
flush_work(&port_priv->interrupt_work);
|
||||
}
|
||||
|
||||
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
|
||||
@ -656,6 +659,40 @@ static int f81232_port_remove(struct usb_serial_port *port)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
|
||||
{
|
||||
struct usb_serial_port *port = serial->port[0];
|
||||
struct f81232_private *port_priv = usb_get_serial_port_data(port);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
|
||||
usb_kill_urb(port->read_urbs[i]);
|
||||
|
||||
usb_kill_urb(port->interrupt_in_urb);
|
||||
|
||||
if (port_priv)
|
||||
flush_work(&port_priv->interrupt_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int f81232_resume(struct usb_serial *serial)
|
||||
{
|
||||
struct usb_serial_port *port = serial->port[0];
|
||||
int result;
|
||||
|
||||
if (tty_port_initialized(&port->port)) {
|
||||
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
|
||||
if (result) {
|
||||
dev_err(&port->dev, "submit interrupt urb failed: %d\n",
|
||||
result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
return usb_serial_generic_resume(serial);
|
||||
}
|
||||
|
||||
static struct usb_serial_driver f81232_device = {
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
@ -679,6 +716,8 @@ static struct usb_serial_driver f81232_device = {
|
||||
.read_int_callback = f81232_read_int_callback,
|
||||
.port_probe = f81232_port_probe,
|
||||
.port_remove = f81232_port_remove,
|
||||
.suspend = f81232_suspend,
|
||||
.resume = f81232_resume,
|
||||
};
|
||||
|
||||
static struct usb_serial_driver * const serial_drivers[] = {
|
||||
|
@ -81,6 +81,7 @@ static const char* host_info(struct Scsi_Host *host)
|
||||
static int slave_alloc (struct scsi_device *sdev)
|
||||
{
|
||||
struct us_data *us = host_to_us(sdev->host);
|
||||
int maxp;
|
||||
|
||||
/*
|
||||
* Set the INQUIRY transfer length to 36. We don't use any of
|
||||
@ -90,20 +91,17 @@ static int slave_alloc (struct scsi_device *sdev)
|
||||
sdev->inquiry_len = 36;
|
||||
|
||||
/*
|
||||
* USB has unusual DMA-alignment requirements: Although the
|
||||
* starting address of each scatter-gather element doesn't matter,
|
||||
* the length of each element except the last must be divisible
|
||||
* by the Bulk maxpacket value. There's currently no way to
|
||||
* express this by block-layer constraints, so we'll cop out
|
||||
* and simply require addresses to be aligned at 512-byte
|
||||
* boundaries. This is okay since most block I/O involves
|
||||
* hardware sectors that are multiples of 512 bytes in length,
|
||||
* and since host controllers up through USB 2.0 have maxpacket
|
||||
* values no larger than 512.
|
||||
*
|
||||
* But it doesn't suffice for Wireless USB, where Bulk maxpacket
|
||||
* values can be as large as 2048. To make that work properly
|
||||
* will require changes to the block layer.
|
||||
* USB has unusual scatter-gather requirements: the length of each
|
||||
* scatterlist element except the last must be divisible by the
|
||||
* Bulk maxpacket value. Fortunately this value is always a
|
||||
* power of 2. Inform the block layer about this requirement.
|
||||
*/
|
||||
maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
|
||||
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
|
||||
|
||||
/*
|
||||
* Some host controllers may have alignment requirements.
|
||||
* We'll play it safe by requiring 512-byte alignment always.
|
||||
*/
|
||||
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
|
||||
|
||||
|
@ -796,24 +796,33 @@ static int uas_slave_alloc(struct scsi_device *sdev)
|
||||
{
|
||||
struct uas_dev_info *devinfo =
|
||||
(struct uas_dev_info *)sdev->host->hostdata;
|
||||
int maxp;
|
||||
|
||||
sdev->hostdata = devinfo;
|
||||
|
||||
/*
|
||||
* USB has unusual DMA-alignment requirements: Although the
|
||||
* starting address of each scatter-gather element doesn't matter,
|
||||
* the length of each element except the last must be divisible
|
||||
* by the Bulk maxpacket value. There's currently no way to
|
||||
* express this by block-layer constraints, so we'll cop out
|
||||
* and simply require addresses to be aligned at 512-byte
|
||||
* boundaries. This is okay since most block I/O involves
|
||||
* hardware sectors that are multiples of 512 bytes in length,
|
||||
* and since host controllers up through USB 2.0 have maxpacket
|
||||
* values no larger than 512.
|
||||
* We have two requirements here. We must satisfy the requirements
|
||||
* of the physical HC and the demands of the protocol, as we
|
||||
* definitely want no additional memory allocation in this path
|
||||
* ruling out using bounce buffers.
|
||||
*
|
||||
* But it doesn't suffice for Wireless USB, where Bulk maxpacket
|
||||
* values can be as large as 2048. To make that work properly
|
||||
* will require changes to the block layer.
|
||||
* For a transmission on USB to continue we must never send
|
||||
* a package that is smaller than maxpacket. Hence the length of each
|
||||
* scatterlist element except the last must be divisible by the
|
||||
* Bulk maxpacket value.
|
||||
* If the HC does not ensure that through SG,
|
||||
* the upper layer must do that. We must assume nothing
|
||||
* about the capabilities off the HC, so we use the most
|
||||
* pessimistic requirement.
|
||||
*/
|
||||
|
||||
maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
|
||||
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
|
||||
|
||||
/*
|
||||
* The protocol has no requirements on alignment in the strict sense.
|
||||
* Controllers may or may not have alignment restrictions.
|
||||
* As this is not exported, we use an extremely conservative guess.
|
||||
*/
|
||||
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
|
||||
|
||||
|
@@ -254,9 +254,11 @@ void vp_del_vqs(struct virtio_device *vdev)
     for (i = 0; i < vp_dev->msix_used_vectors; ++i)
         free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

-    for (i = 0; i < vp_dev->msix_vectors; i++)
-        if (vp_dev->msix_affinity_masks[i])
-            free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+    if (vp_dev->msix_affinity_masks) {
+        for (i = 0; i < vp_dev->msix_vectors; i++)
+            if (vp_dev->msix_affinity_masks[i])
+                free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+    }

     if (vp_dev->msix_enabled) {
         /* Disable the vector used for configuration */

@@ -72,8 +72,8 @@

 #define u64_to_user_ptr(x) ( \
 { \
-    typecheck(u64, x); \
-    (void __user *)(uintptr_t)x; \
+    typecheck(u64, (x)); \
+    (void __user *)(uintptr_t)(x); \
 } \
 )

@@ -178,6 +178,9 @@ struct adv_info {

 #define HCI_MAX_SHORT_NAME_LENGTH 10

+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE 7
+
 /* Default LE RPA expiry time, 15 minutes */
 #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)

kernel/futex.c (188 changed lines)
@ -1324,13 +1324,15 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
|
||||
|
||||
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
|
||||
{
|
||||
int err;
|
||||
u32 uninitialized_var(curval);
|
||||
|
||||
if (unlikely(should_fail_futex(true)))
|
||||
return -EFAULT;
|
||||
|
||||
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
|
||||
return -EFAULT;
|
||||
err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
/* If user space value changed, let the caller retry */
|
||||
return curval != uval ? -EAGAIN : 0;
|
||||
@ -1516,10 +1518,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
|
||||
if (unlikely(should_fail_futex(true)))
|
||||
ret = -EFAULT;
|
||||
|
||||
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
|
||||
ret = -EFAULT;
|
||||
|
||||
} else if (curval != uval) {
|
||||
ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
|
||||
if (!ret && (curval != uval)) {
|
||||
/*
|
||||
* If a unconditional UNLOCK_PI operation (user space did not
|
||||
* try the TID->0 transition) raced with a waiter setting the
|
||||
@ -1714,32 +1714,32 @@ retry_private:
|
||||
double_lock_hb(hb1, hb2);
|
||||
op_ret = futex_atomic_op_inuser(op, uaddr2);
|
||||
if (unlikely(op_ret < 0)) {
|
||||
|
||||
double_unlock_hb(hb1, hb2);
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
/*
|
||||
* we don't get EFAULT from MMU faults if we don't have an MMU,
|
||||
* but we might get them from range checking
|
||||
*/
|
||||
ret = op_ret;
|
||||
goto out_put_keys;
|
||||
#endif
|
||||
|
||||
if (unlikely(op_ret != -EFAULT)) {
|
||||
if (!IS_ENABLED(CONFIG_MMU) ||
|
||||
unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
|
||||
/*
|
||||
* we don't get EFAULT from MMU faults if we don't have
|
||||
* an MMU, but we might get them from range checking
|
||||
*/
|
||||
ret = op_ret;
|
||||
goto out_put_keys;
|
||||
}
|
||||
|
||||
ret = fault_in_user_writeable(uaddr2);
|
||||
if (ret)
|
||||
goto out_put_keys;
|
||||
if (op_ret == -EFAULT) {
|
||||
ret = fault_in_user_writeable(uaddr2);
|
||||
if (ret)
|
||||
goto out_put_keys;
|
||||
}
|
||||
|
||||
if (!(flags & FLAGS_SHARED))
|
||||
if (!(flags & FLAGS_SHARED)) {
|
||||
cond_resched();
|
||||
goto retry_private;
|
||||
}
|
||||
|
||||
put_futex_key(&key2);
|
||||
put_futex_key(&key1);
|
||||
cond_resched();
|
||||
goto retry;
|
||||
}
|
||||
|
||||
@ -2364,7 +2364,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
|
||||
u32 uval, uninitialized_var(curval), newval;
|
||||
struct task_struct *oldowner, *newowner;
|
||||
u32 newtid;
|
||||
int ret;
|
||||
int ret, err = 0;
|
||||
|
||||
lockdep_assert_held(q->lock_ptr);
|
||||
|
||||
@ -2435,14 +2435,17 @@ retry:
|
||||
if (!pi_state->owner)
|
||||
newtid |= FUTEX_OWNER_DIED;
|
||||
|
||||
if (get_futex_value_locked(&uval, uaddr))
|
||||
goto handle_fault;
|
||||
err = get_futex_value_locked(&uval, uaddr);
|
||||
if (err)
|
||||
goto handle_err;
|
||||
|
||||
for (;;) {
|
||||
newval = (uval & FUTEX_OWNER_DIED) | newtid;
|
||||
|
||||
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
|
||||
goto handle_fault;
|
||||
err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
|
||||
if (err)
|
||||
goto handle_err;
|
||||
|
||||
if (curval == uval)
|
||||
break;
|
||||
uval = curval;
|
||||
@ -2470,23 +2473,37 @@ retry:
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* To handle the page fault we need to drop the locks here. That gives
|
||||
* the other task (either the highest priority waiter itself or the
|
||||
* task which stole the rtmutex) the chance to try the fixup of the
|
||||
* pi_state. So once we are back from handling the fault we need to
|
||||
* check the pi_state after reacquiring the locks and before trying to
|
||||
* do another fixup. When the fixup has been done already we simply
|
||||
* return.
|
||||
* In order to reschedule or handle a page fault, we need to drop the
|
||||
* locks here. In the case of a fault, this gives the other task
|
||||
* (either the highest priority waiter itself or the task which stole
|
||||
* the rtmutex) the chance to try the fixup of the pi_state. So once we
|
||||
* are back from handling the fault we need to check the pi_state after
|
||||
* reacquiring the locks and before trying to do another fixup. When
|
||||
* the fixup has been done already we simply return.
|
||||
*
|
||||
* Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
|
||||
* drop hb->lock since the caller owns the hb -> futex_q relation.
|
||||
* Dropping the pi_mutex->wait_lock requires the state revalidate.
|
||||
*/
|
||||
handle_fault:
|
||||
handle_err:
|
||||
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
||||
spin_unlock(q->lock_ptr);
|
||||
|
||||
ret = fault_in_user_writeable(uaddr);
|
||||
switch (err) {
|
||||
case -EFAULT:
|
||||
ret = fault_in_user_writeable(uaddr);
|
||||
break;
|
||||
|
||||
case -EAGAIN:
|
||||
cond_resched();
|
||||
ret = 0;
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
ret = err;
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock(q->lock_ptr);
|
||||
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
|
||||
@ -3055,10 +3072,8 @@ retry:
|
||||
* A unconditional UNLOCK_PI op raced against a waiter
|
||||
* setting the FUTEX_WAITERS bit. Try again.
|
||||
*/
|
||||
if (ret == -EAGAIN) {
|
||||
put_futex_key(&key);
|
||||
goto retry;
|
||||
}
|
||||
if (ret == -EAGAIN)
|
||||
goto pi_retry;
|
||||
/*
|
||||
* wake_futex_pi has detected invalid state. Tell user
|
||||
* space.
|
||||
@ -3073,9 +3088,19 @@ retry:
|
||||
* preserve the WAITERS bit not the OWNER_DIED one. We are the
|
||||
* owner.
|
||||
*/
|
||||
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
|
||||
if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
|
||||
spin_unlock(&hb->lock);
|
||||
goto pi_faulted;
|
||||
switch (ret) {
|
||||
case -EFAULT:
|
||||
goto pi_faulted;
|
||||
|
||||
case -EAGAIN:
|
||||
goto pi_retry;
|
||||
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
goto out_putkey;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3089,6 +3114,11 @@ out_putkey:
|
||||
put_futex_key(&key);
|
||||
return ret;
|
||||
|
||||
pi_retry:
|
||||
put_futex_key(&key);
|
||||
cond_resched();
|
||||
goto retry;
|
||||
|
||||
pi_faulted:
|
||||
put_futex_key(&key);
|
||||
|
||||
@ -3449,6 +3479,7 @@ err_unlock:
|
||||
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
|
||||
{
|
||||
u32 uval, uninitialized_var(nval), mval;
|
||||
int err;
|
||||
|
||||
/* Futex address must be 32bit aligned */
|
||||
if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
|
||||
@ -3458,42 +3489,57 @@ retry:
|
||||
if (get_user(uval, uaddr))
|
||||
return -1;
|
||||
|
||||
if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
|
||||
/*
|
||||
* Ok, this dying thread is truly holding a futex
|
||||
* of interest. Set the OWNER_DIED bit atomically
|
||||
* via cmpxchg, and if the value had FUTEX_WAITERS
|
||||
* set, wake up a waiter (if any). (We have to do a
|
||||
* futex_wake() even if OWNER_DIED is already set -
|
||||
* to handle the rare but possible case of recursive
|
||||
* thread-death.) The rest of the cleanup is done in
|
||||
* userspace.
|
||||
*/
|
||||
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
|
||||
/*
|
||||
* We are not holding a lock here, but we want to have
|
||||
* the pagefault_disable/enable() protection because
|
||||
* we want to handle the fault gracefully. If the
|
||||
* access fails we try to fault in the futex with R/W
|
||||
* verification via get_user_pages. get_user() above
|
||||
* does not guarantee R/W access. If that fails we
|
||||
* give up and leave the futex locked.
|
||||
*/
|
||||
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
|
||||
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Ok, this dying thread is truly holding a futex
|
||||
* of interest. Set the OWNER_DIED bit atomically
|
||||
* via cmpxchg, and if the value had FUTEX_WAITERS
|
||||
* set, wake up a waiter (if any). (We have to do a
|
||||
* futex_wake() even if OWNER_DIED is already set -
|
||||
* to handle the rare but possible case of recursive
|
||||
* thread-death.) The rest of the cleanup is done in
|
||||
* userspace.
|
||||
*/
|
||||
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
|
||||
|
||||
/*
|
||||
* We are not holding a lock here, but we want to have
|
||||
* the pagefault_disable/enable() protection because
|
||||
* we want to handle the fault gracefully. If the
|
||||
* access fails we try to fault in the futex with R/W
|
||||
* verification via get_user_pages. get_user() above
|
||||
* does not guarantee R/W access. If that fails we
|
||||
* give up and leave the futex locked.
|
||||
*/
|
||||
if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
|
||||
switch (err) {
|
||||
case -EFAULT:
|
||||
if (fault_in_user_writeable(uaddr))
|
||||
return -1;
|
||||
goto retry;
|
||||
}
|
||||
if (nval != uval)
|
||||
|
||||
case -EAGAIN:
|
||||
cond_resched();
|
||||
goto retry;
|
||||
|
||||
/*
|
||||
* Wake robust non-PI futexes here. The wakeup of
|
||||
* PI futexes happens in exit_pi_state():
|
||||
*/
|
||||
if (!pi && (uval & FUTEX_WAITERS))
|
||||
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (nval != uval)
|
||||
goto retry;
|
||||
|
||||
/*
|
||||
* Wake robust non-PI futexes here. The wakeup of
|
||||
* PI futexes happens in exit_pi_state():
|
||||
*/
|
||||
if (!pi && (uval & FUTEX_WAITERS))
|
||||
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -323,8 +323,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
     desc->affinity_notify = notify;
     raw_spin_unlock_irqrestore(&desc->lock, flags);

-    if (old_notify)
+    if (old_notify) {
         cancel_work_sync(&old_notify->work);
+        kref_put(&old_notify->kref, old_notify->release);
+    }

     return 0;
 }

lib/ubsan.c (49 changed lines)
@ -86,11 +86,13 @@ static bool is_inline_int(struct type_descriptor *type)
|
||||
return bits <= inline_bits;
|
||||
}
|
||||
|
||||
static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
|
||||
static s_max get_signed_val(struct type_descriptor *type, void *val)
|
||||
{
|
||||
if (is_inline_int(type)) {
|
||||
unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
|
||||
return ((s_max)val) << extra_bits >> extra_bits;
|
||||
unsigned long ulong_val = (unsigned long)val;
|
||||
|
||||
return ((s_max)ulong_val) << extra_bits >> extra_bits;
|
||||
}
|
||||
|
||||
if (type_bit_width(type) == 64)
|
||||
@ -99,15 +101,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
|
||||
return *(s_max *)val;
|
||||
}
|
||||
|
||||
static bool val_is_negative(struct type_descriptor *type, unsigned long val)
|
||||
static bool val_is_negative(struct type_descriptor *type, void *val)
|
||||
{
|
||||
return type_is_signed(type) && get_signed_val(type, val) < 0;
|
||||
}
|
||||
|
||||
static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
|
||||
static u_max get_unsigned_val(struct type_descriptor *type, void *val)
|
||||
{
|
||||
if (is_inline_int(type))
|
||||
return val;
|
||||
return (unsigned long)val;
|
||||
|
||||
if (type_bit_width(type) == 64)
|
||||
return *(u64 *)val;
|
||||
@ -116,7 +118,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
|
||||
}
|
||||
|
||||
static void val_to_string(char *str, size_t size, struct type_descriptor *type,
|
||||
unsigned long value)
|
||||
void *value)
|
||||
{
|
||||
if (type_is_int(type)) {
|
||||
if (type_bit_width(type) == 128) {
|
||||
@ -168,8 +170,8 @@ static void ubsan_epilogue(unsigned long *flags)
|
||||
current->in_ubsan--;
|
||||
}
|
||||
|
||||
static void handle_overflow(struct overflow_data *data, unsigned long lhs,
|
||||
unsigned long rhs, char op)
|
||||
static void handle_overflow(struct overflow_data *data, void *lhs,
|
||||
void *rhs, char op)
|
||||
{
|
||||
|
||||
struct type_descriptor *type = data->type;
|
||||
@ -196,8 +198,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
|
||||
}
|
||||
|
||||
void __ubsan_handle_add_overflow(struct overflow_data *data,
|
||||
unsigned long lhs,
|
||||
unsigned long rhs)
|
||||
void *lhs, void *rhs)
|
||||
{
|
||||
|
||||
handle_overflow(data, lhs, rhs, '+');
|
||||
@ -205,23 +206,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
|
||||
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
|
||||
|
||||
void __ubsan_handle_sub_overflow(struct overflow_data *data,
|
||||
unsigned long lhs,
|
||||
unsigned long rhs)
|
||||
void *lhs, void *rhs)
|
||||
{
|
||||
handle_overflow(data, lhs, rhs, '-');
|
||||
}
|
||||
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
|
||||
|
||||
void __ubsan_handle_mul_overflow(struct overflow_data *data,
|
||||
unsigned long lhs,
|
||||
unsigned long rhs)
|
||||
void *lhs, void *rhs)
|
||||
{
|
||||
handle_overflow(data, lhs, rhs, '*');
|
||||
}
|
||||
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
|
||||
|
||||
void __ubsan_handle_negate_overflow(struct overflow_data *data,
|
||||
unsigned long old_val)
|
||||
void *old_val)
|
||||
{
|
||||
unsigned long flags;
|
||||
char old_val_str[VALUE_LENGTH];
|
||||
@ -242,8 +241,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
|
||||
|
||||
|
||||
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
|
||||
unsigned long lhs,
|
||||
unsigned long rhs)
|
||||
void *lhs, void *rhs)
|
||||
{
|
||||
unsigned long flags;
|
||||
char rhs_val_str[VALUE_LENGTH];
|
||||
@ -328,7 +326,7 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
|
||||
}
|
||||
|
||||
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
|
||||
unsigned long ptr)
|
||||
void *ptr)
|
||||
{
|
||||
struct type_mismatch_data_common common_data = {
|
||||
.location = &data->location,
|
||||
@ -337,12 +335,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
|
||||
.type_check_kind = data->type_check_kind
|
||||
};
|
||||
|
||||
ubsan_type_mismatch_common(&common_data, ptr);
|
||||
ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
|
||||
}
|
||||
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
|
||||
|
||||
void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
|
||||
unsigned long ptr)
|
||||
void *ptr)
|
||||
{
|
||||
|
||||
struct type_mismatch_data_common common_data = {
|
||||
@ -352,7 +350,7 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
|
||||
.type_check_kind = data->type_check_kind
|
||||
};
|
||||
|
||||
ubsan_type_mismatch_common(&common_data, ptr);
|
||||
ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
|
||||
}
|
||||
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
|
||||
|
||||
@ -376,7 +374,7 @@ void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
|
||||
EXPORT_SYMBOL(__ubsan_handle_nonnull_return);
|
||||
|
||||
void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
|
||||
unsigned long bound)
|
||||
void *bound)
|
||||
{
|
||||
unsigned long flags;
|
||||
char bound_str[VALUE_LENGTH];
|
||||
@ -393,8 +391,7 @@ void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
|
||||
}
|
||||
EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
|
||||
|
||||
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
|
||||
unsigned long index)
|
||||
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
|
||||
{
|
||||
unsigned long flags;
|
||||
char index_str[VALUE_LENGTH];
|
||||
@ -412,7 +409,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
|
||||
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
|
||||
|
||||
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
|
||||
unsigned long lhs, unsigned long rhs)
|
||||
void *lhs, void *rhs)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct type_descriptor *rhs_type = data->rhs_type;
|
||||
@ -463,7 +460,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
|
||||
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
|
||||
|
||||
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
|
||||
unsigned long val)
|
||||
void *val)
|
||||
{
|
||||
unsigned long flags;
|
||||
char val_str[VALUE_LENGTH];
|
||||
|
@@ -4296,7 +4296,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)

 static int leaks_show(struct seq_file *m, void *p)
 {
-    struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+    struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+                           root_caches_node);
     struct page *page;
     struct kmem_cache_node *n;
     const char *name;

@@ -1165,6 +1165,14 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
         !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
         return 0;

+    /* The minimum encryption key size needs to be enforced by the
+     * host stack before establishing any L2CAP connections. The
+     * specification in theory allows a minimum of 1, but to align
+     * BR/EDR and LE transports, a minimum of 7 is chosen.
+     */
+    if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
+        return 0;
+
     return 1;
 }

@@ -76,6 +76,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
             sockfd_put(csock);
             return err;
         }
+        ca.name[sizeof(ca.name)-1] = 0;

         err = hidp_connection_add(&ca, csock, isock);
         if (!err && copy_to_user(argp, &ca, sizeof(ca)))

@@ -643,6 +643,7 @@ static const struct regmap_config cs4270_regmap = {
     .reg_defaults = cs4270_reg_defaults,
     .num_reg_defaults = ARRAY_SIZE(cs4270_reg_defaults),
     .cache_type = REGCACHE_RBTREE,
+    .write_flag_mask = CS4270_I2C_INCR,

     .readable_reg = cs4270_reg_is_readable,
     .volatile_reg = cs4270_reg_is_volatile,

@ -536,73 +536,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
|
||||
{
|
||||
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
|
||||
struct hdmi_codec_daifmt cf = { 0 };
|
||||
int ret = 0;
|
||||
|
||||
dev_dbg(dai->dev, "%s()\n", __func__);
|
||||
|
||||
if (dai->id == DAI_ID_SPDIF) {
|
||||
cf.fmt = HDMI_SPDIF;
|
||||
} else {
|
||||
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
|
||||
case SND_SOC_DAIFMT_CBM_CFM:
|
||||
cf.bit_clk_master = 1;
|
||||
cf.frame_clk_master = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_CBS_CFM:
|
||||
cf.frame_clk_master = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_CBM_CFS:
|
||||
cf.bit_clk_master = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_CBS_CFS:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dai->id == DAI_ID_SPDIF)
|
||||
return 0;
|
||||
|
||||
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
|
||||
case SND_SOC_DAIFMT_NB_NF:
|
||||
break;
|
||||
case SND_SOC_DAIFMT_NB_IF:
|
||||
cf.frame_clk_inv = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_IB_NF:
|
||||
cf.bit_clk_inv = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_IB_IF:
|
||||
cf.frame_clk_inv = 1;
|
||||
cf.bit_clk_inv = 1;
|
||||
break;
|
||||
}
|
||||
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
|
||||
case SND_SOC_DAIFMT_CBM_CFM:
|
||||
cf.bit_clk_master = 1;
|
||||
cf.frame_clk_master = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_CBS_CFM:
|
||||
cf.frame_clk_master = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_CBM_CFS:
|
||||
cf.bit_clk_master = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_CBS_CFS:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
|
||||
case SND_SOC_DAIFMT_I2S:
|
||||
cf.fmt = HDMI_I2S;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_DSP_A:
|
||||
cf.fmt = HDMI_DSP_A;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_DSP_B:
|
||||
cf.fmt = HDMI_DSP_B;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_RIGHT_J:
|
||||
cf.fmt = HDMI_RIGHT_J;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_LEFT_J:
|
||||
cf.fmt = HDMI_LEFT_J;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_AC97:
|
||||
cf.fmt = HDMI_AC97;
|
||||
break;
|
||||
default:
|
||||
dev_err(dai->dev, "Invalid DAI interface format\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
|
||||
case SND_SOC_DAIFMT_NB_NF:
|
||||
break;
|
||||
case SND_SOC_DAIFMT_NB_IF:
|
||||
cf.frame_clk_inv = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_IB_NF:
|
||||
cf.bit_clk_inv = 1;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_IB_IF:
|
||||
cf.frame_clk_inv = 1;
|
||||
cf.bit_clk_inv = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
|
||||
case SND_SOC_DAIFMT_I2S:
|
||||
cf.fmt = HDMI_I2S;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_DSP_A:
|
||||
cf.fmt = HDMI_DSP_A;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_DSP_B:
|
||||
cf.fmt = HDMI_DSP_B;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_RIGHT_J:
|
||||
cf.fmt = HDMI_RIGHT_J;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_LEFT_J:
|
||||
cf.fmt = HDMI_LEFT_J;
|
||||
break;
|
||||
case SND_SOC_DAIFMT_AC97:
|
||||
cf.fmt = HDMI_AC97;
|
||||
break;
|
||||
default:
|
||||
dev_err(dai->dev, "Invalid DAI interface format\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hcp->daifmt[dai->id] = cf;
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
|
||||
@ -784,8 +782,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
|
||||
i++;
|
||||
}
|
||||
|
||||
if (hcd->spdif)
|
||||
if (hcd->spdif) {
|
||||
hcp->daidrv[i] = hdmi_spdif_dai;
|
||||
hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
|
||||
}
|
||||
|
||||
ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
|
||||
dai_count);
|
||||
|
@@ -414,9 +414,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
     SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
         NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
         ARRAY_SIZE(nau8810_mono_mixer_controls)),
-    SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+    SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
         NAU8810_DAC_EN_SFT, 0),
-    SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+    SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
         NAU8810_ADC_EN_SFT, 0),
     SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
         NAU8810_NSPK_EN_SFT, 0, NULL, 0),

@ -634,8 +634,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
|
||||
SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
|
||||
NAU8824_ADCR_EN_SFT, 0),
|
||||
|
||||
SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
|
||||
SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
|
||||
SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
|
||||
SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
|
||||
|
||||
SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
|
||||
NAU8824_DACL_EN_SFT, 0),
|
||||
@ -784,6 +784,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
|
||||
}
|
||||
}
|
||||
|
||||
static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
|
||||
{
|
||||
struct snd_soc_dapm_context *dapm = nau8824->dapm;
|
||||
const char *prefix = dapm->component->name_prefix;
|
||||
char prefixed_pin[80];
|
||||
|
||||
if (prefix) {
|
||||
snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
|
||||
prefix, pin);
|
||||
snd_soc_dapm_disable_pin(dapm, prefixed_pin);
|
||||
} else {
|
||||
snd_soc_dapm_disable_pin(dapm, pin);
|
||||
}
|
||||
}
|
||||
|
||||
static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
|
||||
{
|
||||
struct snd_soc_dapm_context *dapm = nau8824->dapm;
|
||||
const char *prefix = dapm->component->name_prefix;
|
||||
char prefixed_pin[80];
|
||||
|
||||
if (prefix) {
|
||||
snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
|
||||
prefix, pin);
|
||||
snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
|
||||
} else {
|
||||
snd_soc_dapm_force_enable_pin(dapm, pin);
|
||||
}
|
||||
}
|
||||
|
||||
static void nau8824_eject_jack(struct nau8824 *nau8824)
|
||||
{
|
||||
struct snd_soc_dapm_context *dapm = nau8824->dapm;
|
||||
@ -792,8 +822,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
|
||||
/* Clear all interruption status */
|
||||
nau8824_int_status_clear_all(regmap);
|
||||
|
||||
snd_soc_dapm_disable_pin(dapm, "SAR");
|
||||
snd_soc_dapm_disable_pin(dapm, "MICBIAS");
|
||||
nau8824_dapm_disable_pin(nau8824, "SAR");
|
||||
nau8824_dapm_disable_pin(nau8824, "MICBIAS");
|
||||
snd_soc_dapm_sync(dapm);
|
||||
|
||||
/* Enable the insertion interruption, disable the ejection
|
||||
@ -822,8 +852,8 @@ static void nau8824_jdet_work(struct work_struct *work)
|
||||
struct regmap *regmap = nau8824->regmap;
|
||||
int adc_value, event = 0, event_mask = 0;
|
||||
|
||||
snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
|
||||
snd_soc_dapm_force_enable_pin(dapm, "SAR");
|
||||
nau8824_dapm_enable_pin(nau8824, "MICBIAS");
|
||||
nau8824_dapm_enable_pin(nau8824, "SAR");
|
||||
snd_soc_dapm_sync(dapm);
|
||||
|
||||
msleep(100);
|
||||
@ -834,8 +864,8 @@ static void nau8824_jdet_work(struct work_struct *work)
|
||||
if (adc_value < HEADSET_SARADC_THD) {
|
||||
event |= SND_JACK_HEADPHONE;
|
||||
|
||||
snd_soc_dapm_disable_pin(dapm, "SAR");
|
||||
snd_soc_dapm_disable_pin(dapm, "MICBIAS");
|
||||
nau8824_dapm_disable_pin(nau8824, "SAR");
|
||||
nau8824_dapm_disable_pin(nau8824, "MICBIAS");
|
||||
snd_soc_dapm_sync(dapm);
|
||||
} else {
|
||||
event |= SND_JACK_HEADSET;
|
||||
|
@@ -462,6 +462,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
     SND_SOC_DAPM_INPUT("IN2_R"),
     SND_SOC_DAPM_INPUT("IN3_L"),
     SND_SOC_DAPM_INPUT("IN3_R"),
+    SND_SOC_DAPM_INPUT("CM_L"),
+    SND_SOC_DAPM_INPUT("CM_R"),
 };

 static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {

@ -3711,11 +3711,13 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
|
||||
struct regmap *regmap = dsp->regmap;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&dsp->pwr_lock);
|
||||
|
||||
ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
|
||||
if (ret) {
|
||||
adsp_err(dsp,
|
||||
"Failed to read Region Lock Ctrl register: %d\n", ret);
|
||||
return IRQ_HANDLED;
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
|
||||
@ -3734,7 +3736,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
|
||||
adsp_err(dsp,
|
||||
"Failed to read Bus Err Addr register: %d\n",
|
||||
ret);
|
||||
return IRQ_HANDLED;
|
||||
goto error;
|
||||
}
|
||||
|
||||
adsp_err(dsp, "bus error address = 0x%x\n",
|
||||
@ -3747,7 +3749,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
|
||||
adsp_err(dsp,
|
||||
"Failed to read Pmem Xmem Err Addr register: %d\n",
|
||||
ret);
|
||||
return IRQ_HANDLED;
|
||||
goto error;
|
||||
}
|
||||
|
||||
adsp_err(dsp, "xmem error address = 0x%x\n",
|
||||
@ -3760,6 +3762,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
|
||||
regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
|
||||
ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
|
||||
|
||||
error:
|
||||
mutex_unlock(&dsp->pwr_lock);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
|
||||
|
@@ -405,7 +405,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
 };

 static const unsigned int dmic_2ch[] = {
-    4,
+    2,
 };

 static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {

@@ -1252,11 +1252,15 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
         goto irq_err;

     err = sst_dma_new(sst);
-    if (err)
-        dev_warn(dev, "sst_dma_new failed %d\n", err);
+    if (err) {
+        dev_err(dev, "sst_dma_new failed %d\n", err);
+        goto dma_err;
+    }

     return sst;

+dma_err:
+    free_irq(sst->irq, sst);
 irq_err:
     if (sst->ops->free)
         sst->ops->free(sst);

@@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
         return -EINVAL;
     }

+    pm_runtime_get_sync(cpu_dai->dev);
     regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+    pm_runtime_put(cpu_dai->dev);

     return 0;
 }

@@ -66,11 +66,11 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
         return ret;

     /*
-     * We add 1 to the rclk_freq value in order to avoid too low clock
+     * We add 2 to the rclk_freq value in order to avoid too low clock
      * frequency values due to the EPLL output frequency not being exact
      * multiple of the audio sampling rate.
      */
-    rclk_freq = params_rate(params) * rfs + 1;
+    rclk_freq = params_rate(params) * rfs + 2;

     ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
     if (ret < 0)

@@ -894,10 +894,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
         codec_params = *params;

         /* fixup params based on TDM slot masks */
-        if (codec_dai->tx_mask)
+        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+            codec_dai->tx_mask)
             soc_pcm_codec_params_fixup(&codec_params,
                            codec_dai->tx_mask);
-        if (codec_dai->rx_mask)
+
+        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+            codec_dai->rx_mask)
             soc_pcm_codec_params_fixup(&codec_params,
                            codec_dai->rx_mask);
