mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
This is the 4.14.320 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmSb6+YACgkQONu9yGCS
aT6k8xAAwMV8dM19J1KpSGIZc67DMWH4nXoehnuVROhJKZjLfJvQr9Vwab7+2gIy
rXeCQgQjxKpPOgmkH67qFoD11prwXYCOj/69qeAyQTyXYjwrF3jEzCds6yBePY2U
dU0og8UipoisMqO5HPtLYoFT1kYyOGTB+EmFpy0xTrudYj9ctmlqIe71oAXup766
G2AvOHBggGWdvtkecuz/bbMFrdjIOhzl1Qq68X8GmxhHsFPGt+i3e+MpzcyM+DVi
WmBfzYNO3phCYvAKk21hWI7rhp/3yGFNTvZVeC39pwEtLH2Xk+5gjQHYjW/HqHyR
xD9Li8A03wYbjy5501Hyt1W3Se0kLAgdDUOZX//1TOrLXLuNn91mfu2N7grhfsML
+EwH7asrnQsJUn9Pr+ZKcV3Rruw5JUHuIa4vCmrsrqjqi+LTjFwTpK4Y02zefjhM
qLhsIdSOU1DrmAnd5AbQPIeQnqWEe29lsG/K5OWha1qr94x37CqaFB3DeKzNnxjO
qJ0ByN3GIi1KbBv2ymIkpAhOLbPEni+Kakmi5+YdPGsnsPAXHzGUf0JxBi6U6Pj5
P/VXAq5b5ZN4VbGWZa+PORahRBPy2BMlNfi6VeWsYDh4YZfoCjidy4mmrE86c+iY
WMJpszU7QF6ZSqf0QXVCJEjJwVVHgvObCM2Hguyzy6oGb5usJ/w=
=TkTj
-----END PGP SIGNATURE-----

Merge 4.14.320 into android-4.14-stable

Changes in 4.14.320:
serial: lantiq: add missing interrupt ack
nilfs2: reject devices with insufficient block count
nilfs2: fix buffer corruption due to concurrent device reads
Drivers: hv: vmbus: Fix vmbus_wait_for_unload() to scan present CPUs
cgroup: Do not corrupt task iteration when rebinding subsystem
nilfs2: prevent general protection fault in nilfs_clear_dirty_page()
xfrm: Linearize the skb after offloading if needed.
net: qca_spi: Avoid high load if QCA7000 is not available
mmc: mtk-sd: fix deferred probing
mmc: omap: fix deferred probing
mmc: omap_hsmmc: fix deferred probing
mmc: usdhi60rol0: fix deferred probing
be2net: Extend xmit workaround to BE3 chip
netfilter: nf_tables: disallow element updates of bound anonymous sets
scsi: target: iscsi: Prevent login threads from racing between each other
HID: wacom: Add error check to wacom_parse_and_register()
arm64: Add missing Set/Way CMO encodings
nfcsim.c: Fix error checking for debugfs_create_dir
fbdev: imsttfb: Release framebuffer and dealloc cmap on error path
usb: gadget: udc: fix NULL dereference in remove()
s390/cio: unregister device when the only path is gone
drm/exynos: vidi: fix a wrong error return
drm/exynos: fix race condition UAF in exynos_g2d_exec_ioctl
drm/radeon: fix race condition UAF in radeon_gem_set_domain_ioctl
x86/apic: Fix kernel panic when booting with intremap=off and x2apic_phys
i2c: imx-lpi2c: fix type char overflow issue when calculating the clock cycle
Linux 4.14.320

Change-Id: Ie099bcd37431671a217ea32b54bef6d5f29c122d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit ecca894374
Makefile (Linux 4.14.320):

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 319
+SUBLEVEL = 320
 EXTRAVERSION =
 NAME = Petit Gorille
 
arm64: Add missing Set/Way CMO encodings (arch/arm64/include/asm/sysreg.h):

@@ -108,8 +108,14 @@
 #define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
 
 #define SYS_DC_ISW			sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW			sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW			sys_insn(1, 0, 7, 6, 6)
 #define SYS_DC_CSW			sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW			sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW			sys_insn(1, 0, 7, 10, 6)
 #define SYS_DC_CISW			sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW			sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW			sys_insn(1, 0, 7, 14, 6)
 
 #define SYS_OSDTRRX_EL1			sys_reg(2, 0, 0, 0, 2)
 #define SYS_MDCCINT_EL1			sys_reg(2, 0, 0, 2, 0)
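A note for readers decoding these macros: sys_insn() packs the five operands (Op0, Op1, CRn, CRm, Op2) of an AArch64 system instruction into fixed bit positions. A small stand-alone sketch of that packing, using the same shift values as the kernel's sysreg.h (the SYS_INSN name below is illustrative, not the kernel macro itself):

#include <stdio.h>
#include <stdint.h>

/* Field layout used by the kernel's sys_reg()/sys_insn() helpers:
 * Op0 at bit 19, Op1 at 16, CRn at 12, CRm at 8, Op2 at 5. */
#define SYS_INSN(op0, op1, crn, crm, op2)                 \
	((uint32_t)(op0) << 19 | (uint32_t)(op1) << 16 |  \
	 (uint32_t)(crn) << 12 | (uint32_t)(crm) << 8 |   \
	 (uint32_t)(op2) << 5)

int main(void)
{
	/* Two of the encodings added above. */
	printf("SYS_DC_IGSW   = %#x\n", (unsigned)SYS_INSN(1, 0, 7, 6, 4));
	printf("SYS_DC_CIGDSW = %#x\n", (unsigned)SYS_INSN(1, 0, 7, 14, 6));
	return 0;
}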
x86/apic: Fix kernel panic when booting with intremap=off and x2apic_phys (arch/x86/kernel/apic/x2apic_phys.c):

@@ -95,7 +95,10 @@ static void init_x2apic_ldr(void)
 
 static int x2apic_phys_probe(void)
 {
-	if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+	if (!x2apic_mode)
+		return 0;
+
+	if (x2apic_phys || x2apic_fadt_phys())
 		return 1;
 
 	return apic == &apic_x2apic_phys;
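The interesting case is x2apic_mode == 0 while x2apic_phys is set, which is what booting with intremap=off plus x2apic_phys produces: the old predicate skipped its first test and fell through to the apic pointer comparison, so the x2APIC driver could still be selected with x2APIC mode disabled. A stand-alone comparison of the two predicates (plain ints stand in for the kernel globals, and apic_matches for the pointer test):

#include <stdio.h>

static int old_probe(int x2apic_mode, int phys, int apic_matches)
{
	if (x2apic_mode && phys)
		return 1;
	return apic_matches;
}

static int new_probe(int x2apic_mode, int phys, int apic_matches)
{
	if (!x2apic_mode)
		return 0;
	if (phys)
		return 1;
	return apic_matches;
}

int main(void)
{
	/* The problematic configuration: x2APIC mode disabled by
	 * intremap=off, apic pointer already at the x2apic_phys driver. */
	printf("old=%d new=%d\n", old_probe(0, 1, 1), new_probe(0, 1, 1));
	return 0;
}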
drm/exynos: fix race condition UAF in exynos_g2d_exec_ioctl (drivers/gpu/drm/exynos/exynos_drm_g2d.c):

@@ -1387,7 +1387,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 	/* Let the runqueue know that there is work to do. */
 	queue_work(g2d->g2d_workq, &g2d->runqueue_work);
 
-	if (runqueue_node->async)
+	if (req->async)
 		goto out;
 
 	wait_for_completion(&runqueue_node->complete);
drm/exynos: vidi: fix a wrong error return (drivers/gpu/drm/exynos/exynos_drm_vidi.c):

@@ -480,8 +480,6 @@ static int vidi_remove(struct platform_device *pdev)
 	if (ctx->raw_edid != (struct edid *)fake_edid_info) {
 		kfree(ctx->raw_edid);
 		ctx->raw_edid = NULL;
-
-		return -EINVAL;
 	}
 
 	component_del(&pdev->dev, &vidi_component_ops);
drm/radeon: fix race condition UAF in radeon_gem_set_domain_ioctl (drivers/gpu/drm/radeon/radeon_gem.c):

@@ -378,7 +378,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_set_domain *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_bo *robj;
 	int r;
 
 	/* for now if someone requests domain CPU -
@@ -391,13 +390,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		up_read(&rdev->exclusive_lock);
 		return -ENOENT;
 	}
-	robj = gem_to_radeon_bo(gobj);
 
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
 	drm_gem_object_put_unlocked(gobj);
 	up_read(&rdev->exclusive_lock);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
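Both exynos and radeon fixes in this batch close the same use-after-free shape: a field is read through an object pointer after the reference keeping that object alive may already be gone. Condensed to the radeon case, as a two-line sketch rather than the full function:

/* before: robj may be freed once the last GEM reference drops,
 * yet robj->rdev is dereferenced after the put */
drm_gem_object_put_unlocked(gobj);
r = radeon_gem_handle_lockup(robj->rdev, r);

/* after: rdev came from dev->dev_private and outlives the buffer
 * object, so nothing is read through the possibly-freed robj */
drm_gem_object_put_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);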
HID: wacom: Add error check to wacom_parse_and_register() (drivers/hid/wacom_sys.c):

@@ -2251,8 +2251,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 		goto fail_quirks;
 	}
 
-	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
 		error = hid_hw_open(hdev);
+		if (error) {
+			hid_err(hdev, "hw open failed\n");
+			goto fail_quirks;
+		}
+	}
 
 	wacom_set_shared_values(wacom_wac);
 	devres_close_group(&hdev->dev, wacom);
Drivers: hv: vmbus: Fix vmbus_wait_for_unload() to scan present CPUs (drivers/hv/channel_mgmt.c):

@@ -803,11 +803,22 @@ static void vmbus_wait_for_unload(void)
 		if (completion_done(&vmbus_connection.unload_event))
 			goto completed;
 
-		for_each_online_cpu(cpu) {
+		for_each_present_cpu(cpu) {
 			struct hv_per_cpu_context *hv_cpu
 				= per_cpu_ptr(hv_context.cpu_context, cpu);
 
+			/*
+			 * In a CoCo VM the synic_message_page is not allocated
+			 * in hv_synic_alloc(). Instead it is set/cleared in
+			 * hv_synic_enable_regs() and hv_synic_disable_regs()
+			 * such that it is set only when the CPU is online. If
+			 * not all present CPUs are online, the message page
+			 * might be NULL, so skip such CPUs.
+			 */
 			page_addr = hv_cpu->synic_message_page;
+			if (!page_addr)
+				continue;
 
 			msg = (struct hv_message *)page_addr
 				+ VMBUS_MESSAGE_SINT;
 
@@ -841,11 +852,14 @@ completed:
 	 * maybe-pending messages on all CPUs to be able to receive new
 	 * messages after we reconnect.
 	 */
-	for_each_online_cpu(cpu) {
+	for_each_present_cpu(cpu) {
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
 
 		page_addr = hv_cpu->synic_message_page;
+		if (!page_addr)
+			continue;
+
 		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 		msg->header.message_type = HVMSG_NONE;
 	}
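The mask change matters because cpu_online_mask is a subset of cpu_present_mask: the hypervisor can park the unload response on the SynIC message page of a CPU that is present but offline, where an online-only scan never sees it. The added NULL check then covers CoCo VMs, whose offline CPUs have no message page at all. The resulting loop shape, as a kernel-style sketch rather than the driver code verbatim:

for_each_present_cpu(cpu) {
	void *page_addr = per_cpu_ptr(hv_context.cpu_context, cpu)
				->synic_message_page;

	if (!page_addr)		/* present-but-offline CPU in a CoCo VM */
		continue;
	/* ... scan this CPU's page for the pending message ... */
}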
i2c: imx-lpi2c: fix type char overflow issue when calculating the clock cycle (drivers/i2c/busses/i2c-imx-lpi2c.c):

@@ -215,8 +215,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
 /* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
 static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
 {
-	u8 prescale, filt, sethold, clkhi, clklo, datavd;
-	unsigned int clk_rate, clk_cycle;
+	u8 prescale, filt, sethold, datavd;
+	unsigned int clk_rate, clk_cycle, clkhi, clklo;
 	enum lpi2c_imx_pincfg pincfg;
 	unsigned int temp;
 
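What the type change avoids: with a fast input clock and a slow bus speed the computed high/low cycle counts can exceed 255, and storing them in a u8 silently truncates. A stand-alone illustration; the 1:2 high/low split mirrors the CLKLO = I2C_CLK_RATIO * CLKHI comment above, and the input value is invented, not the driver's exact formula:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int clk_cycle = 1000;	/* clocks per SCL period (illustrative) */
	unsigned int clkhi = (clk_cycle + 2) / 3;	/* high-phase share */
	uint8_t clkhi_u8 = (clk_cycle + 2) / 3;		/* same value in a u8 */

	printf("clkhi as unsigned int: %u\n", clkhi);		/* 334 */
	printf("clkhi as u8 (truncated): %u\n", clkhi_u8);	/* 78  */
	return 0;
}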
mmc: mtk-sd: fix deferred probing (drivers/mmc/host/mtk-sd.c):

@@ -1663,7 +1663,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
 
 	host->irq = platform_get_irq(pdev, 0);
 	if (host->irq < 0) {
-		ret = -EINVAL;
+		ret = host->irq;
 		goto host_free;
 	}
 
mmc: omap: fix deferred probing (drivers/mmc/host/omap.c):

@@ -1348,7 +1348,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENXIO;
+		return irq;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	host->virt_base = devm_ioremap_resource(&pdev->dev, res);
mmc: omap_hsmmc: fix deferred probing (drivers/mmc/host/omap_hsmmc.c):

@@ -2023,9 +2023,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(pdev, 0);
-	if (res == NULL || irq < 0)
+	if (!res)
 		return -ENXIO;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
 
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
mmc: usdhi60rol0: fix deferred probing (drivers/mmc/host/usdhi6rol0.c):

@@ -1749,8 +1749,10 @@ static int usdhi6_probe(struct platform_device *pdev)
 	irq_cd = platform_get_irq_byname(pdev, "card detect");
 	irq_sd = platform_get_irq_byname(pdev, "data");
 	irq_sdio = platform_get_irq_byname(pdev, "SDIO");
-	if (irq_sd < 0 || irq_sdio < 0)
-		return -ENODEV;
+	if (irq_sd < 0)
+		return irq_sd;
+	if (irq_sdio < 0)
+		return irq_sdio;
 
 	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
 	if (!mmc)
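All four MMC fixes above share one idiom: platform_get_irq() can fail with -EPROBE_DEFER (the interrupt controller is not ready yet), and rewriting that into -EINVAL/-ENXIO/-ENODEV prevents the driver core from retrying the probe later. The pattern they converge on, as a sketch (foo_probe is a placeholder, not one of these drivers):

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate -EPROBE_DEFER untouched */

	/* ... request_irq(), resource setup, etc. ... */
	return 0;
}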
be2net: Extend xmit workaround to BE3 chip (drivers/net/ethernet/emulex/benet/be_main.c):

@@ -1129,8 +1129,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
 						VLAN_ETH_HLEN : ETH_HLEN;
 	if (skb->len <= 60 &&
-	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
-	    is_ipv4_pkt(skb)) {
+	    (lancer_chip(adapter) || BE3_chip(adapter) ||
+	     skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
 		ip = (struct iphdr *)ip_hdr(skb);
 		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
 	}
net: qca_spi: Avoid high load if QCA7000 is not available (drivers/net/ethernet/qualcomm/qca_spi.c):

@@ -553,8 +553,7 @@ qcaspi_spi_thread(void *data)
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if ((qca->intr_req == qca->intr_svc) &&
-		    (qca->txr.skb[qca->txr.head] == NULL) &&
-		    (qca->sync == QCASPI_SYNC_READY))
+		    !qca->txr.skb[qca->txr.head])
 			schedule();
 
 		set_current_state(TASK_RUNNING);
nfcsim.c: Fix error checking for debugfs_create_dir (drivers/nfc/nfcsim.c):

@@ -345,10 +345,6 @@ static struct dentry *nfcsim_debugfs_root;
 static void nfcsim_debugfs_init(void)
 {
 	nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
-
-	if (!nfcsim_debugfs_root)
-		pr_err("Could not create debugfs entry\n");
-
 }
 
 static void nfcsim_debugfs_remove(void)
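Context for the deletion: on failure debugfs_create_dir() returns an ERR_PTR value rather than NULL, so the removed check could never fire, and the debugfs API is designed so callers may ignore the result; later debugfs calls accept an errored dentry and become no-ops. A sketch of the resulting call style (the u32 knob name here is made up for illustration):

struct dentry *root = debugfs_create_dir("nfcsim", NULL);

/* No error check needed: if root is an ERR_PTR, this is a no-op. */
debugfs_create_u32("some_counter", 0600, root, &some_counter);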
s390/cio: unregister device when the only path is gone (drivers/s390/cio/device.c):

@@ -1357,6 +1357,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
 enum io_sch_action {
 	IO_SCH_UNREG,
 	IO_SCH_ORPH_UNREG,
+	IO_SCH_UNREG_CDEV,
 	IO_SCH_ATTACH,
 	IO_SCH_UNREG_ATTACH,
 	IO_SCH_ORPH_ATTACH,
@@ -1389,7 +1390,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
 	}
 	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
 		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
-			return IO_SCH_UNREG;
+			return IO_SCH_UNREG_CDEV;
 		return IO_SCH_DISC;
 	}
 	if (device_is_disconnected(cdev))
@@ -1451,6 +1452,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 	case IO_SCH_ORPH_ATTACH:
 		ccw_device_set_disconnected(cdev);
 		break;
+	case IO_SCH_UNREG_CDEV:
 	case IO_SCH_UNREG_ATTACH:
 	case IO_SCH_UNREG:
 		if (!cdev)
@@ -1484,6 +1486,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		if (rc)
 			goto out;
 		break;
+	case IO_SCH_UNREG_CDEV:
 	case IO_SCH_UNREG_ATTACH:
 		spin_lock_irqsave(sch->lock, flags);
 		if (cdev->private->flags.resuming) {
scsi: target: iscsi: Prevent login threads from racing between each other (drivers/target/iscsi/iscsi_target_nego.c):

@@ -1067,6 +1067,7 @@ int iscsi_target_locate_portal(
 	iscsi_target_set_sock_callbacks(conn);
 
 	login->np = np;
+	conn->tpg = NULL;
 
 	login_req = (struct iscsi_login_req *) login->req;
 	payload_length = ntoh24(login_req->dlength);
@@ -1136,7 +1137,6 @@ int iscsi_target_locate_portal(
 	 */
 	sessiontype = strncmp(s_buf, DISCOVERY, 9);
 	if (!sessiontype) {
-		conn->tpg = iscsit_global->discovery_tpg;
 		if (!login->leading_connection)
 			goto get_target;
 
@@ -1153,9 +1153,11 @@ int iscsi_target_locate_portal(
 	 * Serialize access across the discovery struct iscsi_portal_group to
 	 * process login attempt.
	 */
+	conn->tpg = iscsit_global->discovery_tpg;
 	if (iscsit_access_np(np, conn->tpg) < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				    ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		conn->tpg = NULL;
 		ret = -1;
 		goto out;
 	}
serial: lantiq: add missing interrupt ack (drivers/tty/serial/lantiq.c):

@@ -263,6 +263,7 @@ lqasc_err_int(int irq, void *_port)
 	unsigned long flags;
 	struct uart_port *port = (struct uart_port *)_port;
 	spin_lock_irqsave(&ltq_asc_lock, flags);
+	ltq_w32(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR);
 	/* clear any pending interrupts */
 	ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
 		ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
usb: gadget: udc: fix NULL dereference in remove():

@@ -175,6 +175,9 @@ static int udc_pci_probe(
 		retval = -ENODEV;
 		goto err_probe;
 	}
+
+	udc = dev;
+
 	return 0;
 
 err_probe:
fbdev: imsttfb: Release framebuffer and dealloc cmap on error path (drivers/video/fbdev/imsttfb.c):

@@ -1452,9 +1452,13 @@ static void init_imstt(struct fb_info *info)
 		      FBINFO_HWACCEL_FILLRECT |
 		      FBINFO_HWACCEL_YPAN;
 
-	fb_alloc_cmap(&info->cmap, 0, 0);
+	if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+		framebuffer_release(info);
+		return -ENODEV;
+	}
 
 	if (register_framebuffer(info) < 0) {
+		fb_dealloc_cmap(&info->cmap);
 		framebuffer_release(info);
 		return;
 	}
nilfs2: prevent general protection fault in nilfs_clear_dirty_page() (fs/nilfs2/page.c):

@@ -381,7 +381,15 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
 			struct page *page = pvec.pages[i];
 
 			lock_page(page);
-			nilfs_clear_dirty_page(page, silent);
+
+			/*
+			 * This page may have been removed from the address
+			 * space by truncation or invalidation when the lock
+			 * was acquired. Skip processing in that case.
+			 */
+			if (likely(page->mapping == mapping))
+				nilfs_clear_dirty_page(page, silent);
+
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
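The recheck works because truncation clears page->mapping while holding the page lock, so holding the lock here makes the test stable. The generic shape of the pattern, as a sketch (process_page is a placeholder for the real work, not a kernel function):

lock_page(page);
if (page->mapping == mapping) {
	/* Still attached to this inode's address space: the page's
	 * buffers cannot be ripped out while we hold the page lock. */
	process_page(page);
}
unlock_page(page);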
nilfs2: fix buffer corruption due to concurrent device reads (fs/nilfs2/segbuf.c):

@@ -110,6 +110,12 @@ int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
 	if (unlikely(!bh))
 		return -ENOMEM;
 
+	lock_buffer(bh);
+	if (!buffer_uptodate(bh)) {
+		memset(bh->b_data, 0, bh->b_size);
+		set_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
 	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
 	return 0;
 }
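This and the following nilfs2 hunks apply one idea: a buffer the filesystem composes purely in memory must be initialized and flagged uptodate under the buffer lock, so that a concurrent read of the block device (for example a userspace dd) can neither observe half-built contents nor trigger a device read that overwrites them. The idiom in isolation, as a sketch:

lock_buffer(bh);			/* excludes concurrent I/O on bh */
if (!buffer_uptodate(bh)) {
	memset(bh->b_data, 0, bh->b_size);	/* define every byte */
	set_buffer_uptodate(bh);	/* block layer must not re-read it */
}
unlock_buffer(bh);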
nilfs2: fix buffer corruption due to concurrent device reads, continued (fs/nilfs2/segment.c):

@@ -993,10 +993,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 	unsigned int isz, srsz;
 
 	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
+
+	lock_buffer(bh_sr);
 	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
 	isz = nilfs->ns_inode_size;
 	srsz = NILFS_SR_BYTES(isz);
 
+	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
 	raw_sr->sr_bytes = cpu_to_le16(srsz);
 	raw_sr->sr_nongc_ctime
 		= cpu_to_le64(nilfs_doing_gc() ?
@@ -1010,6 +1013,8 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
 				 NILFS_SR_SUFILE_OFFSET(isz), 1);
 	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
+	set_buffer_uptodate(bh_sr);
+	unlock_buffer(bh_sr);
 }
 
 static void nilfs_redirty_inodes(struct list_head *head)
@@ -1787,6 +1792,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
+			clear_buffer_uptodate(bh);
 			if (bh->b_page != bd_page) {
 				if (bd_page)
 					end_page_writeback(bd_page);
@@ -1798,6 +1804,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 				    b_assoc_buffers) {
 			clear_buffer_async_write(bh);
 			if (bh == segbuf->sb_super_root) {
+				clear_buffer_uptodate(bh);
 				if (bh->b_page != bd_page) {
 					end_page_writeback(bd_page);
 					bd_page = bh->b_page;
nilfs2: fix buffer corruption due to concurrent device reads, continued (fs/nilfs2/super.c):

@@ -384,10 +384,31 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
 		goto out;
 	}
 	nsbp = (void *)nsbh->b_data + offset;
-	memset(nsbp, 0, nilfs->ns_blocksize);
+
+	lock_buffer(nsbh);
+	if (sb2i >= 0) {
+		/*
+		 * The position of the second superblock only changes by 4KiB,
+		 * which is larger than the maximum superblock data size
+		 * (= 1KiB), so there is no need to use memmove() to allow
+		 * overlap between source and destination.
+		 */
+		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
+
+		/*
+		 * Zero fill after copy to avoid overwriting in case of move
+		 * within the same block.
+		 */
+		memset(nsbh->b_data, 0, offset);
+		memset((void *)nsbp + nilfs->ns_sbsize, 0,
+		       nsbh->b_size - offset - nilfs->ns_sbsize);
+	} else {
+		memset(nsbh->b_data, 0, nsbh->b_size);
+	}
+	set_buffer_uptodate(nsbh);
+	unlock_buffer(nsbh);
 
 	if (sb2i >= 0) {
-		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
 		brelse(nilfs->ns_sbh[sb2i]);
 		nilfs->ns_sbh[sb2i] = nsbh;
 		nilfs->ns_sbp[sb2i] = nsbp;
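The comment embedded in the hunk carries the key argument: the second superblock's location moves in 4KiB steps while the superblock payload is at most 1KiB, so source and destination can never overlap and plain memcpy() is safe; the surrounding bytes are zeroed explicitly. A scaled-down stand-alone illustration of that layout reasoning (sizes invented, same ratio):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char block[64];			/* stands in for the 4KiB block   */
	const char sb[16] = "sb-payload";	/* stands in for the <=1KiB sb */
	size_t offset = 32;		/* new position within the block  */

	/* zero everything the payload does not cover, then copy: the
	 * destination starts a whole block-step away, so no overlap */
	memset(block, 0, offset);
	memcpy(block + offset, sb, sizeof(sb));
	memset(block + offset + sizeof(sb), 0,
	       sizeof(block) - offset - sizeof(sb));

	printf("%s\n", block + offset);
	return 0;
}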
nilfs2: reject devices with insufficient block count (fs/nilfs2/the_nilfs.c):

@@ -384,6 +384,18 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
 			  100));
 }
 
+/**
+ * nilfs_max_segment_count - calculate the maximum number of segments
+ * @nilfs: nilfs object
+ */
+static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
+{
+	u64 max_count = U64_MAX;
+
+	do_div(max_count, nilfs->ns_blocks_per_segment);
+	return min_t(u64, max_count, ULONG_MAX);
+}
+
 void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
 {
 	nilfs->ns_nsegments = nsegs;
@@ -393,6 +405,8 @@ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
 static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 				   struct nilfs_super_block *sbp)
 {
+	u64 nsegments, nblocks;
+
 	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
 		nilfs_msg(nilfs->ns_sb, KERN_ERR,
 			  "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
@@ -439,7 +453,35 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 		return -EINVAL;
 	}
 
-	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
+	nsegments = le64_to_cpu(sbp->s_nsegments);
+	if (nsegments > nilfs_max_segment_count(nilfs)) {
+		nilfs_msg(nilfs->ns_sb, KERN_ERR,
+			  "segment count %llu exceeds upper limit (%llu segments)",
+			  (unsigned long long)nsegments,
+			  (unsigned long long)nilfs_max_segment_count(nilfs));
+		return -EINVAL;
+	}
+
+	nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>
+		  nilfs->ns_sb->s_blocksize_bits;
+	if (nblocks) {
+		u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
+		/*
+		 * To avoid failing to mount early device images without a
+		 * second superblock, exclude that block count from the
+		 * "min_block_count" calculation.
+		 */
+
+		if (nblocks < min_block_count) {
+			nilfs_msg(nilfs->ns_sb, KERN_ERR,
+				  "total number of segment blocks %llu exceeds device size (%llu blocks)",
+				  (unsigned long long)min_block_count,
+				  (unsigned long long)nblocks);
+			return -EINVAL;
+		}
+	}
+
+	nilfs_set_nsegments(nilfs, nsegments);
 	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
 	return 0;
 }
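The helper added above divides U64_MAX by the per-segment block count (do_div() divides a 64-bit value in place) and clamps to ULONG_MAX so the result fits the unsigned long segment counter; the validation then rejects superblocks whose claimed geometry would not fit on the device. A user-space analog with an invented blocks-per-segment value:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint64_t blocks_per_segment = 128;	/* illustrative geometry */
	uint64_t max_count = UINT64_MAX / blocks_per_segment;
	unsigned long limit =
		max_count < ULONG_MAX ? (unsigned long)max_count : ULONG_MAX;

	printf("max segment count: %lu\n", limit);
	return 0;
}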
cgroup: Do not corrupt task iteration when rebinding subsystem (kernel/cgroup/cgroup.c):

@@ -1617,7 +1617,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 {
 	struct cgroup *dcgrp = &dst_root->cgrp;
 	struct cgroup_subsys *ss;
-	int ssid, i, ret;
+	int ssid, ret;
 	u16 dfl_disable_ss_mask = 0;
 
 	lockdep_assert_held(&cgroup_mutex);
@@ -1661,7 +1661,8 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 		struct cgroup_root *src_root = ss->root;
 		struct cgroup *scgrp = &src_root->cgrp;
 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
-		struct css_set *cset;
+		struct css_set *cset, *cset_pos;
+		struct css_task_iter *it;
 
 		WARN_ON(!css || cgroup_css(dcgrp, ss));
 
@@ -1679,9 +1680,22 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 		css->cgroup = dcgrp;
 
 		spin_lock_irq(&css_set_lock);
-		hash_for_each(css_set_table, i, cset, hlist)
+		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+					 e_cset_node[ss->id]) {
 			list_move_tail(&cset->e_cset_node[ss->id],
 				       &dcgrp->e_csets[ss->id]);
+			/*
+			 * all css_sets of scgrp together in same order to dcgrp,
+			 * patch in-flight iterators to preserve correct iteration.
+			 * since the iterator is always advanced right away and
+			 * finished when it->cset_pos meets it->cset_head, so only
+			 * update it->cset_head is enough here.
+			 */
+			list_for_each_entry(it, &cset->task_iters, iters_node)
+				if (it->cset_head == &scgrp->e_csets[ss->id])
+					it->cset_head = &dcgrp->e_csets[ss->id];
+		}
 		spin_unlock_irq(&css_set_lock);
 
 		/* default hierarchy doesn't enable controllers by default */
xfrm: Linearize the skb after offloading if needed (net/ipv4/esp4_offload.c, net/ipv6/esp6_offload.c):

@@ -268,6 +268,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 
 	secpath_reset(skb);
 
+	if (skb_needs_linearize(skb, skb->dev->features) &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
 	return 0;
 }
 
@@ -304,6 +304,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
 
 	secpath_reset(skb);
 
+	if (skb_needs_linearize(skb, skb->dev->features) &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
 	return 0;
 }
 
netfilter: nf_tables: disallow element updates of bound anonymous sets (net/netfilter/nf_tables_api.c):

@@ -4220,7 +4220,8 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
 		return PTR_ERR(set);
 	}
 
-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+	if (!list_empty(&set->bindings) &&
+	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
 		return -EBUSY;
 
 	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
@@ -4399,7 +4400,9 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
 					   genmask);
 	if (IS_ERR(set))
 		return PTR_ERR(set);
-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+
+	if (!list_empty(&set->bindings) &&
+	    (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
 		return -EBUSY;
 
 	if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {