Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
Merge android-4.14.128 (334aa9b) into msm-4.14
* refs/heads/tmp-334aa9b:
  Linux 4.14.128
  rtc: pcf8523: don't return invalid date when battery is low
  x86/kasan: Fix boot with 5-level paging and KASAN
  x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback
  RAS/CEC: Fix binary search function
  USB: serial: option: add Telit 0x1260 and 0x1261 compositions
  USB: serial: option: add support for Simcom SIM7500/SIM7600 RNDIS mode
  USB: serial: pl2303: add Allied Telesis VT-Kit3
  USB: usb-storage: Add new ID to ums-realtek
  USB: Fix chipmunk-like voice when using Logitech C270 for recording audio.
  usb: dwc2: host: Fix wMaxPacketSize handling (fix webcam regression)
  usb: dwc2: Fix DMA cache alignment issues
  drm/vmwgfx: NULL pointer dereference from vmw_cmd_dx_view_define()
  drm/vmwgfx: integer underflow in vmw_cmd_dx_set_shader() leading to an invalid read
  KVM: s390: fix memory slot handling for KVM_SET_USER_MEMORY_REGION
  KVM: x86/pmu: do not mask the value that is written to fixed PMUs
  usbnet: ipheth: fix racing condition
  selftests/timers: Add missing fflush(stdout) calls
  libnvdimm: Fix compilation warnings with W=1
  scsi: bnx2fc: fix incorrect cast to u64 on shift operation
  platform/x86: pmc_atom: Add several Beckhoff Automation boards to critclk_systems DMI table
  platform/x86: pmc_atom: Add Lex 3I380D industrial PC to critclk_systems DMI table
  nvme: remove the ifdef around nvme_nvm_ioctl
  arm64/mm: Inhibit huge-vmap with ptdump
  scsi: lpfc: add check for loss of ndlp when sending RRQ
  scsi: qedi: remove set but not used variables 'cdev' and 'udev'
  scsi: qedi: remove memset/memcpy to nfunc and use func instead
  Drivers: misc: fix out-of-bounds access in function param_set_kgdbts_var
  s390/kasan: fix strncpy_from_user kasan checks
  Revert "ALSA: seq: Protect in-kernel ioctl calls with mutex"
  ALSA: seq: Fix race of get-subscription call vs port-delete ioctls
  ALSA: seq: Protect in-kernel ioctl calls with mutex
  x86/uaccess, kcov: Disable stack protector
  drm/i915/sdvo: Implement proper HDMI audio support for SDVO
  ASoC: fsl_asrc: Fix the issue about unsupported rate
  ASoC: cs42xx8: Add regcache mask dirty
  cgroup: Use css_tryget() instead of css_tryget_online() in task_get_css()
  bcache: fix stack corruption by PRECEDING_KEY()
  i2c: acorn: fix i2c warning
  iommu/arm-smmu: Avoid constant zero in TLBI writes
  media: v4l2-ioctl: clear fields in s_parm
  ptrace: restore smp_rmb() in __ptrace_may_access()
  signal/ptrace: Don't leak unitialized kernel memory with PTRACE_PEEK_SIGINFO
  mm/vmscan.c: fix trying to reclaim unevictable LRU page
  fs/ocfs2: fix race in ocfs2_dentry_attach_lock()
  mm/list_lru.c: fix memory leak in __memcg_init_list_lru_node
  libata: Extend quirks for the ST1000LM024 drives with NOLPM quirk
  ALSA: firewire-motu: fix destruction of data for isochronous resources
  ALSA: hda/realtek - Update headset mode for ALC256
  ALSA: oxfw: allow PCM capture for Stanton SCS.1m
  HID: wacom: Sync INTUOSP2_BT touch state after each frame if necessary
  HID: wacom: Correct button numbering 2nd-gen Intuos Pro over Bluetooth
  nouveau: Fix build with CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT disabled
  drm/nouveau: add kconfig option to turn off nouveau legacy contexts. (v3)
  ANDROID: kernel: cgroup: cpuset: Clear cpus_requested for empty buf
  ANDROID: kernel: cgroup: cpuset: Add missing allocation of cpus_requested in alloc_trial_cpuset
  BACKPORT: security: Implement Clang's stack initialization
  BACKPORT: security: Create "kernel hardening" config area

Change-Id: I486d2f64b7c34a2d23bde24b7c8c01caae6a1611
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
This commit is contained in: d6c7df0d69
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 127
+SUBLEVEL = 128
 EXTRAVERSION =
 NAME = Petit Gorille
 
@@ -784,6 +784,11 @@ KBUILD_CFLAGS	+= -fomit-frame-pointer
 endif
 endif
 
+# Initialize all stack variables with a pattern, if desired.
+ifdef CONFIG_INIT_STACK_ALL
+KBUILD_CFLAGS	+= -ftrivial-auto-var-init=pattern
+endif
+
 KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
 
 ifdef CONFIG_DEBUG_INFO
@@ -1350,13 +1350,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 
 int __init arch_ioremap_pud_supported(void)
 {
-	/* only 4k granule supports level 1 block mappings */
-	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+	/*
+	 * Only 4k granule supports level 1 block mappings.
+	 * SW table walks can't handle removal of intermediate entries.
+	 */
+	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return 1;
+	/* See arch_ioremap_pud_supported() */
+	return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
@@ -67,8 +67,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check
 raw_copy_to_user(void __user *to, const void *from, unsigned long n);
 
+#ifndef CONFIG_KASAN
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
+#endif
 
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
 
@@ -3913,21 +3913,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
-	int rc;
+	int rc = 0;
 
-	/* If the basics of the memslot do not change, we do not want
-	 * to update the gmap. Every update causes several unnecessary
-	 * segment translation exceptions. This is usually handled just
-	 * fine by the normal fault handler + gmap, but it will also
-	 * cause faults on the prefix page of running guest CPUs.
-	 */
-	if (old->userspace_addr == mem->userspace_addr &&
-	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
-	    old->npages * PAGE_SIZE == mem->memory_size)
-		return;
-
-	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
-		mem->guest_phys_addr, mem->memory_size);
+	switch (change) {
+	case KVM_MR_DELETE:
+		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+					old->npages * PAGE_SIZE);
+		break;
+	case KVM_MR_MOVE:
+		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+					old->npages * PAGE_SIZE);
+		if (rc)
+			break;
+		/* FALLTHROUGH */
+	case KVM_MR_CREATE:
+		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+				      mem->guest_phys_addr, mem->memory_size);
+		break;
+	case KVM_MR_FLAGS_ONLY:
+		break;
+	default:
+		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+	}
 	if (rc)
 		pr_warn("failed to commit memory region\n");
 	return;
@@ -873,7 +873,7 @@ int __init microcode_init(void)
 		goto out_ucode_group;
 
 	register_syscore_ops(&mc_syscore_ops);
-	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+	cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
 				  mc_cpu_online, mc_cpu_down_prep);
 
 	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
@@ -235,11 +235,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	default:
-		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
-		    (pmc = get_fixed_pmc(pmu, msr))) {
-			if (!msr_info->host_initiated)
-				data = (s64)(s32)data;
-			pmc->counter += data - pmc_read_counter(pmc);
+		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
+			if (msr_info->host_initiated)
+				pmc->counter = data;
+			else
+				pmc->counter = (s32)data;
 			return 0;
+		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
+			pmc->counter = data;
+			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			if (data == pmc->eventsel)
@@ -194,7 +194,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
 	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
 		return (p4d_t *)pgd;
 
-	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
 	p4d += __START_KERNEL_map - phys_base;
 	return (p4d_t *)p4d + p4d_index(addr);
 }
@@ -4472,9 +4472,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
 						ATA_HORKAGE_FIRMWARE_WARN },
 
-	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
-	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
-	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
+	   the ST disks also have LPM issues */
+	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
+						ATA_HORKAGE_NOLPM, },
+	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
+						ATA_HORKAGE_NOLPM, },
 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
 
 	/* Blacklist entries taken from Silicon Image 3124/3132
@ -928,6 +928,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
|
||||
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
|
||||
}
|
||||
|
||||
static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
|
||||
u8 audio_state)
|
||||
{
|
||||
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
|
||||
&audio_state, 1);
|
||||
}
|
||||
|
||||
#if 0
|
||||
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
|
||||
{
|
||||
@ -1359,11 +1366,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
|
||||
else
|
||||
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
|
||||
|
||||
if (crtc_state->has_audio) {
|
||||
WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
|
||||
sdvox |= SDVO_AUDIO_ENABLE;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 4) {
|
||||
/* done in crtc_mode_set as the dpll_md reg must be written early */
|
||||
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
|
||||
@ -1492,8 +1494,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
|
||||
if (sdvox & HDMI_COLOR_RANGE_16_235)
|
||||
pipe_config->limited_color_range = true;
|
||||
|
||||
if (sdvox & SDVO_AUDIO_ENABLE)
|
||||
pipe_config->has_audio = true;
|
||||
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
|
||||
&val, 1)) {
|
||||
u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
|
||||
|
||||
if ((val & mask) == mask)
|
||||
pipe_config->has_audio = true;
|
||||
}
|
||||
|
||||
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
|
||||
&val, 1)) {
|
||||
@ -1506,6 +1513,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
|
||||
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
|
||||
}
|
||||
|
||||
static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
|
||||
{
|
||||
intel_sdvo_set_audio_state(intel_sdvo, 0);
|
||||
}
|
||||
|
||||
static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->base.adjusted_mode;
|
||||
struct drm_connector *connector = conn_state->connector;
|
||||
u8 *eld = connector->eld;
|
||||
|
||||
eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
|
||||
|
||||
intel_sdvo_set_audio_state(intel_sdvo, 0);
|
||||
|
||||
intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
|
||||
SDVO_HBUF_TX_DISABLED,
|
||||
eld, drm_eld_size(eld));
|
||||
|
||||
intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
|
||||
SDVO_AUDIO_PRESENCE_DETECT);
|
||||
}
|
||||
|
||||
static void intel_disable_sdvo(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *old_crtc_state,
|
||||
struct drm_connector_state *conn_state)
|
||||
@ -1515,6 +1548,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
u32 temp;
|
||||
|
||||
if (old_crtc_state->has_audio)
|
||||
intel_sdvo_disable_audio(intel_sdvo);
|
||||
|
||||
intel_sdvo_set_active_outputs(intel_sdvo, 0);
|
||||
if (0)
|
||||
intel_sdvo_set_encoder_power_state(intel_sdvo,
|
||||
@ -1598,6 +1634,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
|
||||
intel_sdvo_set_encoder_power_state(intel_sdvo,
|
||||
DRM_MODE_DPMS_ON);
|
||||
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
|
||||
|
||||
if (pipe_config->has_audio)
|
||||
intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static enum drm_mode_status
|
||||
@ -2468,7 +2507,6 @@ static bool
|
||||
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
|
||||
{
|
||||
struct drm_encoder *encoder = &intel_sdvo->base.base;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
|
||||
struct drm_connector *connector;
|
||||
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
|
||||
struct intel_connector *intel_connector;
|
||||
@ -2504,9 +2542,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
|
||||
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
|
||||
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
|
||||
|
||||
/* gen3 doesn't do the hdmi bits in the SDVO register */
|
||||
if (INTEL_GEN(dev_priv) >= 4 &&
|
||||
intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
|
||||
if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
|
||||
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
|
||||
intel_sdvo->is_hdmi = true;
|
||||
}
|
||||
|
@@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
 #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
 #define SDVO_CMD_SET_AUDIO_STAT		0x91
 #define SDVO_CMD_GET_AUDIO_STAT		0x92
+  #define SDVO_AUDIO_ELD_VALID		(1 << 0)
+  #define SDVO_AUDIO_PRESENCE_DETECT	(1 << 1)
+  #define SDVO_AUDIO_CP_READY		(1 << 2)
 #define SDVO_CMD_SET_HBUF_INDEX		0x93
   #define SDVO_HBUF_INDEX_ELD		0
   #define SDVO_HBUF_INDEX_AVI_IF	1
@@ -16,10 +16,21 @@ config DRM_NOUVEAU
 	select INPUT if ACPI && X86
 	select THERMAL if ACPI && X86
 	select ACPI_VIDEO if ACPI && X86
-	select DRM_VM
 	help
 	  Choose this option for open-source NVIDIA support.
 
+config NOUVEAU_LEGACY_CTX_SUPPORT
+	bool "Nouveau legacy context support"
+	depends on DRM_NOUVEAU
+	select DRM_VM
+	default y
+	help
+	  There was a version of the nouveau DDX that relied on legacy
+	  ctx ioctls not erroring out. But that was back in time a long
+	  ways, so offer a way to disable it now. For uapi compat with
+	  old nouveau ddx this should be on by default, but modern distros
+	  should consider turning it off.
+
 config NOUVEAU_PLATFORM_DRIVER
 	bool "Nouveau (NVIDIA) SoC GPUs"
 	depends on DRM_NOUVEAU && ARCH_TEGRA
@@ -967,8 +967,11 @@ nouveau_driver_fops = {
 static struct drm_driver
 driver_stub = {
 	.driver_features =
-		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
-		DRIVER_KMS_LEGACY_CONTEXT,
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+		| DRIVER_KMS_LEGACY_CONTEXT
+#endif
+		,
 
 	.load = nouveau_drm_load,
 	.unload = nouveau_drm_unload,
@@ -273,7 +273,11 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
 
 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
 		return drm_legacy_mmap(filp, vma);
+#else
+		return -EINVAL;
+#endif
 
 	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
@@ -2495,7 +2495,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 
 	cmd = container_of(header, typeof(*cmd), header);
 
-	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
 		DRM_ERROR("Illegal shader type %u.\n",
 			  (unsigned) cmd->body.type);
 		return -EINVAL;
@@ -2734,6 +2735,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
 	if (view_type == vmw_view_max)
 		return -EINVAL;
 	cmd = container_of(header, typeof(*cmd), header);
+	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+		DRM_ERROR("Invalid surface id.\n");
+		return -EINVAL;
+	}
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->sid, &srf_node);
@ -1328,11 +1328,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
|
||||
if (wacom->num_contacts_left <= 0) {
|
||||
wacom->num_contacts_left = 0;
|
||||
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
|
||||
input_sync(touch_input);
|
||||
}
|
||||
}
|
||||
|
||||
input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
|
||||
input_sync(touch_input);
|
||||
if (wacom->num_contacts_left == 0) {
|
||||
// Be careful that we don't accidentally call input_sync with
|
||||
// only a partial set of fingers of processed
|
||||
input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
|
||||
input_sync(touch_input);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
|
||||
@ -1340,7 +1346,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
|
||||
struct input_dev *pad_input = wacom->pad_input;
|
||||
unsigned char *data = wacom->data;
|
||||
|
||||
int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
|
||||
int buttons = data[282] | ((data[281] & 0x40) << 2);
|
||||
int ring = data[285] & 0x7F;
|
||||
bool ringstatus = data[285] & 0x80;
|
||||
bool prox = buttons || ringstatus;
|
||||
@ -3650,7 +3656,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
|
||||
static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
|
||||
int mask, int group)
|
||||
{
|
||||
int button_per_group;
|
||||
int group_button;
|
||||
|
||||
/*
|
||||
* 21UX2 has LED group 1 to the left and LED group 0
|
||||
@ -3660,9 +3666,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
|
||||
if (wacom->wacom_wac.features.type == WACOM_21UX2)
|
||||
group = 1 - group;
|
||||
|
||||
button_per_group = button_count/wacom->led.count;
|
||||
group_button = group * (button_count/wacom->led.count);
|
||||
|
||||
return mask & (1 << (group * button_per_group));
|
||||
if (wacom->wacom_wac.features.type == INTUOSP2_BT)
|
||||
group_button = 8;
|
||||
|
||||
return mask & (1 << group_button);
|
||||
}
|
||||
|
||||
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
|
||||
|
@@ -83,6 +83,7 @@ static struct i2c_algo_bit_data ioc_data = {
 
 static struct i2c_adapter ioc_ops = {
 	.nr			= 0,
+	.name			= "ioc",
 	.algo_data		= &ioc_data,
 };
 
@ -65,6 +65,15 @@
|
||||
#include "io-pgtable.h"
|
||||
#include "arm-smmu-regs.h"
|
||||
|
||||
/*
|
||||
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
|
||||
* global register space are still, in fact, using a hypervisor to mediate it
|
||||
* by trapping and emulating register accesses. Sadly, some deployed versions
|
||||
* of said trapping code have bugs wherein they go horribly wrong for stores
|
||||
* using r31 (i.e. XZR/WZR) as the source register.
|
||||
*/
|
||||
#define QCOM_DUMMY_VAL -1
|
||||
|
||||
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
|
||||
|
||||
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
|
||||
@ -1102,7 +1111,7 @@ static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
|
||||
unsigned int spin_cnt, delay;
|
||||
u32 sync_inv_ack, tbu_pwr_status, sync_inv_progress;
|
||||
|
||||
writel_relaxed(0, sync);
|
||||
writel_relaxed(QCOM_DUMMY_VAL, sync);
|
||||
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
|
||||
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
|
||||
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
|
||||
@ -4057,8 +4066,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
||||
}
|
||||
|
||||
/* Invalidate the TLB, just in case */
|
||||
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
|
||||
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
|
||||
writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
|
||||
writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
|
||||
|
||||
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
|
||||
|
||||
|
@ -825,12 +825,22 @@ unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
|
||||
struct bset *i = bset_tree_last(b)->data;
|
||||
struct bkey *m, *prev = NULL;
|
||||
struct btree_iter iter;
|
||||
struct bkey preceding_key_on_stack = ZERO_KEY;
|
||||
struct bkey *preceding_key_p = &preceding_key_on_stack;
|
||||
|
||||
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
|
||||
|
||||
m = bch_btree_iter_init(b, &iter, b->ops->is_extents
|
||||
? PRECEDING_KEY(&START_KEY(k))
|
||||
: PRECEDING_KEY(k));
|
||||
/*
|
||||
* If k has preceding key, preceding_key_p will be set to address
|
||||
* of k's preceding key; otherwise preceding_key_p will be set
|
||||
* to NULL inside preceding_key().
|
||||
*/
|
||||
if (b->ops->is_extents)
|
||||
preceding_key(&START_KEY(k), &preceding_key_p);
|
||||
else
|
||||
preceding_key(k, &preceding_key_p);
|
||||
|
||||
m = bch_btree_iter_init(b, &iter, preceding_key_p);
|
||||
|
||||
if (b->ops->insert_fixup(b, k, &iter, replace_key))
|
||||
return status;
|
||||
|
@ -418,20 +418,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
|
||||
return __bch_cut_back(where, k);
|
||||
}
|
||||
|
||||
#define PRECEDING_KEY(_k) \
|
||||
({ \
|
||||
struct bkey *_ret = NULL; \
|
||||
\
|
||||
if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
|
||||
_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
|
||||
\
|
||||
if (!_ret->low) \
|
||||
_ret->high--; \
|
||||
_ret->low--; \
|
||||
} \
|
||||
\
|
||||
_ret; \
|
||||
})
|
||||
/*
|
||||
* Pointer '*preceding_key_p' points to a memory object to store preceding
|
||||
* key of k. If the preceding key does not exist, set '*preceding_key_p' to
|
||||
* NULL. So the caller of preceding_key() needs to take care of memory
|
||||
* which '*preceding_key_p' pointed to before calling preceding_key().
|
||||
* Currently the only caller of preceding_key() is bch_btree_insert_key(),
|
||||
* and it points to an on-stack variable, so the memory release is handled
|
||||
* by stackframe itself.
|
||||
*/
|
||||
static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
|
||||
{
|
||||
if (KEY_INODE(k) || KEY_OFFSET(k)) {
|
||||
(**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
|
||||
if (!(*preceding_key_p)->low)
|
||||
(*preceding_key_p)->high--;
|
||||
(*preceding_key_p)->low--;
|
||||
} else {
|
||||
(*preceding_key_p) = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
|
||||
{
|
||||
|
@@ -2032,7 +2032,22 @@ static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
 	struct v4l2_streamparm *p = arg;
 	int ret = check_fmt(file, p->type);
 
-	return ret ? ret : ops->vidioc_s_parm(file, fh, p);
+	if (ret)
+		return ret;
+
+	/* Note: extendedmode is never used in drivers */
+	if (V4L2_TYPE_IS_OUTPUT(p->type)) {
+		memset(p->parm.output.reserved, 0,
+		       sizeof(p->parm.output.reserved));
+		p->parm.output.extendedmode = 0;
+		p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
+	} else {
+		memset(p->parm.capture.reserved, 0,
+		       sizeof(p->parm.capture.reserved));
+		p->parm.capture.extendedmode = 0;
+		p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
+	}
+	return ops->vidioc_s_parm(file, fh, p);
 }
 
 static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
@@ -1135,7 +1135,7 @@ static void kgdbts_put_char(u8 chr)
 static int param_set_kgdbts_var(const char *kmessage,
 				const struct kernel_param *kp)
 {
-	int len = strlen(kmessage);
+	size_t len = strlen(kmessage);
 
 	if (len >= MAX_CONFIG_LEN) {
 		printk(KERN_ERR "kgdbts: config string too long\n");
@@ -1155,7 +1155,7 @@ static int param_set_kgdbts_var(const char *kmessage,
 
 	strcpy(config, kmessage);
 	/* Chop out \n char as a result of echo */
-	if (config[len - 1] == '\n')
+	if (len && config[len - 1] == '\n')
 		config[len - 1] = '\0';
 
 	/* Go and configure with the new params. */
@@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 			  dev);
 	dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
+	netif_stop_queue(net);
 	retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
 	if (retval) {
 		dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
 			__func__, retval);
 		dev->net->stats.tx_errors++;
 		dev_kfree_skb_any(skb);
+		netif_wake_queue(net);
 	} else {
 		dev->net->stats.tx_packets++;
 		dev->net->stats.tx_bytes += skb->len;
 		dev_consume_skb_any(skb);
-		netif_stop_queue(net);
 	}
 
 	return NETDEV_TX_OK;
@@ -608,7 +608,7 @@ static struct attribute *nd_device_attributes[] = {
 	NULL,
 };
 
-/**
+/*
  * nd_device_attribute_group - generic attributes for all devices on an nd bus
  */
 struct attribute_group nd_device_attribute_group = {
@@ -637,7 +637,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
 	return a->mode;
 }
 
-/**
+/*
  * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
  */
 struct attribute_group nd_numa_attribute_group = {
@@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid;
 static guid_t nvdimm_pfn_guid;
 static guid_t nvdimm_dax_guid;
 
+static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
+
 static u32 best_seq(u32 a, u32 b)
 {
 	a &= NSINDEX_SEQ_MASK;
@@ -38,8 +38,6 @@ enum {
 	ND_NSINDEX_INIT = 0x1,
 };
 
-static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
-
 /**
  * struct nd_namespace_index - label set superblock
  * @sig: NAMESPACE_INDEX\0
@@ -1042,10 +1042,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, (void __user *)arg);
 	default:
-#ifdef CONFIG_NVM
 		if (ns->ndev)
 			return nvme_nvm_ioctl(ns, cmd, arg);
-#endif
 		if (is_sed_ioctl(cmd))
 			return sed_ioctl(ns->ctrl->opal_dev, cmd,
 					 (void __user *) arg);
@ -428,12 +428,45 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
|
||||
*/
|
||||
static const struct dmi_system_id critclk_systems[] = {
|
||||
{
|
||||
/* pmc_plt_clk0 is used for an external HSIC USB HUB */
|
||||
.ident = "MPL CEC1x",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
|
||||
.ident = "Lex 3I380D",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* pmc_plt_clk* - are used for ethernet controllers */
|
||||
.ident = "Beckhoff CB3163",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* pmc_plt_clk* - are used for ethernet controllers */
|
||||
.ident = "Beckhoff CB6263",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* pmc_plt_clk* - are used for ethernet controllers */
|
||||
.ident = "Beckhoff CB6363",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
|
||||
},
|
||||
},
|
||||
{ /*sentinel*/ }
|
||||
};
|
||||
|
||||
|
@ -185,32 +185,38 @@ static void cec_timer_fn(unsigned long data)
|
||||
*/
|
||||
static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
|
||||
{
|
||||
int min = 0, max = ca->n - 1;
|
||||
u64 this_pfn;
|
||||
int min = 0, max = ca->n;
|
||||
|
||||
while (min < max) {
|
||||
int tmp = (max + min) >> 1;
|
||||
while (min <= max) {
|
||||
int i = (min + max) >> 1;
|
||||
|
||||
this_pfn = PFN(ca->array[tmp]);
|
||||
this_pfn = PFN(ca->array[i]);
|
||||
|
||||
if (this_pfn < pfn)
|
||||
min = tmp + 1;
|
||||
min = i + 1;
|
||||
else if (this_pfn > pfn)
|
||||
max = tmp;
|
||||
else {
|
||||
min = tmp;
|
||||
break;
|
||||
max = i - 1;
|
||||
else if (this_pfn == pfn) {
|
||||
if (to)
|
||||
*to = i;
|
||||
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* When the loop terminates without finding @pfn, min has the index of
|
||||
* the element slot where the new @pfn should be inserted. The loop
|
||||
* terminates when min > max, which means the min index points to the
|
||||
* bigger element while the max index to the smaller element, in-between
|
||||
* which the new @pfn belongs to.
|
||||
*
|
||||
* For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3.
|
||||
*/
|
||||
if (to)
|
||||
*to = min;
|
||||
|
||||
this_pfn = PFN(ca->array[min]);
|
||||
|
||||
if (this_pfn == pfn)
|
||||
return min;
|
||||
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
|
@ -82,6 +82,18 @@ static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pcf8523_voltage_low(struct i2c_client *client)
|
||||
{
|
||||
u8 value;
|
||||
int err;
|
||||
|
||||
err = pcf8523_read(client, REG_CONTROL3, &value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
return !!(value & REG_CONTROL3_BLF);
|
||||
}
|
||||
|
||||
static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
|
||||
{
|
||||
u8 value;
|
||||
@ -164,6 +176,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
|
||||
struct i2c_msg msgs[2];
|
||||
int err;
|
||||
|
||||
err = pcf8523_voltage_low(client);
|
||||
if (err < 0) {
|
||||
return err;
|
||||
} else if (err > 0) {
|
||||
dev_err(dev, "low voltage detected, time is unreliable\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
msgs[0].addr = client->addr;
|
||||
msgs[0].flags = 0;
|
||||
msgs[0].len = 1;
|
||||
@ -248,17 +268,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct i2c_client *client = to_i2c_client(dev);
|
||||
u8 value;
|
||||
int ret = 0, err;
|
||||
int ret;
|
||||
|
||||
switch (cmd) {
|
||||
case RTC_VL_READ:
|
||||
err = pcf8523_read(client, REG_CONTROL3, &value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (value & REG_CONTROL3_BLF)
|
||||
ret = 1;
|
||||
ret = pcf8523_voltage_low(client);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
|
||||
return -EFAULT;
|
||||
|
@ -830,7 +830,7 @@ ret_err_rqe:
|
||||
((u64)err_entry->data.err_warn_bitmap_hi << 32) |
|
||||
(u64)err_entry->data.err_warn_bitmap_lo;
|
||||
for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
|
||||
if (err_warn_bit_map & (u64) (1 << i)) {
|
||||
if (err_warn_bit_map & ((u64)1 << i)) {
|
||||
err_warn = i;
|
||||
break;
|
||||
}
|
||||
|
@@ -7065,7 +7065,10 @@ int
 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
 {
 	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
-							rrq->nlp_DID);
+						       rrq->nlp_DID);
+	if (!ndlp)
+		return 1;
+
 	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
 		return lpfc_issue_els_rrq(rrq->vport, ndlp,
 					 rrq->nlp_DID, rrq);
@ -16,10 +16,6 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
{
|
||||
va_list va;
|
||||
struct va_format vaf;
|
||||
char nfunc[32];
|
||||
|
||||
memset(nfunc, 0, sizeof(nfunc));
|
||||
memcpy(nfunc, func, sizeof(nfunc) - 1);
|
||||
|
||||
va_start(va, fmt);
|
||||
|
||||
@ -28,9 +24,9 @@ qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
|
||||
if (likely(qedi) && likely(qedi->pdev))
|
||||
pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
|
||||
nfunc, line, qedi->host_no, &vaf);
|
||||
func, line, qedi->host_no, &vaf);
|
||||
else
|
||||
pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
|
||||
pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
|
||||
|
||||
va_end(va);
|
||||
}
|
||||
@ -41,10 +37,6 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
{
|
||||
va_list va;
|
||||
struct va_format vaf;
|
||||
char nfunc[32];
|
||||
|
||||
memset(nfunc, 0, sizeof(nfunc));
|
||||
memcpy(nfunc, func, sizeof(nfunc) - 1);
|
||||
|
||||
va_start(va, fmt);
|
||||
|
||||
@ -56,9 +48,9 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
|
||||
if (likely(qedi) && likely(qedi->pdev))
|
||||
pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
|
||||
nfunc, line, qedi->host_no, &vaf);
|
||||
func, line, qedi->host_no, &vaf);
|
||||
else
|
||||
pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
|
||||
pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
|
||||
|
||||
ret:
|
||||
va_end(va);
|
||||
@ -70,10 +62,6 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
{
|
||||
va_list va;
|
||||
struct va_format vaf;
|
||||
char nfunc[32];
|
||||
|
||||
memset(nfunc, 0, sizeof(nfunc));
|
||||
memcpy(nfunc, func, sizeof(nfunc) - 1);
|
||||
|
||||
va_start(va, fmt);
|
||||
|
||||
@ -85,10 +73,10 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
|
||||
if (likely(qedi) && likely(qedi->pdev))
|
||||
pr_notice("[%s]:[%s:%d]:%d: %pV",
|
||||
dev_name(&qedi->pdev->dev), nfunc, line,
|
||||
dev_name(&qedi->pdev->dev), func, line,
|
||||
qedi->host_no, &vaf);
|
||||
else
|
||||
pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
|
||||
pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
|
||||
|
||||
ret:
|
||||
va_end(va);
|
||||
@ -100,10 +88,6 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
{
|
||||
va_list va;
|
||||
struct va_format vaf;
|
||||
char nfunc[32];
|
||||
|
||||
memset(nfunc, 0, sizeof(nfunc));
|
||||
memcpy(nfunc, func, sizeof(nfunc) - 1);
|
||||
|
||||
va_start(va, fmt);
|
||||
|
||||
@ -115,9 +99,9 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
|
||||
|
||||
if (likely(qedi) && likely(qedi->pdev))
|
||||
pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
|
||||
nfunc, line, qedi->host_no, &vaf);
|
||||
func, line, qedi->host_no, &vaf);
|
||||
else
|
||||
pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
|
||||
pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
|
||||
|
||||
ret:
|
||||
va_end(va);
|
||||
|
@@ -817,8 +817,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 	struct qedi_endpoint *qedi_ep;
 	struct sockaddr_in *addr;
 	struct sockaddr_in6 *addr6;
-	struct qed_dev *cdev = NULL;
-	struct qedi_uio_dev *udev = NULL;
 	struct iscsi_path path_req;
 	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 	u32 iscsi_cid = QEDI_CID_RESERVED;
@@ -838,8 +836,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 	}
 
 	qedi = iscsi_host_priv(shost);
-	cdev = qedi->cdev;
-	udev = qedi->udev;
 
 	if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
 	    test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
@@ -70,6 +70,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
 	{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Logitech HD Webcam C270 */
+	{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@ -2652,8 +2652,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
|
||||
return;
|
||||
|
||||
/* Restore urb->transfer_buffer from the end of the allocated area */
|
||||
memcpy(&stored_xfer_buffer, urb->transfer_buffer +
|
||||
urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
|
||||
memcpy(&stored_xfer_buffer,
|
||||
PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
|
||||
dma_get_cache_alignment()),
|
||||
sizeof(urb->transfer_buffer));
|
||||
|
||||
if (usb_urb_dir_in(urb))
|
||||
memcpy(stored_xfer_buffer, urb->transfer_buffer,
|
||||
@ -2680,6 +2682,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
|
||||
* DMA
|
||||
*/
|
||||
kmalloc_size = urb->transfer_buffer_length +
|
||||
(dma_get_cache_alignment() - 1) +
|
||||
sizeof(urb->transfer_buffer);
|
||||
|
||||
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
|
||||
@ -2690,7 +2693,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
|
||||
* Position value of original urb->transfer_buffer pointer to the end
|
||||
* of allocation for later referencing
|
||||
*/
|
||||
memcpy(kmalloc_ptr + urb->transfer_buffer_length,
|
||||
memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
|
||||
dma_get_cache_alignment()),
|
||||
&urb->transfer_buffer, sizeof(urb->transfer_buffer));
|
||||
|
||||
if (usb_urb_dir_out(urb))
|
||||
@ -2775,7 +2779,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
||||
chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
|
||||
chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
|
||||
chan->speed = qh->dev_speed;
|
||||
chan->max_packet = dwc2_max_packet(qh->maxp);
|
||||
chan->max_packet = qh->maxp;
|
||||
|
||||
chan->xfer_started = 0;
|
||||
chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
|
||||
@ -2853,7 +2857,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
||||
* This value may be modified when the transfer is started
|
||||
* to reflect the actual transfer length
|
||||
*/
|
||||
chan->multi_count = dwc2_hb_mult(qh->maxp);
|
||||
chan->multi_count = qh->maxp_mult;
|
||||
|
||||
if (hsotg->params.dma_desc_enable) {
|
||||
chan->desc_list_addr = qh->desc_list_dma;
|
||||
@ -3952,19 +3956,21 @@ static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
|
||||
|
||||
static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
|
||||
struct dwc2_hcd_urb *urb, u8 dev_addr,
|
||||
u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
|
||||
u8 ep_num, u8 ep_type, u8 ep_dir,
|
||||
u16 maxp, u16 maxp_mult)
|
||||
{
|
||||
if (dbg_perio() ||
|
||||
ep_type == USB_ENDPOINT_XFER_BULK ||
|
||||
ep_type == USB_ENDPOINT_XFER_CONTROL)
|
||||
dev_vdbg(hsotg->dev,
|
||||
"addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
|
||||
dev_addr, ep_num, ep_dir, ep_type, mps);
|
||||
"addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
|
||||
dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
|
||||
urb->pipe_info.dev_addr = dev_addr;
|
||||
urb->pipe_info.ep_num = ep_num;
|
||||
urb->pipe_info.pipe_type = ep_type;
|
||||
urb->pipe_info.pipe_dir = ep_dir;
|
||||
urb->pipe_info.mps = mps;
|
||||
urb->pipe_info.maxp = maxp;
|
||||
urb->pipe_info.maxp_mult = maxp_mult;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -4055,8 +4061,9 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
|
||||
dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
|
||||
"IN" : "OUT");
|
||||
dev_dbg(hsotg->dev,
|
||||
" Max packet size: %d\n",
|
||||
dwc2_hcd_get_mps(&urb->pipe_info));
|
||||
" Max packet size: %d (%d mult)\n",
|
||||
dwc2_hcd_get_maxp(&urb->pipe_info),
|
||||
dwc2_hcd_get_maxp_mult(&urb->pipe_info));
|
||||
dev_dbg(hsotg->dev,
|
||||
" transfer_buffer: %p\n",
|
||||
urb->buf);
|
||||
@ -4669,8 +4676,10 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
|
||||
}
|
||||
|
||||
dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
|
||||
dev_vdbg(hsotg->dev, " Max packet size: %d\n",
|
||||
usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
|
||||
dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
|
||||
usb_endpoint_maxp(&urb->ep->desc),
|
||||
usb_endpoint_maxp_mult(&urb->ep->desc));
|
||||
|
||||
dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
|
||||
urb->transfer_buffer_length);
|
||||
dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
|
||||
@ -4753,8 +4762,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
|
||||
dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
|
||||
usb_pipeendpoint(urb->pipe), ep_type,
|
||||
usb_pipein(urb->pipe),
|
||||
usb_maxpacket(urb->dev, urb->pipe,
|
||||
!(usb_pipein(urb->pipe))));
|
||||
usb_endpoint_maxp(&ep->desc),
|
||||
usb_endpoint_maxp_mult(&ep->desc));
|
||||
|
||||
buf = urb->transfer_buffer;
|
||||
|
||||
|
@ -170,7 +170,8 @@ struct dwc2_hcd_pipe_info {
|
||||
u8 ep_num;
|
||||
u8 pipe_type;
|
||||
u8 pipe_dir;
|
||||
u16 mps;
|
||||
u16 maxp;
|
||||
u16 maxp_mult;
|
||||
};
|
||||
|
||||
struct dwc2_hcd_iso_packet_desc {
|
||||
@ -263,6 +264,7 @@ struct dwc2_hs_transfer_time {
|
||||
* - USB_ENDPOINT_XFER_ISOC
|
||||
* @ep_is_in: Endpoint direction
|
||||
* @maxp: Value from wMaxPacketSize field of Endpoint Descriptor
|
||||
* @maxp_mult: Multiplier for maxp
|
||||
* @dev_speed: Device speed. One of the following values:
|
||||
* - USB_SPEED_LOW
|
||||
* - USB_SPEED_FULL
|
||||
@ -335,6 +337,7 @@ struct dwc2_qh {
|
||||
u8 ep_type;
|
||||
u8 ep_is_in;
|
||||
u16 maxp;
|
||||
u16 maxp_mult;
|
||||
u8 dev_speed;
|
||||
u8 data_toggle;
|
||||
u8 ping_state;
|
||||
@ -489,9 +492,14 @@ static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
|
||||
return pipe->pipe_type;
|
||||
}
|
||||
|
||||
static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
|
||||
static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
|
||||
{
|
||||
return pipe->mps;
|
||||
return pipe->maxp;
|
||||
}
|
||||
|
||||
static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
|
||||
{
|
||||
return pipe->maxp_mult;
|
||||
}
|
||||
|
||||
static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
|
||||
@ -606,12 +614,6 @@ static inline bool dbg_urb(struct urb *urb)
|
||||
static inline bool dbg_perio(void) { return false; }
|
||||
#endif
|
||||
|
||||
/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
|
||||
#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
|
||||
|
||||
/* Packet size for any kind of endpoint descriptor */
|
||||
#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
|
||||
|
||||
/*
|
||||
* Returns true if frame1 index is greater than frame2 index. The comparison
|
||||
* is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
|
||||
|
@@ -1579,8 +1579,9 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
 
 	dev_err(hsotg->dev, "  Speed: %s\n", speed);
 
-	dev_err(hsotg->dev, "  Max packet size: %d\n",
-		dwc2_hcd_get_mps(&urb->pipe_info));
+	dev_err(hsotg->dev, "  Max packet size: %d (mult %d)\n",
+		dwc2_hcd_get_maxp(&urb->pipe_info),
+		dwc2_hcd_get_maxp_mult(&urb->pipe_info));
 	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
 	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
 		urb->buf, (unsigned long)urb->dma);
@ -703,7 +703,7 @@ static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
|
||||
static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
|
||||
struct dwc2_qh *qh)
|
||||
{
|
||||
int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
|
||||
int bytecount = qh->maxp_mult * qh->maxp;
|
||||
int ls_search_slice;
|
||||
int err = 0;
|
||||
int host_interval_in_sched;
|
||||
@ -1327,7 +1327,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
|
||||
u32 max_channel_xfer_size;
|
||||
int status = 0;
|
||||
|
||||
max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
|
||||
max_xfer_size = qh->maxp * qh->maxp_mult;
|
||||
max_channel_xfer_size = hsotg->params.max_transfer_size;
|
||||
|
||||
if (max_xfer_size > max_channel_xfer_size) {
|
||||
@ -1460,8 +1460,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
|
||||
u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
|
||||
bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
|
||||
dev_speed != USB_SPEED_HIGH);
|
||||
int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
|
||||
int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
|
||||
int maxp = dwc2_hcd_get_maxp(&urb->pipe_info);
|
||||
int maxp_mult = dwc2_hcd_get_maxp_mult(&urb->pipe_info);
|
||||
int bytecount = maxp_mult * maxp;
|
||||
char *speed, *type;
|
||||
|
||||
/* Initialize QH */
|
||||
@ -1473,6 +1474,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
|
||||
|
||||
qh->data_toggle = DWC2_HC_PID_DATA0;
|
||||
qh->maxp = maxp;
|
||||
qh->maxp_mult = maxp_mult;
|
||||
INIT_LIST_HEAD(&qh->qtd_list);
|
||||
INIT_LIST_HEAD(&qh->qh_list_entry);
|
||||
|
||||
|
@@ -1174,6 +1174,10 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
 	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
@@ -1775,6 +1779,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
 	  .driver_info = RSVD(5) | RSVD(6) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },	/* Simcom SIM7500/SIM7600 MBIM mode */
+	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff),	/* Simcom SIM7500/SIM7600 RNDIS mode */
+	  .driver_info = RSVD(7) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
 	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
 	{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
 	{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+	{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
 	{ }					/* Terminating entry */
 };
 
@@ -160,3 +160,6 @@
 #define SMART_VENDOR_ID	0x0b8c
 #define SMART_PRODUCT_ID	0x2303
 
+/* Allied Telesis VT-Kit3 */
+#define AT_VENDOR_ID		0x0caa
+#define AT_VTKIT3_PRODUCT_ID	0x3001
@@ -29,6 +29,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
 		"USB Card Reader",
 		USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
 
+UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
+		"Realtek",
+		"USB Card Reader",
+		USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
 UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
 		"Realtek",
 		"USB Card Reader",
@@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
 
 out_attach:
 	spin_lock(&dentry_attach_lock);
+	if (unlikely(dentry->d_fsdata && !alias)) {
+		/* d_fsdata is set by a racing thread which is doing
+		 * the same thing as this thread is doing. Leave the racing
+		 * thread going ahead and we return here.
+		 */
+		spin_unlock(&dentry_attach_lock);
+		iput(dl->dl_inode);
+		ocfs2_lock_res_free(&dl->dl_lockres);
+		kfree(dl);
+		return 0;
+	}
+
 	dentry->d_fsdata = dl;
 	dl->dl_count++;
 	spin_unlock(&dentry_attach_lock);
@@ -484,7 +484,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
  *
  * Find the css for the (@task, @subsys_id) combination, increment a
  * reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
  */
 static inline struct cgroup_subsys_state *
 task_get_css(struct task_struct *task, int subsys_id)
@@ -494,7 +494,13 @@ task_get_css(struct task_struct *task, int subsys_id)
 	rcu_read_lock();
 	while (true) {
 		css = task_css(task, subsys_id);
-		if (likely(css_tryget_online(css)))
+		/*
+		 * Can't use css_tryget_online() here. A task which has
+		 * PF_EXITING set may stay associated with an offline css.
+		 * If such task calls this function, css_tryget_online()
+		 * will keep failing.
+		 */
+		if (likely(css_tryget(css)))
 			break;
 		cpu_relax();
 	}
@@ -105,6 +105,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
+	CPUHP_AP_MICROCODE_LOADER,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
@@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
 # Don't self-instrument.
 KCOV_INSTRUMENT_kcov.o := n
 KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 # cond_syscall is currently not LTO compatible
 CFLAGS_sys_ni.o = $(DISABLE_LTO)
@ -433,14 +433,19 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
|
||||
|
||||
if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
|
||||
goto free_cs;
|
||||
if (!alloc_cpumask_var(&trial->cpus_requested, GFP_KERNEL))
|
||||
goto free_allowed;
|
||||
if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
|
||||
goto free_cpus;
|
||||
|
||||
cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
|
||||
cpumask_copy(trial->cpus_requested, cs->cpus_requested);
|
||||
cpumask_copy(trial->effective_cpus, cs->effective_cpus);
|
||||
return trial;
|
||||
|
||||
free_cpus:
|
||||
free_cpumask_var(trial->cpus_requested);
|
||||
free_allowed:
|
||||
free_cpumask_var(trial->cpus_allowed);
|
||||
free_cs:
|
||||
kfree(trial);
|
||||
@ -454,6 +459,7 @@ free_cs:
|
||||
static void free_trial_cpuset(struct cpuset *trial)
|
||||
{
|
||||
free_cpumask_var(trial->effective_cpus);
|
||||
free_cpumask_var(trial->cpus_requested);
|
||||
free_cpumask_var(trial->cpus_allowed);
|
||||
kfree(trial);
|
||||
}
|
||||
@ -983,24 +989,24 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
|
||||
return -EACCES;
|
||||
|
||||
/*
|
||||
* An empty cpus_allowed is ok only if the cpuset has no tasks.
|
||||
* An empty cpus_requested is ok only if the cpuset has no tasks.
|
||||
* Since cpulist_parse() fails on an empty mask, we special case
|
||||
* that parsing. The validate_change() call ensures that cpusets
|
||||
* with tasks have cpus.
|
||||
*/
|
||||
if (!*buf) {
|
||||
cpumask_clear(trialcs->cpus_allowed);
|
||||
cpumask_clear(trialcs->cpus_requested);
|
||||
} else {
|
||||
retval = cpulist_parse(buf, trialcs->cpus_requested);
|
||||
if (retval < 0)
|
||||
return retval;
|
||||
|
||||
if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
|
||||
return -EINVAL;
|
||||
|
||||
cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
|
||||
}
|
||||
|
||||
if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
|
||||
return -EINVAL;
|
||||
|
||||
cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
|
||||
|
||||
/* Nothing to do if the cpus didn't change */
|
||||
if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
|
||||
return 0;
|
||||
|
@ -448,6 +448,15 @@ int commit_creds(struct cred *new)
|
||||
if (task->mm)
|
||||
set_dumpable(task->mm, suid_dumpable);
|
||||
task->pdeath_signal = 0;
|
||||
/*
|
||||
* If a task drops privileges and becomes nondumpable,
|
||||
* the dumpability change must become visible before
|
||||
* the credential change; otherwise, a __ptrace_may_access()
|
||||
* racing with this change may be able to attach to a task it
|
||||
* shouldn't be able to attach to (as if the task had dropped
|
||||
* privileges without becoming nondumpable).
|
||||
* Pairs with a read barrier in __ptrace_may_access().
|
||||
*/
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
|
@ -323,6 +323,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
|
||||
return -EPERM;
|
||||
ok:
|
||||
rcu_read_unlock();
|
||||
/*
|
||||
* If a task drops privileges and becomes nondumpable (through a syscall
|
||||
* like setresuid()) while we are trying to access it, we must ensure
|
||||
* that the dumpability is read after the credentials; otherwise,
|
||||
* we may be able to attach to a task that we shouldn't be able to
|
||||
* attach to (as if the task had dropped privileges without becoming
|
||||
* nondumpable).
|
||||
* Pairs with a write barrier in commit_creds().
|
||||
*/
|
||||
smp_rmb();
|
||||
mm = task->mm;
|
||||
if (mm &&
|
||||
((get_dumpable(mm) != SUID_DUMP_USER) &&
|
||||
@ -704,6 +714,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
|
||||
if (arg.nr < 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* Ensure arg.off fits in an unsigned long */
|
||||
if (arg.off > ULONG_MAX)
|
||||
return 0;
|
||||
|
||||
if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
|
||||
pending = &child->signal->shared_pending;
|
||||
else
|
||||
@ -711,18 +725,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
|
||||
|
||||
for (i = 0; i < arg.nr; ) {
|
||||
siginfo_t info;
|
||||
s32 off = arg.off + i;
|
||||
unsigned long off = arg.off + i;
|
||||
bool found = false;
|
||||
|
||||
spin_lock_irq(&child->sighand->siglock);
|
||||
list_for_each_entry(q, &pending->list, list) {
|
||||
if (!off--) {
|
||||
found = true;
|
||||
copy_siginfo(&info, &q->info);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&child->sighand->siglock);
|
||||
|
||||
if (off >= 0) /* beyond the end of the list */
|
||||
if (!found) /* beyond the end of the list */
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
@@ -313,7 +313,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
 	}
 	return 0;
 fail:
-	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
 	return -ENOMEM;
 }
 
@@ -1405,7 +1405,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
 		if (page_is_file_cache(page) && !PageDirty(page) &&
-		    !__PageMovable(page)) {
+		    !__PageMovable(page) && !PageUnevictable(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
 		}
@@ -276,5 +276,7 @@ config DEFAULT_SECURITY
 	default "apparmor" if DEFAULT_SECURITY_APPARMOR
 	default "" if DEFAULT_SECURITY_DAC
 
+source "security/Kconfig.hardening"
+
 endmenu
 
41
security/Kconfig.hardening
Normal file
41
security/Kconfig.hardening
Normal file
@ -0,0 +1,41 @@
|
||||
menu "Kernel hardening options"
|
||||
|
||||
menu "Memory initialization"
|
||||
|
||||
choice
|
||||
prompt "Initialize kernel stack variables at function entry"
|
||||
default INIT_STACK_NONE
|
||||
help
|
||||
This option enables initialization of stack variables at
|
||||
function entry time. This has the possibility to have the
|
||||
greatest coverage (since all functions can have their
|
||||
variables initialized), but the performance impact depends
|
||||
on the function calling complexity of a given workload's
|
||||
syscalls.
|
||||
|
||||
This chooses the level of coverage over classes of potentially
|
||||
uninitialized variables. The selected class will be
|
||||
initialized before use in a function.
|
||||
|
||||
config INIT_STACK_NONE
|
||||
bool "no automatic initialization (weakest)"
|
||||
help
|
||||
Disable automatic stack variable initialization.
|
||||
This leaves the kernel vulnerable to the standard
|
||||
classes of uninitialized stack variable exploits
|
||||
and information exposures.
|
||||
|
||||
config INIT_STACK_ALL
|
||||
bool "0xAA-init everything on the stack (strongest)"
|
||||
help
|
||||
Initializes everything on the stack with a 0xAA
|
||||
pattern. This is intended to eliminate all classes
|
||||
of uninitialized stack variable exploits and information
|
||||
exposures, even variables that were warned to have been
|
||||
left uninitialized.
|
||||
|
||||
endchoice
|
||||
|
||||
endmenu
|
||||
|
||||
endmenu
|
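For context on what the INIT_STACK_* options defend against, here is a hedged user-space sketch (the struct and functions are invented for the example) of the classic uninitialized-stack-variable leak; with automatic stack initialization the unwritten bytes below would hold a known pattern instead of stale stack contents:

/* Hedged illustration only: not from the patch above. */
#include <stdio.h>
#include <string.h>

struct reply {
	int status;
	char pad[12];              /* never written on the error path */
};

/* Without automatic stack initialization, the padding and any unwritten
 * fields keep whatever bytes happened to be on the stack; a caller that
 * copies the whole struct out (e.g. to user space) would disclose them. */
static void fill_reply(struct reply *r, int ok)
{
	if (ok)
		memset(r, 0, sizeof(*r));
	else
		r->status = -1;    /* pad[] left uninitialized */
}

int main(void)
{
	struct reply r;

	fill_reply(&r, 0);
	printf("status=%d (pad bytes intentionally not printed)\n", r.status);
	return 0;
}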
@@ -1904,20 +1904,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
 	int result;
 	struct snd_seq_client *sender = NULL;
 	struct snd_seq_client_port *sport = NULL;
-	struct snd_seq_subscribers *p;
 
 	result = -EINVAL;
 	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
 		goto __end;
 	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
 		goto __end;
-	p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
-	if (p) {
-		result = 0;
-		*subs = p->info;
-	} else
-		result = -ENOENT;
-
+	result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
+					       subs);
      __end:
 	if (sport)
 		snd_seq_port_unlock(sport);
@@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
 
 
 /* get matched subscriber */
-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
-							   struct snd_seq_addr *dest_addr)
+int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
+				  struct snd_seq_addr *dest_addr,
+				  struct snd_seq_port_subscribe *subs)
 {
-	struct snd_seq_subscribers *s, *found = NULL;
+	struct snd_seq_subscribers *s;
+	int err = -ENOENT;
 
 	down_read(&src_grp->list_mutex);
 	list_for_each_entry(s, &src_grp->list_head, src_list) {
 		if (addr_match(dest_addr, &s->info.dest)) {
-			found = s;
+			*subs = s->info;
+			err = 0;
 			break;
 		}
 	}
 	up_read(&src_grp->list_mutex);
-	return found;
+	return err;
 }
 
 /*
@@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port,
 			   struct snd_seq_port_subscribe *info);
 
 /* get matched subscriber */
-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
-							   struct snd_seq_addr *dest_addr);
+int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
+				  struct snd_seq_addr *dest_addr,
+				  struct snd_seq_port_subscribe *subs);
 
 #endif
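The API change above replaces a returned pointer into the subscriber list with an error code plus a copy taken while list_mutex is held, so nothing can be freed out from under the caller once the lock is dropped. A generic, hedged sketch of that copy-under-lock pattern (pthread-based, with made-up names rather than the ALSA structures):

/* Hedged illustration only. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	int key;
	char payload[32];
};

struct table {
	pthread_rwlock_t lock;
	struct entry *head;
};

/* Returns 0 and fills *out on success, -ENOENT if nothing matches. */
static int table_lookup(struct table *t, int key, struct entry *out)
{
	int err = -ENOENT;

	pthread_rwlock_rdlock(&t->lock);
	for (struct entry *e = t->head; e; e = e->next) {
		if (e->key == key) {
			*out = *e;          /* copy while the lock is held */
			err = 0;
			break;
		}
	}
	pthread_rwlock_unlock(&t->lock);
	return err;                         /* no pointer into the list escapes */
}

int main(void)
{
	struct entry e1 = { .next = NULL, .key = 7, .payload = "hello" };
	struct table t = { .lock = PTHREAD_RWLOCK_INITIALIZER, .head = &e1 };
	struct entry out;

	printf("lookup(7) -> %d\n", table_lookup(&t, 7, &out));
	printf("lookup(8) -> %d\n", table_lookup(&t, 8, &out));
	return 0;
}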
@@ -345,7 +345,7 @@ static void destroy_stream(struct snd_motu *motu,
 	}
 
 	amdtp_stream_destroy(stream);
-	fw_iso_resources_free(resources);
+	fw_iso_resources_destroy(resources);
 }
 
 int snd_motu_stream_init_duplex(struct snd_motu *motu)
@@ -176,9 +176,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
 		oxfw->midi_input_ports = 0;
 		oxfw->midi_output_ports = 0;
 
-		/* Output stream exists but no data channels are useful. */
-		oxfw->has_output = false;
-
 		return snd_oxfw_scs1x_add(oxfw);
 	}
 
@@ -3936,18 +3936,19 @@ static struct coef_fw alc225_pre_hsmode[] = {
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
 	static struct coef_fw coef0255[] = {
+		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
 		WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
 		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
 		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
 		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
 		{}
 	};
-	static struct coef_fw coef0255_1[] = {
-		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
-		{}
-	};
 	static struct coef_fw coef0256[] = {
 		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+		WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
+		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+		WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
+		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
 		{}
 	};
 	static struct coef_fw coef0233[] = {
@@ -4010,13 +4011,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 
 	switch (codec->core.vendor_id) {
 	case 0x10ec0255:
-		alc_process_coef_fw(codec, coef0255_1);
 		alc_process_coef_fw(codec, coef0255);
 		break;
 	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
-		alc_process_coef_fw(codec, coef0255);
 		break;
 	case 0x10ec0234:
 	case 0x10ec0274:
@@ -4066,6 +4065,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
 		WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
 		{}
 	};
+	static struct coef_fw coef0256[] = {
+		UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/
+		WRITE_COEFEX(0x57, 0x03, 0x09a3),
+		WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
+		{}
+	};
 	static struct coef_fw coef0233[] = {
 		UPDATE_COEF(0x35, 0, 1<<14),
 		WRITE_COEF(0x06, 0x2100),
@@ -4113,14 +4118,19 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
 	};
 
 	switch (codec->core.vendor_id) {
-	case 0x10ec0236:
 	case 0x10ec0255:
-	case 0x10ec0256:
 		alc_write_coef_idx(codec, 0x45, 0xc489);
 		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
 		alc_process_coef_fw(codec, coef0255);
 		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
 		break;
+	case 0x10ec0236:
+	case 0x10ec0256:
+		alc_write_coef_idx(codec, 0x45, 0xc489);
+		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+		alc_process_coef_fw(codec, coef0256);
+		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+		break;
 	case 0x10ec0234:
 	case 0x10ec0274:
 	case 0x10ec0294:
@@ -4199,6 +4209,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
 		WRITE_COEF(0x49, 0x0049),
 		{}
 	};
+	static struct coef_fw coef0256[] = {
+		WRITE_COEF(0x45, 0xc489),
+		WRITE_COEFEX(0x57, 0x03, 0x0da3),
+		WRITE_COEF(0x49, 0x0049),
+		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+		WRITE_COEF(0x06, 0x6100),
+		{}
+	};
 	static struct coef_fw coef0233[] = {
 		WRITE_COEF(0x06, 0x2100),
 		WRITE_COEF(0x32, 0x4ea3),
@@ -4246,11 +4264,16 @@ static void alc_headset_mode_default(struct hda_codec *codec)
 		alc_process_coef_fw(codec, alc225_pre_hsmode);
 		alc_process_coef_fw(codec, coef0225);
 		break;
-	case 0x10ec0236:
 	case 0x10ec0255:
-	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
+	case 0x10ec0256:
+		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+		alc_write_coef_idx(codec, 0x45, 0xc089);
+		msleep(50);
+		alc_process_coef_fw(codec, coef0256);
+		break;
 	case 0x10ec0234:
 	case 0x10ec0274:
 	case 0x10ec0294:
@@ -4294,8 +4317,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 	};
 	static struct coef_fw coef0256[] = {
 		WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
-		WRITE_COEF(0x1b, 0x0c6b),
-		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+		WRITE_COEF(0x1b, 0x0e6b),
 		{}
 	};
 	static struct coef_fw coef0233[] = {
@@ -4410,8 +4432,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 	};
 	static struct coef_fw coef0256[] = {
 		WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
-		WRITE_COEF(0x1b, 0x0c6b),
-		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+		WRITE_COEF(0x1b, 0x0e6b),
 		{}
 	};
 	static struct coef_fw coef0233[] = {
@@ -4540,14 +4561,38 @@ static void alc_determine_headset_type(struct hda_codec *codec)
 	};
 
 	switch (codec->core.vendor_id) {
-	case 0x10ec0236:
 	case 0x10ec0255:
-	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
 		msleep(300);
 		val = alc_read_coef_idx(codec, 0x46);
 		is_ctia = (val & 0x0070) == 0x0070;
 		break;
+	case 0x10ec0236:
+	case 0x10ec0256:
+		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
+		alc_write_coef_idx(codec, 0x06, 0x6104);
+		alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
+
+		snd_hda_codec_write(codec, 0x21, 0,
+			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+		msleep(80);
+		snd_hda_codec_write(codec, 0x21, 0,
+			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+		alc_process_coef_fw(codec, coef0255);
+		msleep(300);
+		val = alc_read_coef_idx(codec, 0x46);
+		is_ctia = (val & 0x0070) == 0x0070;
+
+		alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
+		alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+
+		snd_hda_codec_write(codec, 0x21, 0,
+			    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+		msleep(80);
+		snd_hda_codec_write(codec, 0x21, 0,
+			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+		break;
 	case 0x10ec0234:
 	case 0x10ec0274:
 	case 0x10ec0294:
@@ -559,6 +559,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
 	msleep(5);
 
 	regcache_cache_only(cs42xx8->regmap, false);
+	regcache_mark_dirty(cs42xx8->regmap);
 
 	ret = regcache_sync(cs42xx8->regmap);
 	if (ret) {
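The added regcache_mark_dirty() matters because after the device has been powered down its registers are back at hardware defaults, so the cache must be flagged dirty for regcache_sync() to actually rewrite them. A hedged sketch of that resume sequence using the standard regmap calls (the driver name and priv struct are placeholders, not the cs42xx8 code):

/* Hedged sketch only: illustrative resume path for a regmap-backed codec. */
#include <linux/device.h>
#include <linux/regmap.h>

struct example_priv {
	struct regmap *regmap;
};

static int example_runtime_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);
	int ret;

	regcache_cache_only(priv->regmap, false); /* allow bus access again        */
	regcache_mark_dirty(priv->regmap);        /* hardware is back at defaults  */

	ret = regcache_sync(priv->regmap);        /* replay the cached register set */
	if (ret)
		dev_err(dev, "failed to sync regcache: %d\n", ret);
	return ret;
}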
@@ -286,8 +286,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
 		return -EINVAL;
 	}
 
-	if ((outrate > 8000 && outrate < 30000) &&
-	    (outrate/inrate > 24 || inrate/outrate > 8)) {
+	if ((outrate >= 8000 && outrate <= 30000) &&
+	    (outrate > 24 * inrate || inrate > 8 * outrate)) {
 		pair_err("exceed supported ratio range [1/24, 8] for \
 			inrate/outrate: %d/%d\n", inrate, outrate);
 		return -EINVAL;
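The switch from division to multiplication avoids integer-division truncation letting unsupported ratios through. A small stand-alone check with example rates (the numbers are only illustrative):

/* Hedged illustration only. */
#include <stdio.h>

int main(void)
{
	int inrate = 8000, outrate = 195000;   /* ratio 24.375, above the 24x limit */

	int old_reject = (outrate / inrate > 24) || (inrate / outrate > 8);
	int new_reject = (outrate > 24 * inrate) || (inrate > 8 * outrate);

	/* 195000 / 8000 truncates to 24, so the division form lets this ratio
	 * through, while the multiplication form (195000 > 24 * 8000) rejects it. */
	printf("old rejects: %d, new rejects: %d\n", old_reject, new_reject);
	return 0;
}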
@@ -136,6 +136,7 @@ int check_tick_adj(long tickval)
 
 	eppm = get_ppm_drift();
 	printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
+	fflush(stdout);
 
 	tx1.modes = 0;
 	adjtimex(&tx1);
@@ -101,6 +101,7 @@ int main(void)
 		}
 		clear_time_state();
 		printf(".");
+		fflush(stdout);
 	}
 	printf("[OK]\n");
 	return ksft_exit_pass();
@@ -102,6 +102,7 @@ int main(int argc, char **argv)
 	int ret;
 
 	printf("Mqueue latency : ");
+	fflush(stdout);
 
 	ret = mqueue_lat_test();
 	if (ret < 0) {
@@ -142,6 +142,7 @@ int main(int argc, char **argv)
 			continue;
 
 		printf("Nanosleep %-31s ", clockstring(clockid));
+		fflush(stdout);
 
 		length = 10;
 		while (length <= (NSEC_PER_SEC * 10)) {
@@ -155,6 +155,7 @@ int main(int argc, char **argv)
 			continue;
 
 		printf("nsleep latency %-26s ", clockstring(clockid));
+		fflush(stdout);
 
 		length = 10;
 		while (length <= (NSEC_PER_SEC * 10)) {
@@ -112,6 +112,7 @@ int main(int argv, char **argc)
 		printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
 
 	printf("Estimating clock drift: ");
+	fflush(stdout);
 	sleep(120);
 
 	get_monotonic_and_raw(&mon, &raw);
@@ -55,6 +55,7 @@ int main(int argc, char **argv)
 	printf("tai offset started at %i\n", ret);
 
 	printf("Checking tai offsets can be properly set: ");
+	fflush(stdout);
 	for (i = 1; i <= 60; i++) {
 		ret = set_tai(i);
 		ret = get_tai();
@@ -65,6 +65,7 @@ int main(int argc, char **argv)
 	printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
 
 	printf("Checking tz_minuteswest can be properly set: ");
+	fflush(stdout);
 	for (i = -15*60; i < 15*60; i += 30) {
 		ret = set_tz(i, dst);
 		ret = get_tz_min();
@@ -76,6 +77,7 @@ int main(int argc, char **argv)
 	printf("[OK]\n");
 
 	printf("Checking invalid tz_minuteswest values are caught: ");
+	fflush(stdout);
 
 	if (!set_tz(-15*60-1, dst)) {
 		printf("[FAILED] %i didn't return failure!\n", -15*60-1);
@@ -163,6 +163,7 @@ int main(int argc, char **argv)
 	strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
 	printf("%s\n", buf);
 	printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
+	fflush(stdout);
 
 	/* spawn */
 	for (i = 0; i < thread_count; i++)
@@ -123,6 +123,7 @@ int validate_freq(void)
 	/* Set the leap second insert flag */
 
 	printf("Testing ADJ_FREQ... ");
+	fflush(stdout);
 	for (i = 0; i < NUM_FREQ_VALID; i++) {
 		tx.modes = ADJ_FREQUENCY;
 		tx.freq = valid_freq[i];
@@ -250,6 +251,7 @@ int set_bad_offset(long sec, long usec, int use_nano)
 int validate_set_offset(void)
 {
 	printf("Testing ADJ_SETOFFSET... ");
+	fflush(stdout);
 
 	/* Test valid values */
 	if (set_offset(NSEC_PER_SEC - 1, 1))
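The fflush(stdout) calls added throughout these selftests exist because a progress message without a trailing newline can sit in the stdio buffer until the (possibly long) test finishes, especially when output is redirected. A minimal stand-alone illustration:

/* Hedged illustration only. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("Running long test: ");   /* no newline, may sit in the buffer */
	fflush(stdout);                  /* make it visible before the wait   */
	sleep(2);                        /* stand-in for the real test body   */
	printf("[OK]\n");
	return 0;
}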