Merge remote-tracking branch 'remotes/origin/tmp-eca84e5' into msm-4.14

* remotes/origin/tmp-eca84e5:
  Linux 4.14.48
  powerpc/mm/slice: Fix hugepage allocation at hint address on 8xx
  powerpc/mm/slice: Enhance for supporting PPC32
  powerpc/mm/slice: create header files dedicated to slices
  powerpc/mm/slice: Remove intermediate bitmap copy
  drm/i915: Disable LVDS on Radiant P845
  drm/i915/lvds: Move acpi lid notification registration to registration phase
  drm/psr: Fix missed entry in PSR setup time table.
  intel_th: Use correct device when freeing buffers
  Revert "rt2800: use TXOP_BACKOFF for probe frames"
  mm/huge_memory.c: __split_huge_page() use atomic ClearPageDirty()
  IB/core: Fix error code for invalid GID entry
  hwtracing: stm: fix build error on some arches
  stm class: Use vmalloc for the master map
  scsi: scsi_transport_srp: Fix shost to rport translation
  MIPS: prctl: Disallow FRE without FR with PR_SET_FP_MODE requests
  MIPS: ptrace: Fix PTRACE_PEEKUSR requests for 64-bit FGRs
  MIPS: lantiq: gphy: Drop reboot/remove reset asserts
  iio: adc: select buffer for at91-sama5d2_adc
  iio:kfifo_buf: check for uint overflow
  iio:buffer: make length types match kfifo types
  iio: ad7793: implement IIO_CHAN_INFO_SAMP_FREQ
  tcp: avoid integer overflows in tcp_rcv_space_adjust()
  kbuild: clang: disable unused variable warnings only when constant
  platform/chrome: cros_ec_lpc: remove redundant pointer request
  ASoC: Intel: sst: remove redundant variable dma_dev_name
  rtlwifi: rtl8192cu: Remove variable self-assignment in rf.c
  drm/amd/powerplay: Fix enum mismatch
  dma-buf: remove redundant initialization of sg_table
  drm/i915: Always sanity check engine state upon idling
  kbuild: clang: remove crufty HOSTCFLAGS
  cfg80211: further limit wiphy names to 64 bytes
  selinux: KASAN: slab-out-of-bounds in xattr_getsecurity
  tracing: Make the snapshot trigger work with instances
  tracing: Fix crash when freeing instances with event triggers
  Input: elan_i2c_smbus - fix corrupted stack
  Input: synaptics - add Lenovo 80 series ids to SMBus
  Input: synaptics - add Intertouch support on X1 Carbon 6th and X280
  Input: synaptics - Lenovo Thinkpad X1 Carbon G5 (2017) with Elantech trackpoints should use RMI
  Input: synaptics - Lenovo Carbon X1 Gen5 (2017) devices should use RMI
  xfs: detect agfl count corruption and reset agfl
  xfs: convert XFS_AGFL_SIZE to a helper function
  PCI: hv: Fix 2 hang issues in hv_compose_msi_msg()
  Revert "pinctrl: msm: Use dynamic GPIO numbering"
  x86/MCE/AMD: Cache SMCA MISC block addresses
  x86/mce/AMD: Carve out SMCA get_block_address() code
  objtool: Fix "noreturn" detection for recursive sibling calls
  objtool: Detect RIP-relative switch table references, part 2
  objtool: Detect RIP-relative switch table references
  objtool: Support GCC 8 switch tables
  objtool: Support GCC 8's cold subfunctions
  mm: fix the NULL mapping case in __isolate_lru_page()
  fix io_destroy()/aio_complete() race
  ANDROID: add extra free kbytes tunable
  Revert "ANDROID: net: xfrm: check dir value of xfrm_userpolicy_id"
  ANDROID: x86_64_cuttlefish_defconfig: Enable F2FS
  ANDROID: Update x86_64_cuttlefish_defconfig
  Revert "pinctrl: msm: Use dynamic GPIO numbering"
  FROMLIST: f2fs: early updates queued for v4.18-rc1

Conflicts:
	Makefile

Change-Id: Icf26ca353db37067d2502b3cb84884de5f3a1d1a
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Isaac J. Manjarres 2018-06-05 09:32:48 -07:00
commit 10289c4840
73 changed files with 929 additions and 567 deletions

View File

@ -101,6 +101,7 @@ Date: February 2015
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Controls the trimming rate in batch mode.
<deprecated>
What: /sys/fs/f2fs/<disk>/cp_interval
Date: October 2015

View File

@ -182,13 +182,15 @@ whint_mode=%s Control which write hints are passed down to block
passes down hints with its policy.
alloc_mode=%s Adjust block allocation policy, which supports "reuse"
and "default".
fsync_mode=%s Control the policy of fsync. Currently supports "posix"
and "strict". In "posix" mode, which is default, fsync
will follow POSIX semantics and does a light operation
to improve the filesystem performance. In "strict" mode,
fsync will be heavy and behaves in line with xfs, ext4
and btrfs, where xfstest generic/342 will pass, but the
performance will regress.
fsync_mode=%s Control the policy of fsync. Currently supports "posix",
"strict", and "nobarrier". In "posix" mode, which is
default, fsync will follow POSIX semantics and does a
light operation to improve the filesystem performance.
In "strict" mode, fsync will be heavy and behaves in line
with xfs, ext4 and btrfs, where xfstest generic/342 will
pass, but the performance will regress. "nobarrier" is
based on "posix", but does not issue a flush command for
non-atomic files, similar to the "nobarrier" mount option
(a usage sketch follows this excerpt).
test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt
context. The fake fscrypt context is used by xfstests.
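A minimal sketch of selecting the new "nobarrier" fsync policy from userspace
via mount(2); the block device and mount-point paths below are placeholders,
and error handling is kept minimal:

/* Illustrative only: mount an f2fs volume with fsync_mode=nobarrier. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Placeholders: substitute the real device and mount point. */
	if (mount("/dev/vdb1", "/data", "f2fs", 0, "fsync_mode=nobarrier")) {
		perror("mount");
		return 1;
	}
	return 0;
}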

View File

@ -30,6 +30,7 @@ Currently, these files are in /proc/sys/vm:
- dirty_writeback_centisecs
- drop_caches
- extfrag_threshold
- extra_free_kbytes
- hugepages_treat_as_movable
- hugetlb_shm_group
- laptop_mode
@ -260,6 +261,21 @@ any throttling.
==============================================================
extra_free_kbytes
This parameter tells the VM to keep extra free memory between the threshold
where background reclaim (kswapd) kicks in, and the threshold where direct
reclaim (by allocating processes) kicks in.
This is useful for workloads that require low-latency memory allocations
and have a bounded burstiness in their memory allocations. For example, a
realtime application that receives and transmits network traffic
(causing in-kernel memory allocations) with a maximum total message burst
size of 200MB may need 200MB of extra free memory to avoid direct-reclaim
related latencies.
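A minimal sketch of how a latency-sensitive service might raise this tunable
at startup, assuming a kernel built with this patch and a process running as
root (the 200MB value mirrors the burst-size example above):

/* Illustrative only: reserve an extra 200MB between the kswapd and
 * direct-reclaim watermarks via /proc/sys/vm/extra_free_kbytes. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/extra_free_kbytes", "w");

	if (!f) {
		perror("extra_free_kbytes");
		return EXIT_FAILURE;
	}
	/* 200MB expressed in kilobytes. */
	fprintf(f, "%d\n", 200 * 1024);
	fclose(f);
	return EXIT_SUCCESS;
}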
==============================================================
hugepages_treat_as_movable
This parameter controls whether we can allocate hugepages from ZONE_MOVABLE

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 47
SUBLEVEL = 48
EXTRAVERSION =
NAME = Petit Gorille
@ -769,7 +769,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
endif
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)

View File

@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
if (value & ~known_bits)
return -EOPNOTSUPP;
/* Setting FRE without FR is not supported. */
if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
return -EOPNOTSUPP;
/* Avoid inadvertently triggering emulation */
if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
!(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))

View File

@ -809,7 +809,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
}
#endif
tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;

View File

@ -108,7 +108,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
addr & 1);
break;
}
tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
#define _ASM_POWERPC_BOOK3S_64_SLICE_H
#ifdef CONFIG_PPC_MM_SLICES
#define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP (0x100000000ul)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define SLICE_HIGH_SHIFT 40
#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
#else /* CONFIG_PPC_MM_SLICES */
#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
#define slice_set_user_psize(mm, psize) \
do { \
(mm)->context.user_psize = (psize); \
(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
} while (0)
#endif /* CONFIG_PPC_MM_SLICES */
#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */

View File

@ -169,6 +169,12 @@ typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
#ifdef CONFIG_PPC_MM_SLICES
u16 user_psize; /* page size index */
u64 low_slices_psize; /* page size encodings */
unsigned char high_slices_psize[0];
unsigned long addr_limit;
#endif
} mm_context_t;
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
#define _ASM_POWERPC_NOHASH_32_SLICE_H
#ifdef CONFIG_PPC_MM_SLICES
#define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP (0x100000000ull)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define SLICE_HIGH_SHIFT 0
#define SLICE_NUM_HIGH 0ul
#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
#endif /* CONFIG_PPC_MM_SLICES */
#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
#define _ASM_POWERPC_NOHASH_64_SLICE_H
#ifdef CONFIG_PPC_64K_PAGES
#define get_slice_psize(mm, addr) MMU_PAGE_64K
#else /* CONFIG_PPC_64K_PAGES */
#define get_slice_psize(mm, addr) MMU_PAGE_4K
#endif /* !CONFIG_PPC_64K_PAGES */
#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */

View File

@ -344,5 +344,6 @@ typedef struct page *pgtable_t;
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>
#endif /* _ASM_POWERPC_PAGE_H */

View File

@ -86,65 +86,6 @@ extern u64 ppc64_pft_size;
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_PPC_MM_SLICES
#define SLICE_LOW_SHIFT 28
#define SLICE_HIGH_SHIFT 40
#define SLICE_LOW_TOP (0x100000000ul)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
#ifndef __ASSEMBLY__
struct mm_struct;
extern unsigned long slice_get_unmapped_area(unsigned long addr,
unsigned long len,
unsigned long flags,
unsigned int psize,
int topdown);
extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize);
#endif /* __ASSEMBLY__ */
#else
#define slice_init()
#ifdef CONFIG_PPC_STD_MMU_64
#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
#define slice_set_user_psize(mm, psize) \
do { \
(mm)->context.user_psize = (psize); \
(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
} while (0)
#else /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_64K_PAGES
#define get_slice_psize(mm, addr) MMU_PAGE_64K
#else /* CONFIG_PPC_64K_PAGES */
#define get_slice_psize(mm, addr) MMU_PAGE_4K
#endif /* !CONFIG_PPC_64K_PAGES */
#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
#endif /* !CONFIG_PPC_STD_MMU_64 */
#define slice_set_range_psize(mm, start, len, psize) \
slice_set_user_psize((mm), (psize))
#endif /* CONFIG_PPC_MM_SLICES */
#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#endif /* !CONFIG_HUGETLB_PAGE */
#define VM_DATA_DEFAULT_FLAGS \
(is_32bit_task() ? \
VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SLICE_H
#define _ASM_POWERPC_SLICE_H
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/slice.h>
#elif defined(CONFIG_PPC64)
#include <asm/nohash/64/slice.h>
#elif defined(CONFIG_PPC_MMU_NOHASH)
#include <asm/nohash/32/slice.h>
#endif
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#ifndef __ASSEMBLY__
struct mm_struct;
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
unsigned long flags, unsigned int psize,
int topdown);
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize);
#endif /* __ASSEMBLY__ */
#else /* CONFIG_PPC_MM_SLICES */
#define slice_set_range_psize(mm, start, len, psize) \
slice_set_user_psize((mm), (psize))
#endif /* CONFIG_PPC_MM_SLICES */
#endif /* _ASM_POWERPC_SLICE_H */

View File

@ -915,6 +915,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_PPC64
init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
#elif defined(CONFIG_PPC_8xx)
init_mm.context.addr_limit = DEFAULT_MAP_WINDOW;
#else
#error "context.addr_limit not initialized."
#endif

View File

@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd)
mtspr(SPRN_M_TW, __pa(pgd) - offset);
/* Update context */
mtspr(SPRN_M_CASID, id);
mtspr(SPRN_M_CASID, id - 1);
/* sync */
mb();
}

View File

@ -552,9 +552,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
#ifdef CONFIG_PPC_RADIX_MMU
if (radix_enabled())
return radix__hugetlb_get_unmapped_area(file, addr, len,
pgoff, flags);
#endif
return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

View File

@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
pr_hard("initing context for mm @%p\n", mm);
#ifdef CONFIG_PPC_MM_SLICES
if (!mm->context.addr_limit)
mm->context.addr_limit = DEFAULT_MAP_WINDOW;
/*
* We have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we properly
* initialize context slice details for newly allocated mm's (which will
* have id == 0) and don't alter context slice inherited via fork (which
* will have id != 0).
*/
if (mm->context.id == 0)
slice_set_user_psize(mm, mmu_virtual_psize);
#endif
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
return 0;
@ -428,8 +442,8 @@ void __init mmu_context_init(void)
* -- BenH
*/
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0;
last_context = 15;
first_context = 1;
last_context = 16;
no_selective_tlbil = true;
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;

View File

@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
unsigned long end = start + len - 1;
ret->low_slices = 0;
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
if (SLICE_NUM_HIGH)
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
if (start < SLICE_LOW_TOP) {
unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
unsigned long mend = min(end,
(unsigned long)(SLICE_LOW_TOP - 1));
ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
- (1u << GET_LOW_SLICE_INDEX(start));
@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
unsigned long start = slice << SLICE_HIGH_SHIFT;
unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
#ifdef CONFIG_PPC64
/* Hack, so that each address is controlled by exactly one
* of the high or low area bitmaps, the first high area starts
* at 4GB, not 0 */
if (start == 0)
start = SLICE_LOW_TOP;
#endif
return !slice_area_is_free(mm, start, end - start);
}
@ -127,7 +131,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
unsigned long i;
ret->low_slices = 0;
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
if (SLICE_NUM_HIGH)
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
for (i = 0; i < SLICE_NUM_LOW; i++)
if (!slice_low_has_vma(mm, i))
@ -149,7 +154,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
u64 lpsizes;
ret->low_slices = 0;
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
if (SLICE_NUM_HIGH)
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
lpsizes = mm->context.low_slices_psize;
for (i = 0; i < SLICE_NUM_LOW; i++)
@ -171,6 +177,10 @@ static int slice_check_fit(struct mm_struct *mm,
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
if (!SLICE_NUM_HIGH)
return (mask.low_slices & available.low_slices) ==
mask.low_slices;
bitmap_and(result, mask.high_slices,
available.high_slices, slice_count);
@ -180,6 +190,7 @@ static int slice_check_fit(struct mm_struct *mm,
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
struct mm_struct *mm = parm;
unsigned long flags;
@ -191,6 +202,7 @@ static void slice_flush_segments(void *parm)
local_irq_save(flags);
slb_flush_and_rebolt();
local_irq_restore(flags);
#endif
}
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
@ -379,21 +391,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
dst->low_slices |= src->low_slices;
bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
if (!SLICE_NUM_HIGH)
return;
bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
SLICE_NUM_HIGH);
}
static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
dst->low_slices &= ~src->low_slices;
bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
if (!SLICE_NUM_HIGH)
return;
bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
SLICE_NUM_HIGH);
}
#ifdef CONFIG_PPC_64K_PAGES
@ -441,14 +453,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
* init different masks
*/
mask.low_slices = 0;
bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
/* silence stupid warning */;
potential_mask.low_slices = 0;
bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
compat_mask.low_slices = 0;
bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
if (SLICE_NUM_HIGH) {
bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
}
/* Sanity checks */
BUG_ON(mm->task_size == 0);
@ -586,7 +601,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
convert:
slice_andnot_mask(&mask, &good_mask);
slice_andnot_mask(&mask, &compat_mask);
if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
if (mask.low_slices ||
(SLICE_NUM_HIGH &&
!bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
slice_convert(mm, mask, psize);
if (psize > MMU_PAGE_BASE)
on_each_cpu(slice_flush_segments, mm, 1);

View File

@ -325,6 +325,7 @@ config PPC_BOOK3E_MMU
config PPC_MM_SLICES
bool
default y if PPC_STD_MMU_64
default y if PPC_8xx && HUGETLB_PAGE
default n
config PPC_HAVE_PMU_SUPPORT

View File

@ -32,6 +32,7 @@ CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_REFCOUNT_FULL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@ -89,8 +90,8 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
@ -105,6 +106,7 @@ CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
@ -131,6 +133,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@ -144,14 +147,17 @@ CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_SOCKET_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=y
CONFIG_IP_NF_MATCH_ECN=y
@ -169,6 +175,7 @@ CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_NF_SOCKET_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_IPV6HEADER=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
@ -263,6 +270,7 @@ CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_KEYCHORD=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=y
# CONFIG_SERIO_I8042 is not set
@ -303,7 +311,6 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_HIDRAW=y
CONFIG_UHID=y
# CONFIG_HID_GENERIC is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_ACRUX=y
CONFIG_HID_ACRUX_FF=y
@ -391,6 +398,9 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@ -423,12 +433,9 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_PANIC_TIMEOUT=5
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ENABLE_DEFAULT_TRACERS=y
# CONFIG_KPROBE_EVENTS is not set
# CONFIG_UPROBE_EVENTS is not set
CONFIG_IO_DELAY_NONE=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
@ -441,4 +448,5 @@ CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_DEV_VIRTIO=y

View File

@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = {
[SMCA_SMU] = { "smu", "System Management Unit" },
};
static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};
const char *smca_get_name(enum smca_bank_types t)
{
if (t >= N_SMCA_BANK_TYPES)
@ -429,6 +434,41 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
wrmsr(MSR_CU_DEF_ERR, low, high);
}
static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
unsigned int block)
{
u32 low, high;
u32 addr = 0;
if (smca_get_bank_type(bank) == SMCA_RESERVED)
return addr;
if (!block)
return MSR_AMD64_SMCA_MCx_MISC(bank);
/* Check our cache first: */
if (smca_bank_addrs[bank][block] != -1)
return smca_bank_addrs[bank][block];
/*
* For SMCA enabled processors, BLKPTR field of the first MISC register
* (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
*/
if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
goto out;
if (!(low & MCI_CONFIG_MCAX))
goto out;
if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
(low & MASK_BLKPTR_LO))
addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
out:
smca_bank_addrs[bank][block] = addr;
return addr;
}
static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
unsigned int bank, unsigned int block)
{
@ -437,44 +477,8 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
return addr;
/* Get address from already initialized block. */
if (per_cpu(threshold_banks, cpu)) {
struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
if (bankp && bankp->blocks) {
struct threshold_block *blockp = &bankp->blocks[block];
if (blockp)
return blockp->address;
}
}
if (mce_flags.smca) {
if (smca_get_bank_type(bank) == SMCA_RESERVED)
return addr;
if (!block) {
addr = MSR_AMD64_SMCA_MCx_MISC(bank);
} else {
/*
* For SMCA enabled processors, BLKPTR field of the
* first MISC register (MCx_MISC0) indicates presence of
* additional MISC register set (MISC1-4).
*/
u32 low, high;
if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
return addr;
if (!(low & MCI_CONFIG_MCAX))
return addr;
if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
(low & MASK_BLKPTR_LO))
addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
}
return addr;
}
if (mce_flags.smca)
return smca_get_block_address(cpu, bank, block);
/* Fall back to method we used for older processors: */
switch (block) {

View File

@ -657,7 +657,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table = ERR_PTR(-EINVAL);
struct sg_table *sg_table;
might_sleep();

View File

@ -176,10 +176,10 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cz_dpm_powerup_uvd(hwmgr);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
AMD_PG_STATE_UNGATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
@ -208,11 +208,11 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;

View File

@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
AMD_PG_STATE_UNGATE);
smu7_update_uvd_dpm(hwmgr, false);
}

View File

@ -1136,6 +1136,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
static const u16 psr_setup_time_us[] = {
PSR_SETUP_TIME(330),
PSR_SETUP_TIME(275),
PSR_SETUP_TIME(220),
PSR_SETUP_TIME(165),
PSR_SETUP_TIME(110),
PSR_SETUP_TIME(55),

View File

@ -3378,24 +3378,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
{
return wait_for(intel_engine_is_idle(engine), timeout_ms);
}
static int wait_for_engines(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
i915_gem_set_wedged(i915);
return -EIO;
}
GEM_BUG_ON(intel_engine_get_seqno(engine) !=
intel_engine_last_submit(engine));
if (wait_for(intel_engines_are_idle(i915), 50)) {
DRM_ERROR("Failed to idle engines, declaring wedged!\n");
i915_gem_set_wedged(i915);
return -EIO;
}
return 0;
@ -4575,7 +4563,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
if (ret && ret != -EIO)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
@ -4619,11 +4607,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machine in an unusable condition.
*/
i915_gem_sanitize(dev_priv);
goto out_rpm_put;
intel_runtime_pm_put(dev_priv);
return 0;
err_unlock:
mutex_unlock(&dev->struct_mutex);
out_rpm_put:
intel_runtime_pm_put(dev_priv);
return ret;
}

View File

@ -565,6 +565,36 @@ exit:
return NOTIFY_OK;
}
static int
intel_lvds_connector_register(struct drm_connector *connector)
{
struct intel_lvds_connector *lvds = to_lvds_connector(connector);
int ret;
ret = intel_connector_register(connector);
if (ret)
return ret;
lvds->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds->lid_notifier.notifier_call = NULL;
}
return 0;
}
static void
intel_lvds_connector_unregister(struct drm_connector *connector)
{
struct intel_lvds_connector *lvds = to_lvds_connector(connector);
if (lvds->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&lvds->lid_notifier);
intel_connector_unregister(connector);
}
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@ -577,9 +607,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
struct intel_lvds_connector *lvds_connector =
to_lvds_connector(connector);
if (lvds_connector->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
@ -600,8 +627,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.late_register = intel_lvds_connector_register,
.early_unregister = intel_lvds_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@ -818,6 +845,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Radiant P845",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
},
},
{ } /* terminating entry */
};
@ -1149,12 +1184,6 @@ out:
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds_connector->lid_notifier.notifier_call = NULL;
}
return;
failed:

View File

@ -741,8 +741,8 @@ err_nomem:
/* Reset the page to write-back before releasing */
set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
win->block[i].addr);
dma_free_coherent(msc_dev(msc)->parent->parent, size,
win->block[i].bdesc, win->block[i].addr);
}
kfree(win);
@ -777,7 +777,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
/* Reset the page to write-back before releasing */
set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
win->block[i].bdesc, win->block[i].addr);
}

View File

@ -27,6 +27,7 @@
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "stm.h"
#include <uapi/linux/stm.h>
@ -685,7 +686,7 @@ static void stm_device_release(struct device *dev)
{
struct stm_device *stm = to_stm_device(dev);
kfree(stm);
vfree(stm);
}
int stm_register_device(struct device *parent, struct stm_data *stm_data,
@ -702,7 +703,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return -EINVAL;
nmasters = stm_data->sw_end - stm_data->sw_start + 1;
stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
if (!stm)
return -ENOMEM;
@ -755,7 +756,7 @@ err_device:
/* matches device_initialize() above */
put_device(&stm->dev);
err_free:
kfree(stm);
vfree(stm);
return err;
}

View File

@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
tristate "Atmel AT91 SAMA5D2 ADC"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Atmel SAMA5D2 ADC which is

View File

@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
33, 0, 17, 16, 12, 10, 8, 6, 4};
static ssize_t ad7793_read_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7793_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n",
st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
}
static ssize_t ad7793_write_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7793_state *st = iio_priv(indio_dev);
long lval;
int i, ret;
ret = kstrtol(buf, 10, &lval);
if (ret)
return ret;
if (lval == 0)
return -EINVAL;
for (i = 0; i < 16; i++)
if (lval == st->chip_info->sample_freq_avail[i])
break;
if (i == 16)
return -EINVAL;
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
st->mode &= ~AD7793_MODE_RATE(-1);
st->mode |= AD7793_MODE_RATE(i);
ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
iio_device_release_direct_mode(indio_dev);
return len;
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ad7793_read_frequency,
ad7793_write_frequency);
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
ad7793_show_scale_available, NULL, 0);
static struct attribute *ad7793_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
NULL
@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
};
static struct attribute *ad7797_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
NULL
};
@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
*val -= offset;
}
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = st->chip_info
->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
return IIO_VAL_INT;
}
return -EINVAL;
}
@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
break;
}
break;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val) {
ret = -EINVAL;
break;
}
for (i = 0; i < 16; i++)
if (val == st->chip_info->sample_freq_avail[i])
break;
if (i == 16) {
ret = -EINVAL;
break;
}
st->mode &= ~AD7793_MODE_RATE(-1);
st->mode |= AD7793_MODE_RATE(i);
ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
st->mode);
break;
default:
ret = -EINVAL;
}

View File

@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
* Should be used as the set_length callback for iio_buffer_access_ops
* struct for DMA buffers.
*/
int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)

View File

@ -22,11 +22,18 @@ struct iio_kfifo {
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
int bytes_per_datum, int length)
size_t bytes_per_datum, unsigned int length)
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
/*
* Make sure we don't overflow an unsigned int after kfifo rounds up to
* the next power of 2.
*/
if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
return -EINVAL;
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
bytes_per_datum, GFP_KERNEL);
}
@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
return 0;
}
static int iio_set_length_kfifo(struct iio_buffer *r, int length)
static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)

View File

@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
return -EINVAL;
if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
return -EAGAIN;
return -EINVAL;
memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
if (attr) {

View File

@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
bool max_baseline, u8 *value)
{
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
max_baseline ?
@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
bool iap, u8 *version)
{
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
iap ? ETP_SMBUS_IAP_VERSION_CMD :
@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
u8 *clickpad)
{
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_SM_VERSION_CMD, val);
@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_UNIQUEID_CMD, val);
@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
bool iap, u16 *csum)
{
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
{
int ret;
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
if (ret != 3) {
@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
{
int ret;
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
if (ret != 3) {
@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
{
int ret;
int error;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
if (ret != 3) {
@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
{
int error;
u16 constant;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
if (error < 0) {
@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
int len;
int error;
enum tp_mode mode;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
u16 password;
@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
struct device *dev = &client->dev;
int error;
u16 result;
u8 val[3];
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
/*
* Due to the limitation of smbus protocol limiting

View File

@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN004a", /* W541 */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
"LEN0092", /* X1 Carbon 6 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN200f", /* T450s */
NULL
};

View File

@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
/*
* Determine IFS values
* - Use TXOP_BACKOFF for probe and management frames except beacons
* - Use TXOP_BACKOFF for management frames except beacons
* - Use TXOP_SIFS for fragment bursts
* - Use TXOP_HTTXOP for everything else
*
* Note: rt2800 devices won't use CTS protection (if used)
* for frames not transmitted with TXOP_HTTXOP
*/
if ((ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_beacon(hdr->frame_control)) ||
(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
if (ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_beacon(hdr->frame_control))
txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
txdesc->u.ht.txop = TXOP_SIFS;

View File

@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
writeVal = 0x00000000;
if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
writeVal = writeVal - 0x06060606;
else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
TXHIGHPWRLEVEL_BT2)
writeVal = writeVal;
*(p_outwriteval + rf) = writeVal;
}
}

View File

@ -531,6 +531,8 @@ struct hv_pci_compl {
s32 completion_status;
};
static void hv_pci_onchannelcallback(void *context);
/**
* hv_pci_generic_compl() - Invoked for a completion packet
* @context: Set up by the sender of the packet.
@ -675,6 +677,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
}
}
static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
u16 ret;
unsigned long flags;
void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
PCI_VENDOR_ID;
spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
/* Choose the function to be read. (See comment above) */
writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
/* Make sure the function was chosen before we start reading. */
mb();
/* Read from that function's config space. */
ret = readw(addr);
/*
* mb() is not required here, because the spin_unlock_irqrestore()
* is a barrier.
*/
spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
return ret;
}
/**
* _hv_pcifront_write_config() - Internal PCI config write
* @hpdev: The PCI driver's representation of the device
@ -1121,8 +1148,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* Since this function is called with IRQ locks held, can't
* do normal wait for completion; instead poll.
*/
while (!try_wait_for_completion(&comp.comp_pkt.host_event))
while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
/* 0xFFFF means an invalid PCI VENDOR ID. */
if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
dev_err_once(&hbus->hdev->device,
"the device has gone\n");
goto free_int_desc;
}
/*
* When the higher level interrupt code calls us with
* interrupt disabled, we must poll the channel by calling
* the channel callback directly when channel->target_cpu is
* the current CPU. When the higher level interrupt code
* calls us with interrupt enabled, let's add the
* local_bh_disable()/enable() to avoid race.
*/
local_bh_disable();
if (hbus->hdev->channel->target_cpu == smp_processor_id())
hv_pci_onchannelcallback(hbus);
local_bh_enable();
if (hpdev->state == hv_pcichild_ejecting) {
dev_err_once(&hbus->hdev->device,
"the device is being ejected\n");
goto free_int_desc;
}
udelay(100);
}
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device,

View File

@ -1329,7 +1329,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return -EINVAL;
chip = &pctrl->chip;
chip->base = -1;
chip->base = 0;
chip->ngpio = ngpio;
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;

View File

@ -54,7 +54,6 @@ static int ec_response_timed_out(void)
static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
struct cros_ec_command *msg)
{
struct ec_host_request *request;
struct ec_host_response response;
u8 sum;
int ret = 0;
@ -65,8 +64,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
/* Write buffer */
cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
request = (struct ec_host_request *)ec->dout;
/* Here we go */
sum = EC_COMMAND_PROTOCOL_3;
cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum);

View File

@ -51,6 +51,8 @@ struct srp_internal {
struct transport_container rport_attr_cont;
};
static int scsi_is_srp_rport(const struct device *dev);
#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
return dev_to_shost(r->dev.parent);
}
static int find_child_rport(struct device *dev, void *data)
{
struct device **child = data;
if (scsi_is_srp_rport(dev)) {
WARN_ON_ONCE(*child);
*child = dev;
}
return 0;
}
static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
return transport_class_to_srp_rport(&shost->shost_gendev);
struct device *child = NULL;
WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
find_child_rport) < 0);
return child ? dev_to_rport(child) : NULL;
}
/**
@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
struct srp_rport *rport = shost_to_rport(shost);
pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
return rport && rport->fast_io_fail_tmo < 0 &&
rport->dev_loss_tmo < 0 &&
i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}

View File

@ -30,7 +30,6 @@ struct xway_gphy_priv {
struct clk *gphy_clk_gate;
struct reset_control *gphy_reset;
struct reset_control *gphy_reset2;
struct notifier_block gphy_reboot_nb;
void __iomem *membase;
char *fw_name;
};
@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
};
MODULE_DEVICE_TABLE(of, xway_gphy_match);
static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
{
return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
}
static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
unsigned long code, void *unused)
{
struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
if (priv) {
reset_control_assert(priv->gphy_reset);
reset_control_assert(priv->gphy_reset2);
}
return NOTIFY_DONE;
}
static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
dma_addr_t *dev_addr)
{
@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
reset_control_deassert(priv->gphy_reset);
reset_control_deassert(priv->gphy_reset2);
/* assert the gphy reset because it can hang after a reboot: */
priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
priv->gphy_reboot_nb.priority = -1;
ret = register_reboot_notifier(&priv->gphy_reboot_nb);
if (ret)
dev_warn(dev, "Failed to register reboot notifier\n");
platform_set_drvdata(pdev, priv);
return ret;
@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
static int xway_gphy_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
int ret;
reset_control_assert(priv->gphy_reset);
reset_control_assert(priv->gphy_reset2);
iowrite32be(0, priv->membase);
clk_disable_unprepare(priv->gphy_clk_gate);
ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
if (ret)
dev_warn(dev, "Failed to unregister reboot notifier\n");
return 0;
}

View File

@ -643,9 +643,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
while (!list_empty(&ctx->active_reqs)) {
req = list_first_entry(&ctx->active_reqs,
struct aio_kiocb, ki_list);
list_del_init(&req->ki_list);
kiocb_cancel(req);
list_del_init(&req->ki_list);
}
spin_unlock_irq(&ctx->ctx_lock);

View File

@ -176,15 +176,12 @@ enum {
#define CP_DISCARD 0x00000010
#define CP_TRIMMED 0x00000020
#define DEF_BATCHED_TRIM_SECTIONS 2048
#define BATCHED_TRIM_SEGMENTS(sbi) \
(GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
#define DEF_MAX_DISCARD_LEN 512 /* Max. 2MB per discard */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
#define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
@ -694,7 +691,8 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
static inline bool __is_discard_mergeable(struct discard_info *back,
struct discard_info *front)
{
return back->lstart + back->len == front->lstart;
return (back->lstart + back->len == front->lstart) &&
(back->len + front->len < DEF_MAX_DISCARD_LEN);
}
static inline bool __is_discard_back_mergeable(struct discard_info *cur,
@ -1080,6 +1078,7 @@ enum {
enum fsync_mode {
FSYNC_MODE_POSIX, /* fsync follows posix semantics */
FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */
FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
@ -2774,8 +2773,6 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
unsigned int granularity);
void drop_discard_cmd(struct f2fs_sb_info *sbi);
void stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);

View File

@ -306,7 +306,7 @@ sync_nodes:
remove_ino_entry(sbi, ino, APPEND_INO);
clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
if (!atomic)
if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
ret = f2fs_issue_flush(sbi, inode->i_ino);
if (!ret) {
remove_ino_entry(sbi, ino, UPDATE_INO);

View File

@ -915,6 +915,39 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
#endif
}
static void __init_discard_policy(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
int discard_type, unsigned int granularity)
{
/* common policy */
dpolicy->type = discard_type;
dpolicy->sync = true;
dpolicy->granularity = granularity;
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
dpolicy->io_aware_gran = MAX_PLIST_NUM;
if (discard_type == DPOLICY_BG) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = true;
dpolicy->sync = false;
if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
dpolicy->granularity = 1;
dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
}
} else if (discard_type == DPOLICY_FORCE) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_FSTRIM) {
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
dpolicy->io_aware = false;
}
}
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
@ -1130,68 +1163,6 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
return 0;
}
static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
unsigned int start, unsigned int end)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
struct rb_node **insert_p = NULL, *insert_parent = NULL;
struct discard_cmd *dc;
struct blk_plug plug;
int issued;
next:
issued = 0;
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
NULL, start,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
&insert_p, &insert_parent, true);
if (!dc)
dc = next_dc;
blk_start_plug(&plug);
while (dc && dc->lstart <= end) {
struct rb_node *node;
if (dc->len < dpolicy->granularity)
goto skip;
if (dc->state != D_PREP) {
list_move_tail(&dc->list, &dcc->fstrim_list);
goto skip;
}
__submit_discard_cmd(sbi, dpolicy, dc);
if (++issued >= dpolicy->max_requests) {
start = dc->lstart + dc->len;
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
schedule();
goto next;
}
skip:
node = rb_next(&dc->rb_node);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
if (fatal_signal_pending(current))
break;
}
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
}
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
@ -1332,7 +1303,18 @@ next:
static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
struct discard_policy dp;
if (dpolicy) {
__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
return;
}
/* wait all */
__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
@ -1377,11 +1359,13 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
struct discard_policy dpolicy;
bool dropped;
init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity);
__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
dcc->discard_granularity);
__issue_discard_cmd(sbi, &dpolicy);
dropped = __drop_discard_cmd(sbi);
__wait_all_discard_cmd(sbi, &dpolicy);
/* just to make sure there is no pending discard commands */
__wait_all_discard_cmd(sbi, NULL);
return dropped;
}
@ -1397,7 +1381,7 @@ static int issue_discard_thread(void *data)
set_freezable();
do {
init_discard_policy(&dpolicy, DPOLICY_BG,
__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
dcc->discard_granularity);
wait_event_interruptible_timeout(*q,
@ -1415,7 +1399,7 @@ static int issue_discard_thread(void *data)
dcc->discard_wake = 0;
if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
init_discard_policy(&dpolicy, DPOLICY_FORCE, 1);
__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
sb_start_intwrite(sbi->sb);
@ -1708,32 +1692,6 @@ skip:
wake_up_discard_thread(sbi, false);
}
void init_discard_policy(struct discard_policy *dpolicy,
int discard_type, unsigned int granularity)
{
/* common policy */
dpolicy->type = discard_type;
dpolicy->sync = true;
dpolicy->granularity = granularity;
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
dpolicy->io_aware_gran = MAX_PLIST_NUM;
if (discard_type == DPOLICY_BG) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = true;
} else if (discard_type == DPOLICY_FORCE) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_FSTRIM) {
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
dpolicy->io_aware = false;
}
}
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
@ -2373,11 +2331,72 @@ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return has_candidate;
}
static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
unsigned int start, unsigned int end)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
struct rb_node **insert_p = NULL, *insert_parent = NULL;
struct discard_cmd *dc;
struct blk_plug plug;
int issued;
next:
issued = 0;
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
NULL, start,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
&insert_p, &insert_parent, true);
if (!dc)
dc = next_dc;
blk_start_plug(&plug);
while (dc && dc->lstart <= end) {
struct rb_node *node;
if (dc->len < dpolicy->granularity)
goto skip;
if (dc->state != D_PREP) {
list_move_tail(&dc->list, &dcc->fstrim_list);
goto skip;
}
__submit_discard_cmd(sbi, dpolicy, dc);
if (++issued >= dpolicy->max_requests) {
start = dc->lstart + dc->len;
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
__wait_all_discard_cmd(sbi, NULL);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto next;
}
skip:
node = rb_next(&dc->rb_node);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
if (fatal_signal_pending(current))
break;
}
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
__u64 start = F2FS_BYTES_TO_BLK(range->start);
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
unsigned int start_segno, end_segno, cur_segno;
unsigned int start_segno, end_segno;
block_t start_block, end_block;
struct cp_control cpc;
struct discard_policy dpolicy;
@ -2403,40 +2422,27 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
cpc.trim_start = start_segno;
cpc.trim_end = end_segno;
/* do checkpoint to issue discard commands safely */
for (cur_segno = start_segno; cur_segno <= end_segno;
cur_segno = cpc.trim_end + 1) {
cpc.trim_start = cur_segno;
if (sbi->discard_blks == 0)
goto out;
if (sbi->discard_blks == 0)
break;
else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
cpc.trim_end = end_segno;
else
cpc.trim_end = min_t(unsigned int,
rounddown(cur_segno +
BATCHED_TRIM_SEGMENTS(sbi),
sbi->segs_per_sec) - 1, end_segno);
mutex_lock(&sbi->gc_mutex);
err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
if (err)
break;
schedule();
}
mutex_lock(&sbi->gc_mutex);
err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
if (err)
goto out;
start_block = START_BLOCK(sbi, start_segno);
end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
end_block = START_BLOCK(sbi, end_segno + 1);
init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
start_block, end_block);
out:
range->len = F2FS_BLK_TO_BYTES(trimmed);
out:
return err;
}
@ -3823,8 +3829,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->min_ssr_sections = reserved_sections(sbi);
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
INIT_LIST_HEAD(&sm_info->sit_entry_set);
init_rwsem(&sm_info->curseg_lock);


@ -740,6 +740,10 @@ static int parse_options(struct super_block *sb, char *options)
} else if (strlen(name) == 6 &&
!strncmp(name, "strict", 6)) {
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
} else if (strlen(name) == 9 &&
!strncmp(name, "nobarrier", 9)) {
F2FS_OPTION(sbi).fsync_mode =
FSYNC_MODE_NOBARRIER;
} else {
kfree(name);
return -EINVAL;


@ -245,6 +245,9 @@ out:
return count;
}
if (!strcmp(a->attr.name, "trim_sections"))
return -EINVAL;
*ui = t;
if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)


@ -52,6 +52,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
/*
 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
* the beginning of the block for a proper header with the location information
* and CRC.
*/
unsigned int
xfs_agfl_size(
struct xfs_mount *mp)
{
unsigned int size = mp->m_sb.sb_sectsize;
if (xfs_sb_version_hascrc(&mp->m_sb))
size -= sizeof(struct xfs_agfl);
return size / sizeof(xfs_agblock_t);
}
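For reference, the slot-count arithmetic in the new helper can be checked in isolation. The sketch below is illustrative only; the 512-byte sector size and 36-byte struct xfs_agfl header are assumptions for the example, not values taken from this patch.

/* Illustrative sketch: recompute the AGFL slot count for a v5 (CRC) filesystem.
 * Assumed inputs: 512-byte sectors, 36-byte xfs_agfl header, 4-byte entries. */
#include <stdio.h>

int main(void)
{
        unsigned int sectsize = 512;    /* assumed sb_sectsize */
        unsigned int header = 36;       /* assumed sizeof(struct xfs_agfl) */
        unsigned int slot = 4;          /* sizeof(xfs_agblock_t) */

        printf("AGFL slots: %u\n", (sectsize - header) / slot);  /* 119 here */
        return 0;
}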
unsigned int
xfs_refc_block(
struct xfs_mount *mp)
@ -540,7 +557,7 @@ xfs_agfl_verify(
if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
return false;
for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
for (i = 0; i < xfs_agfl_size(mp); i++) {
if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
return false;
@ -2039,6 +2056,93 @@ xfs_alloc_space_available(
return true;
}
/*
* Check the agfl fields of the agf for inconsistency or corruption. The purpose
* is to detect an agfl header padding mismatch between current and early v5
* kernels. This problem manifests as a 1-slot size difference between the
* on-disk flcount and the active [first, last] range of a wrapped agfl. This
* may also catch variants of agfl count corruption unrelated to padding. Either
* way, we'll reset the agfl and warn the user.
*
* Return true if a reset is required before the agfl can be used, false
* otherwise.
*/
static bool
xfs_agfl_needs_reset(
struct xfs_mount *mp,
struct xfs_agf *agf)
{
uint32_t f = be32_to_cpu(agf->agf_flfirst);
uint32_t l = be32_to_cpu(agf->agf_fllast);
uint32_t c = be32_to_cpu(agf->agf_flcount);
int agfl_size = xfs_agfl_size(mp);
int active;
/* no agfl header on v4 supers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return false;
/*
* The agf read verifier catches severe corruption of these fields.
* Repeat some sanity checks to cover a packed -> unpacked mismatch if
* the verifier allows it.
*/
if (f >= agfl_size || l >= agfl_size)
return true;
if (c > agfl_size)
return true;
/*
* Check consistency between the on-disk count and the active range. An
* agfl padding mismatch manifests as an inconsistent flcount.
*/
if (c && l >= f)
active = l - f + 1;
else if (c)
active = agfl_size - f + l + 1;
else
active = 0;
return active != c;
}
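As an illustration only, the same active-range check can be run on a made-up wrapped AGFL to see how a one-slot flcount mismatch triggers a reset; none of the numbers below come from the patch.

/* Sketch: compare the recomputed active [first, last] range against the
 * on-disk flcount. Hypothetical 119-slot AGFL that wraps around the end. */
#include <stdio.h>

int main(void)
{
        unsigned int agfl_size = 119;   /* assumed xfs_agfl_size() result */
        unsigned int f = 115, l = 2;    /* wrapped: first > last */
        unsigned int c = 6;             /* flcount written with the other padding */
        unsigned int active;

        if (c && l >= f)
                active = l - f + 1;
        else if (c)
                active = agfl_size - f + l + 1; /* 119 - 115 + 2 + 1 = 7 */
        else
                active = 0;

        printf("needs reset: %s\n", active != c ? "yes" : "no");  /* yes */
        return 0;
}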
/*
* Reset the agfl to an empty state. Ignore/drop any existing blocks since the
* agfl content cannot be trusted. Warn the user that a repair is required to
* recover leaked blocks.
*
* The purpose of this mechanism is to handle filesystems affected by the agfl
* header padding mismatch problem. A reset keeps the filesystem online with a
* relatively minor free space accounting inconsistency rather than suffer the
* inevitable crash from use of an invalid agfl block.
*/
static void
xfs_agfl_reset(
struct xfs_trans *tp,
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
ASSERT(pag->pagf_agflreset);
trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
xfs_warn(mp,
"WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
"Please unmount and run xfs_repair.",
pag->pag_agno, pag->pagf_flcount);
agf->agf_flfirst = 0;
agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
agf->agf_flcount = 0;
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
XFS_AGF_FLCOUNT);
pag->pagf_flcount = 0;
pag->pagf_agflreset = false;
}
/*
* Decide whether to use this allocation group for this allocation.
* If so, fix up the btree freelist's size.
@ -2100,6 +2204,10 @@ xfs_alloc_fix_freelist(
}
}
/* reset a padding mismatched agfl before final free space check */
if (pag->pagf_agflreset)
xfs_agfl_reset(tp, agbp, pag);
/* If there isn't enough total space or single-extent, reject it. */
need = xfs_alloc_min_freelist(mp, pag);
if (!xfs_alloc_space_available(args, need, flags))
@ -2252,10 +2360,11 @@ xfs_alloc_get_freelist(
bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
be32_add_cpu(&agf->agf_flfirst, 1);
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
agf->agf_flfirst = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
ASSERT(!pag->pagf_agflreset);
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
@ -2363,10 +2472,11 @@ xfs_alloc_put_freelist(
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
agf->agf_fllast = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
ASSERT(!pag->pagf_agflreset);
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
@ -2381,7 +2491,7 @@ xfs_alloc_put_freelist(
xfs_alloc_log_agf(tp, agbp, logflags);
ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
@ -2414,9 +2524,9 @@ xfs_agf_verify(
if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
return false;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
@ -2572,6 +2682,7 @@ xfs_alloc_read_agf(
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
}
#ifdef DEBUG
else if (!XFS_FORCED_SHUTDOWN(mp)) {


@ -26,6 +26,8 @@ struct xfs_trans;
extern struct workqueue_struct *xfs_alloc_wq;
unsigned int xfs_agfl_size(struct xfs_mount *mp);
/*
* Freespace allocation types. Argument to xfs_alloc_[v]extent.
*/


@ -798,24 +798,13 @@ typedef struct xfs_agi {
&(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
(__be32 *)(bp)->b_addr)
/*
 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of
* slots in the beginning of the block for a proper header with the
* location information and CRC.
*/
#define XFS_AGFL_SIZE(mp) \
(((mp)->m_sb.sb_sectsize - \
(xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
sizeof(struct xfs_agfl) : 0)) / \
sizeof(xfs_agblock_t))
typedef struct xfs_agfl {
__be32 agfl_magicnum;
__be32 agfl_seqno;
uuid_t agfl_uuid;
__be64 agfl_lsn;
__be32 agfl_crc;
__be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
__be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */
} __attribute__((packed)) xfs_agfl_t;
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)


@ -294,7 +294,7 @@ xfs_growfs_data_private(
}
agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
error = xfs_bwrite(bp);


@ -353,6 +353,7 @@ typedef struct xfs_perag {
char pagi_inodeok; /* The agi is ok for inodes */
uint8_t pagf_levels[XFS_BTNUM_AGF];
/* # of levels in bno & cnt btree */
bool pagf_agflreset; /* agfl requires reset before use */
uint32_t pagf_flcount; /* count of blocks in freelist */
xfs_extlen_t pagf_freeblks; /* total free blocks */
xfs_extlen_t pagf_longest; /* longest free space */


@ -1513,7 +1513,7 @@ TRACE_EVENT(xfs_extent_busy_trim,
__entry->tlen)
);
TRACE_EVENT(xfs_agf,
DECLARE_EVENT_CLASS(xfs_agf_class,
TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
unsigned long caller_ip),
TP_ARGS(mp, agf, flags, caller_ip),
@ -1569,6 +1569,13 @@ TRACE_EVENT(xfs_agf,
__entry->longest,
(void *)__entry->caller_ip)
);
#define DEFINE_AGF_EVENT(name) \
DEFINE_EVENT(xfs_agf_class, name, \
TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
unsigned long caller_ip), \
TP_ARGS(mp, agf, flags, caller_ip))
DEFINE_AGF_EVENT(xfs_agf);
DEFINE_AGF_EVENT(xfs_agfl_reset);
TRACE_EVENT(xfs_free_extent,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,


@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
int (*set_length)(struct iio_buffer *buffer, int length);
int (*set_length)(struct iio_buffer *buffer, unsigned int length);
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
*/
struct iio_buffer {
/** @length: Number of datums in buffer. */
int length;
unsigned int length;
/** @bytes_per_datum: Size of individual datum including timestamp. */
int bytes_per_datum;
size_t bytes_per_datum;
/**
* @access: Buffer access functions associated with the


@ -334,7 +334,7 @@ struct tcp_sock {
/* Receiver queue space */
struct {
int space;
u32 space;
u32 seq;
u64 time;
} rcvq_space;


@ -2617,7 +2617,7 @@ enum nl80211_attrs {
#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
#define NL80211_WIPHY_NAME_MAXLEN 128
#define NL80211_WIPHY_NAME_MAXLEN 64
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77


@ -105,6 +105,7 @@ extern char core_pattern[];
extern unsigned int core_pipe_limit;
#endif
extern int pid_max;
extern int extra_free_kbytes;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
extern int latencytop_enabled;
@ -1570,6 +1571,14 @@ static struct ctl_table vm_table[] = {
.extra1 = &one,
.extra2 = &one_thousand,
},
{
.procname = "extra_free_kbytes",
.data = &extra_free_kbytes,
.maxlen = sizeof(extra_free_kbytes),
.mode = 0644,
.proc_handler = min_free_kbytes_sysctl_handler,
.extra1 = &zero,
},
{
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,


@ -895,7 +895,7 @@ int __trace_bputs(unsigned long ip, const char *str)
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
void tracing_snapshot_instance(struct trace_array *tr)
{
struct tracer *tracer = tr->current_trace;
unsigned long flags;
@ -951,7 +951,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
static int alloc_snapshot(struct trace_array *tr)
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
int ret;
@ -997,7 +997,7 @@ int tracing_alloc_snapshot(void)
struct trace_array *tr = &global_trace;
int ret;
ret = alloc_snapshot(tr);
ret = tracing_alloc_snapshot_instance(tr);
WARN_ON(ret < 0);
return ret;
@ -5405,7 +5405,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) {
ret = alloc_snapshot(tr);
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
goto out;
}
@ -6386,7 +6386,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
}
#endif
if (!tr->allocated_snapshot) {
ret = alloc_snapshot(tr);
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
break;
}
@ -7107,7 +7107,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
return ret;
out_reg:
ret = alloc_snapshot(tr);
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
goto out;


@ -1813,6 +1813,17 @@ static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
return 0;
}
#endif
extern struct trace_iterator *tracepoint_print_iter;
#endif /* _LINUX_KERNEL_TRACE_H */


@ -482,9 +482,10 @@ clear_event_triggers(struct trace_array *tr)
struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
struct event_trigger_data *data;
list_for_each_entry_rcu(data, &file->triggers, list) {
struct event_trigger_data *data, *n;
list_for_each_entry_safe(data, n, &file->triggers, list) {
trace_event_trigger_enable_disable(file, 0);
list_del_rcu(&data->list);
if (data->ops->free)
data->ops->free(data->ops, data);
}
@ -641,6 +642,7 @@ event_trigger_callback(struct event_command *cmd_ops,
trigger_data->count = -1;
trigger_data->ops = trigger_ops;
trigger_data->cmd_ops = cmd_ops;
trigger_data->private_data = file;
INIT_LIST_HEAD(&trigger_data->list);
INIT_LIST_HEAD(&trigger_data->named_list);
@ -1041,7 +1043,12 @@ static struct event_command trigger_traceoff_cmd = {
static void
snapshot_trigger(struct event_trigger_data *data, void *rec)
{
tracing_snapshot();
struct trace_event_file *file = data->private_data;
if (file)
tracing_snapshot_instance(file->tr);
else
tracing_snapshot();
}
static void
@ -1063,7 +1070,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
{
int ret = register_trigger(glob, ops, data, file);
if (ret > 0 && tracing_alloc_snapshot() != 0) {
if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
unregister_trigger(glob, ops, data, file);
ret = 0;
}


@ -2388,7 +2388,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
__split_huge_page_tail(head, i, lruvec, list);
/* Some pages can be beyond i_size: drop them from page cache */
if (head[i].index >= end) {
__ClearPageDirty(head + i);
ClearPageDirty(head + i);
__delete_from_page_cache(head + i, NULL);
if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
shmem_uncharge(head->mapping->host, 1);


@ -258,10 +258,22 @@ compound_page_dtor * const compound_page_dtors[] = {
#endif
};
/*
* Try to keep at least this much lowmem free. Do not allow normal
* allocations below this point, only high priority ones. Automatically
* tuned according to the amount of memory in the system.
*/
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;
/*
* Extra memory for the system to try freeing. Used to temporarily
* free memory, to make space for new workloads. Anyone can allocate
* down to the min watermarks controlled by min_free_kbytes above.
*/
int extra_free_kbytes = 0;
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
@ -6899,6 +6911,7 @@ static void setup_per_zone_lowmem_reserve(void)
static void __setup_per_zone_wmarks(void)
{
unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
struct zone *zone;
unsigned long flags;
@ -6910,11 +6923,14 @@ static void __setup_per_zone_wmarks(void)
}
for_each_zone(zone) {
u64 tmp;
u64 min, low;
spin_lock_irqsave(&zone->lock, flags);
tmp = (u64)pages_min * zone->managed_pages;
do_div(tmp, lowmem_pages);
min = (u64)pages_min * zone->managed_pages;
do_div(min, lowmem_pages);
low = (u64)pages_low * zone->managed_pages;
do_div(low, vm_total_pages);
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
@ -6935,7 +6951,7 @@ static void __setup_per_zone_wmarks(void)
* If it's a lowmem zone, reserve a number of pages
* proportionate to the zone's size.
*/
zone->watermark[WMARK_MIN] = tmp;
zone->watermark[WMARK_MIN] = min;
}
/*
@ -6943,12 +6959,14 @@ static void __setup_per_zone_wmarks(void)
* scale factor in proportion to available memory, but
* ensure a minimum size on small systems.
*/
tmp = max_t(u64, tmp >> 2,
min = max_t(u64, min >> 2,
mult_frac(zone->managed_pages,
watermark_scale_factor, 10000));
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
low + min;
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
low + min * 2;
spin_unlock_irqrestore(&zone->lock, flags);
}
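A rough sketch of how the low and high watermarks shift once extra_free_kbytes is non-zero. All inputs below are assumptions for the example: 8192 kB min_free_kbytes, 65536 kB extra_free_kbytes, 4 KiB pages, one 2 GiB zone out of 8 GiB total, with lowmem_pages and vm_total_pages both taken to be the full 8 GiB.

/* Sketch of the per-zone watermark arithmetic after this change. */
#include <stdio.h>

int main(void)
{
        unsigned long long pages_min = 8192 >> 2;       /* min_free_kbytes in pages */
        unsigned long long pages_low = 65536 >> 2;      /* extra_free_kbytes in pages */
        unsigned long long zone_pages = 2ULL << 18;     /* 2 GiB of 4 KiB pages */
        unsigned long long total_pages = 8ULL << 18;    /* 8 GiB of 4 KiB pages */

        unsigned long long min = pages_min * zone_pages / total_pages;
        unsigned long long low = pages_low * zone_pages / total_pages;
        unsigned long long bump = min / 4;              /* min >> 2 */
        unsigned long long scaled = zone_pages * 10 / 10000; /* scale-factor floor */

        if (scaled > bump)
                bump = scaled;

        printf("WMARK_MIN  = %llu pages\n", min);                  /* 512 */
        printf("WMARK_LOW  = %llu pages\n", min + low + bump);     /* 5132 */
        printf("WMARK_HIGH = %llu pages\n", min + low + 2 * bump); /* 5656 */
        return 0;
}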
@ -7031,7 +7049,7 @@ core_initcall(init_per_zone_wmark_min)
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
* that we can call two helper functions whenever min_free_kbytes
* changes.
* or extra_free_kbytes changes.
*/
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)


@ -1451,7 +1451,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
return ret;
mapping = page_mapping(page);
migrate_dirty = mapping && mapping->a_ops->migratepage;
migrate_dirty = !mapping || mapping->a_ops->migratepage;
unlock_page(page);
if (!migrate_dirty)
return ret;


@ -592,8 +592,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
void tcp_rcv_space_adjust(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 copied;
int time;
int copied;
tcp_mstamp_refresh(tp);
time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@ -616,12 +616,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
if (sysctl_tcp_moderate_rcvbuf &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int rcvwin, rcvmem, rcvbuf;
int rcvmem, rcvbuf;
u64 rcvwin;
/* minimal window to cope with packet losses, assuming
* steady state. Add some cushion because of small variations.
*/
rcvwin = (copied << 1) + 16 * tp->advmss;
rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
/* If rate increased by 25%,
* assume slow start, rcvwin = 3 * copied
@ -641,7 +642,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
do_div(rcvwin, tp->advmss);
rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
if (rcvbuf > sk->sk_rcvbuf) {
sk->sk_rcvbuf = rcvbuf;
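For context, a small sketch of the overflow the widening avoids: with enough bytes copied per measurement window the old int expression wraps negative, while the u64 form does not. The byte count below is made up for the example.

/* Sketch: old 32-bit vs new 64-bit rcvwin arithmetic.
 * Assumed values: ~1.2 GB copied in one window, advmss = 1460. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t copied = 1200u * 1024 * 1024;
        uint32_t advmss = 1460;

        int32_t old_rcvwin = (int32_t)((copied << 1) + 16u * advmss); /* wraps */
        uint64_t new_rcvwin = ((uint64_t)copied << 1) + 16ull * advmss;

        printf("old: %d\n", old_rcvwin);        /* negative */
        printf("new: %llu\n", (unsigned long long)new_rcvwin);
        return 0;
}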


@ -1753,10 +1753,6 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
struct sk_buff *skb;
int err;
err = verify_policy_dir(dir);
if (err)
return ERR_PTR(err);
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
@ -2278,10 +2274,6 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct xfrm_encap_tmpl *encap = NULL;
err = verify_policy_dir(pi->dir);
if (err)
return err;
if (attrs[XFRMA_MIGRATE] == NULL)
return -EINVAL;
@ -2415,11 +2407,6 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
{
struct net *net = &init_net;
struct sk_buff *skb;
int err;
err = verify_policy_dir(dir);
if (err)
return err;
skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
GFP_ATOMIC);
@ -3089,11 +3076,6 @@ out_free_skb:
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
int err;
err = verify_policy_dir(dir);
if (err)
return err;
switch (c->event) {
case XFRM_MSG_NEWPOLICY:


@ -1448,7 +1448,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
scontext_len, &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
context.len = scontext_len;
context.len = strlen(str) + 1;
str = NULL;
} else if (rc)
goto out_unlock;
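For illustration, the kind of length mismatch this one-liner addresses, sketched outside the kernel with made-up sizes; the code below is not the SELinux path itself, only a userspace analogy.

/* Sketch: the context string was duplicated NUL-terminated, so its allocation
 * is strlen() + 1 bytes. Recording the caller's (possibly larger) scontext_len
 * as the context length lets later copies read past that allocation. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *scontext = "system_u:object_r:etc_t:s0";
        size_t scontext_len = 64;               /* assumed oversized length */
        char *str = strdup(scontext);           /* allocation: strlen + 1 = 27 */

        size_t old_len = scontext_len;          /* 64: overruns the 27-byte buffer */
        size_t new_len = strlen(str) + 1;       /* 27: matches the allocation */

        printf("alloc=%zu old=%zu new=%zu\n", strlen(str) + 1, old_len, new_len);
        free(str);
        return 0;
}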


@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
struct sst_pdata *sst_pdata = sst->pdata;
struct sst_dma *dma;
struct resource mem;
const char *dma_dev_name;
int ret = 0;
if (sst->pdata->resindex_dma_base == -1)
@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
* is attached to the ADSP IP. */
switch (sst->pdata->dma_engine) {
case SST_DMA_TYPE_DW:
dma_dev_name = "dw_dmac";
break;
default:
dev_err(sst->dev, "error: invalid DMA engine %d\n",


@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
return next;
}
static struct instruction *next_insn_same_func(struct objtool_file *file,
struct instruction *insn)
{
struct instruction *next = list_next_entry(insn, list);
struct symbol *func = insn->func;
if (!func)
return NULL;
if (&next->list != &file->insn_list && next->func == func)
return next;
/* Check if we're already in the subfunction: */
if (func == func->cfunc)
return NULL;
/* Move to the subfunction: */
return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
#define func_for_each_insn_all(file, func, insn) \
for (insn = find_insn(file, func->sec, func->offset); \
insn; \
insn = next_insn_same_func(file, insn))
#define func_for_each_insn(file, func, insn) \
for (insn = find_insn(file, func->sec, func->offset); \
insn && &insn->list != &file->insn_list && \
@ -148,10 +173,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
if (!strcmp(func->name, global_noreturns[i]))
return 1;
if (!func->sec)
if (!func->len)
return 0;
func_for_each_insn(file, func, insn) {
insn = find_insn(file, func->sec, func->offset);
if (!insn->func)
return 0;
func_for_each_insn_all(file, func, insn) {
empty = false;
if (insn->type == INSN_RETURN)
@ -166,35 +195,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
* case, the function's dead-end status depends on whether the target
* of the sibling call returns.
*/
func_for_each_insn(file, func, insn) {
if (insn->sec != func->sec ||
insn->offset >= func->offset + func->len)
break;
func_for_each_insn_all(file, func, insn) {
if (insn->type == INSN_JUMP_UNCONDITIONAL) {
struct instruction *dest = insn->jump_dest;
struct symbol *dest_func;
if (!dest)
/* sibling call to another file */
return 0;
if (dest->sec != func->sec ||
dest->offset < func->offset ||
dest->offset >= func->offset + func->len) {
/* local sibling call */
dest_func = find_symbol_by_offset(dest->sec,
dest->offset);
if (!dest_func)
continue;
if (dest->func && dest->func->pfunc != insn->func->pfunc) {
/* local sibling call */
if (recursion == 5) {
WARN_FUNC("infinite recursion (objtool bug!)",
dest->sec, dest->offset);
return -1;
/*
* Infinite recursion: two functions
* have sibling calls to each other.
* This is a very rare case. It means
* they aren't dead ends.
*/
return 0;
}
return __dead_end_function(file, dest_func,
return __dead_end_function(file, dest->func,
recursion + 1);
}
}
@ -421,7 +443,7 @@ static void add_ignores(struct objtool_file *file)
if (!ignore_func(file, func))
continue;
func_for_each_insn(file, func, insn)
func_for_each_insn_all(file, func, insn)
insn->ignore = true;
}
}
@ -781,30 +803,35 @@ out:
return ret;
}
static int add_switch_table(struct objtool_file *file, struct symbol *func,
struct instruction *insn, struct rela *table,
struct rela *next_table)
static int add_switch_table(struct objtool_file *file, struct instruction *insn,
struct rela *table, struct rela *next_table)
{
struct rela *rela = table;
struct instruction *alt_insn;
struct alternative *alt;
struct symbol *pfunc = insn->func->pfunc;
unsigned int prev_offset = 0;
list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
if (rela == next_table)
break;
if (rela->sym->sec != insn->sec ||
rela->addend <= func->offset ||
rela->addend >= func->offset + func->len)
/* Make sure the switch table entries are consecutive: */
if (prev_offset && rela->offset != prev_offset + 8)
break;
alt_insn = find_insn(file, insn->sec, rela->addend);
if (!alt_insn) {
WARN("%s: can't find instruction at %s+0x%x",
file->rodata->rela->name, insn->sec->name,
rela->addend);
return -1;
}
/* Detect function pointers from contiguous objects: */
if (rela->sym->sec == pfunc->sec &&
rela->addend == pfunc->offset)
break;
alt_insn = find_insn(file, rela->sym->sec, rela->addend);
if (!alt_insn)
break;
/* Make sure the jmp dest is in the function or subfunction: */
if (alt_insn->func->pfunc != pfunc)
break;
alt = malloc(sizeof(*alt));
if (!alt) {
@ -814,6 +841,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
alt->insn = alt_insn;
list_add_tail(&alt->list, &insn->alts);
prev_offset = rela->offset;
}
if (!prev_offset) {
WARN_FUNC("can't find switch jump table",
insn->sec, insn->offset);
return -1;
}
return 0;
@ -868,40 +902,21 @@ static struct rela *find_switch_table(struct objtool_file *file,
{
struct rela *text_rela, *rodata_rela;
struct instruction *orig_insn = insn;
unsigned long table_offset;
text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
if (text_rela && text_rela->sym == file->rodata->sym) {
/* case 1 */
rodata_rela = find_rela_by_dest(file->rodata,
text_rela->addend);
if (rodata_rela)
return rodata_rela;
/* case 2 */
rodata_rela = find_rela_by_dest(file->rodata,
text_rela->addend + 4);
if (!rodata_rela)
return NULL;
file->ignore_unreachables = true;
return rodata_rela;
}
/* case 3 */
/*
* Backward search using the @first_jump_src links, these help avoid
* much of the 'in between' code. Which avoids us getting confused by
* it.
*/
for (insn = list_prev_entry(insn, list);
for (;
&insn->list != &file->insn_list &&
insn->sec == func->sec &&
insn->offset >= func->offset;
insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
if (insn->type == INSN_JUMP_DYNAMIC)
if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
break;
/* allow small jumps within the range */
@ -917,18 +932,29 @@ static struct rela *find_switch_table(struct objtool_file *file,
if (!text_rela || text_rela->sym != file->rodata->sym)
continue;
table_offset = text_rela->addend;
if (text_rela->type == R_X86_64_PC32)
table_offset += 4;
/*
* Make sure the .rodata address isn't associated with a
* symbol. gcc jump tables are anonymous data.
*/
if (find_symbol_containing(file->rodata, text_rela->addend))
if (find_symbol_containing(file->rodata, table_offset))
continue;
rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
if (!rodata_rela)
continue;
rodata_rela = find_rela_by_dest(file->rodata, table_offset);
if (rodata_rela) {
/*
* Use of RIP-relative switch jumps is quite rare, and
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
if (text_rela->type == R_X86_64_PC32)
file->ignore_unreachables = true;
return rodata_rela;
return rodata_rela;
}
}
return NULL;
@ -942,7 +968,7 @@ static int add_func_switch_tables(struct objtool_file *file,
struct rela *rela, *prev_rela = NULL;
int ret;
func_for_each_insn(file, func, insn) {
func_for_each_insn_all(file, func, insn) {
if (!last)
last = insn;
@ -973,8 +999,7 @@ static int add_func_switch_tables(struct objtool_file *file,
* the beginning of another switch table in the same function.
*/
if (prev_jump) {
ret = add_switch_table(file, func, prev_jump, prev_rela,
rela);
ret = add_switch_table(file, prev_jump, prev_rela, rela);
if (ret)
return ret;
}
@ -984,7 +1009,7 @@ static int add_func_switch_tables(struct objtool_file *file,
}
if (prev_jump) {
ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
ret = add_switch_table(file, prev_jump, prev_rela, NULL);
if (ret)
return ret;
}
@ -1748,15 +1773,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
while (1) {
next_insn = next_insn_same_sec(file, insn);
if (file->c_file && func && insn->func && func != insn->func) {
if (file->c_file && func && insn->func && func != insn->func->pfunc) {
WARN("%s() falls through to next function %s()",
func->name, insn->func->name);
return 1;
}
if (insn->func)
func = insn->func;
func = insn->func ? insn->func->pfunc : NULL;
if (func && insn->ignore) {
WARN_FUNC("BUG: why am I validating an ignored function?",
@ -1777,7 +1800,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
i = insn;
save_insn = NULL;
func_for_each_insn_continue_reverse(file, func, i) {
func_for_each_insn_continue_reverse(file, insn->func, i) {
if (i->save) {
save_insn = i;
break;
@ -1864,7 +1887,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
case INSN_JUMP_UNCONDITIONAL:
if (insn->jump_dest &&
(!func || !insn->jump_dest->func ||
func == insn->jump_dest->func)) {
insn->jump_dest->func->pfunc == func)) {
ret = validate_branch(file, insn->jump_dest,
state);
if (ret)
@ -2059,7 +2082,7 @@ static int validate_functions(struct objtool_file *file)
for_each_sec(file, sec) {
list_for_each_entry(func, &sec->symbol_list, list) {
if (func->type != STT_FUNC)
if (func->type != STT_FUNC || func->pfunc != func)
continue;
insn = find_insn(file, sec, func->offset);


@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
return NULL;
}
struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
{
struct section *sec;
struct symbol *sym;
list_for_each_entry(sec, &elf->sections, list)
list_for_each_entry(sym, &sec->symbol_list, list)
if (!strcmp(sym->name, name))
return sym;
return NULL;
}
struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
{
struct symbol *sym;
@ -203,10 +216,11 @@ static int read_sections(struct elf *elf)
static int read_symbols(struct elf *elf)
{
struct section *symtab;
struct symbol *sym;
struct section *symtab, *sec;
struct symbol *sym, *pfunc;
struct list_head *entry, *tmp;
int symbols_nr, i;
char *coldstr;
symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf)
hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
}
/* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) {
list_for_each_entry(sym, &sec->symbol_list, list) {
if (sym->type != STT_FUNC)
continue;
sym->pfunc = sym->cfunc = sym;
coldstr = strstr(sym->name, ".cold.");
if (coldstr) {
coldstr[0] = '\0';
pfunc = find_symbol_by_name(elf, sym->name);
coldstr[0] = '.';
if (!pfunc) {
WARN("%s(): can't find parent function",
sym->name);
goto err;
}
sym->pfunc = pfunc;
pfunc->cfunc = sym;
}
}
}
return 0;
err:
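As a standalone illustration of the name handling above (the symbol name is invented): a GCC 8 cold subfunction such as "foo.cold.1" is mapped back to its parent by temporarily truncating at ".cold.", looking the parent up, then restoring the name.

/* Sketch: derive the parent symbol name of a GCC 8 cold subfunction. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[] = "schedule_timeout.cold.7";       /* hypothetical symbol */
        char *coldstr = strstr(name, ".cold.");

        if (coldstr) {
                coldstr[0] = '\0';              /* truncate, as read_symbols() does */
                printf("parent: %s\n", name);   /* "schedule_timeout" */
                coldstr[0] = '.';               /* restore the full name */
        }
        return 0;
}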


@ -61,6 +61,7 @@ struct symbol {
unsigned char bind, type;
unsigned long offset;
unsigned int len;
struct symbol *pfunc, *cfunc;
};
struct rela {
@ -86,6 +87,7 @@ struct elf {
struct elf *elf_open(const char *name, int flags);
struct section *find_section_by_name(struct elf *elf, const char *name);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,