msm-4.14/mm/mmap.c
Srinivasarao P 90bb7a2b24 Merge android-4.14-stable.180 (816f245) into msm-4.14
* refs/heads/tmp-816f245:
  Revert "clk: qcom: rcg2: Don't crash if our parent can't be found; return an error"
  Reverting crypto patches
  Reverting incremental fs changes
  Linux 4.14.180
  cgroup, netclassid: remove double cond_resched
  mac80211: add ieee80211_is_any_nullfunc()
  ALSA: hda: Match both PCI ID and SSID for driver blacklist
  tracing: Reverse the order of trace_types_lock and event_mutex
  sctp: Fix SHUTDOWN CTSN Ack in the peer restart case
  net: systemport: suppress warnings on failed Rx SKB allocations
  net: bcmgenet: suppress warnings on failed Rx SKB allocations
  lib/mpi: Fix building for powerpc with clang
  net: dsa: b53: Rework ARL bin logic
  scripts/config: allow colons in option strings for sed
  s390/ftrace: fix potential crashes when switching tracers
  cifs: protect updating server->dstaddr with a spinlock
  net: stmmac: Fix sub-second increment
  net: stmmac: fix enabling socfpga's ptp_ref_clock
  wimax/i2400m: Fix potential urb refcnt leak
  ASoC: codecs: hdac_hdmi: Fix incorrect use of list_for_each_entry
  ASoC: rsnd: Fix HDMI channel mapping for multi-SSI mode
  ASoC: sgtl5000: Fix VAG power-on handling
  selftests/ipc: Fix test failure seen after initial test run
  ASoC: topology: Check return value of pcm_new_ver
  powerpc/pci/of: Parse unassigned resources
  vhost: vsock: kick send_pkt worker once device is started
  ANDROID: arm64: fix a mismerge in proc.S
  Linux 4.14.179
  selinux: properly handle multiple messages in selinux_netlink_send()
  dmaengine: dmatest: Fix iteration non-stop logic
  nfs: Fix potential posix_acl refcnt leak in nfs3_set_acl
  ALSA: opti9xx: shut up gcc-10 range warning
  iommu/amd: Fix legacy interrupt remapping for x2APIC-enabled system
  scsi: target/iblock: fix WRITE SAME zeroing
  iommu/qcom: Fix local_base status check
  vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn()
  vfio: avoid possible overflow in vfio_iommu_type1_pin_pages
  RDMA/mlx4: Initialize ib_spec on the stack
  RDMA/mlx5: Set GRH fields in query QP on RoCE
  dm verity fec: fix hash block number in verity_fec_decode
  PM: hibernate: Freeze kernel threads in software_resume()
  PM: ACPI: Output correct message on target power state
  ALSA: pcm: oss: Place the plugin buffer overflow checks correctly
  ALSA: hda/hdmi: fix missing unlock before return
  ALSA: hda/realtek - Two front mics on a Lenovo ThinkCenter
  mmc: sdhci-pci: Fix eMMC driver strength for BYT-based controllers
  mmc: sdhci-xenon: fix annoying 1.8V regulator warning
  btrfs: fix partial loss of prealloc extent past i_size after fsync
  btrfs: fix block group leak when removing fails
  drm/qxl: qxl_release use after free
  drm/qxl: qxl_release leak in qxl_hw_surface_alloc()
  drm/qxl: qxl_release leak in qxl_draw_dirty_fb()
  drm/edid: Fix off-by-one in DispID DTD pixel clock
  ext4: fix special inode number checks in __ext4_iget()
  ANDROID: Incremental fs: Fix issues with very large files
  Linux 4.14.178
  propagate_one(): mnt_set_mountpoint() needs mount_lock
  ext4: check for non-zero journal inum in ext4_calculate_overhead
  qed: Fix use after free in qed_chain_free
  ext4: unsigned int compared against zero
  ext4: fix block validity checks for journal inodes using indirect blocks
  ext4: don't perform block validity checks on the journal inode
  ext4: protect journal inode's blocks using block_validity
  ext4: avoid declaring fs inconsistent due to invalid file handles
  hwmon: (jc42) Fix name to have no illegal characters
  ext4: convert BUG_ON's to WARN_ON's in mballoc.c
  ext4: increase wait time needed before reuse of deleted inode numbers
  ext4: use matching invalidatepage in ext4_writepage
  arm64: Delete the space separator in __emit_inst
  xen/xenbus: ensure xenbus_map_ring_valloc() returns proper grant status
  objtool: Support Clang non-section symbols in ORC dump
  objtool: Fix CONFIG_UBSAN_TRAP unreachable warnings
  scsi: target: fix PR IN / READ FULL STATUS for FC
  xfs: fix partially uninitialized structure in xfs_reflink_remap_extent
  x86: hyperv: report value of misc_features
  bpf, x86: Fix encoding for lower 8-bit registers in BPF_STX BPF_B
  mm: shmem: disable interrupt when acquiring info->lock in userfaultfd_copy path
  perf/core: fix parent pid/tid in task exit events
  ARM: dts: bcm283x: Disable dsi0 node
  net/cxgb4: Check the return from t4_query_params properly
  i2c: altera: use proper variable to hold errno
  nfsd: memory corruption in nfsd4_lock()
  iio:ad7797: Use correct attribute_group
  usb: gadget: udc: bdc: Remove unnecessary NULL checks in bdc_req_complete
  usb: dwc3: gadget: Do link recovery for SS and SSP
  binder: take read mode of mmap_sem in binder_alloc_free_page()
  include/uapi/linux/swab.h: fix userspace breakage, use __BITS_PER_LONG for swap
  mtd: cfi: fix deadloop in cfi_cmdset_0002.c do_write_buffer
  remoteproc: Fix wrong rvring index computation
  xfs: Fix deadlock between AGI and AGF with RENAME_WHITEOUT
  xfs: validate sb_logsunit is a multiple of the fs blocksize
  serial: sh-sci: Make sure status register SCxSR is read in correct sequence
  usb: f_fs: Clear OS Extended descriptor counts to zero in ffs_data_reset()
  UAS: fix deadlock in error handling and PM flushing work
  UAS: no use logging any details in case of ENODEV
  cdc-acm: introduce a cool down
  cdc-acm: close race between suspend() and acm_softint
  staging: vt6656: Fix power save stop wake_up_count wrap around.
  staging: vt6656: Fix pairwise key entry save.
  staging: vt6656: Fix driver's TBTT timing counter.
  staging: vt6656: Fix calling conditions of vnt_set_bss_mode
  staging: vt6656: Don't set RCR_MULTICAST or RCR_BROADCAST by default.
  vt: don't hardcode the mem allocation upper bound
  staging: comedi: Fix comedi_device refcnt leak in comedi_open
  staging: comedi: dt2815: fix writing hi byte of analog output
  powerpc/setup_64: Set cache-line-size based on cache-block-size
  ARM: imx: provide v7_cpu_resume() only on ARM_CPU_SUSPEND=y
  iwlwifi: pcie: actually release queue memory in TVQM
  ASoC: dapm: fixup dapm kcontrol widget
  audit: check the length of userspace generated audit records
  usb-storage: Add unusual_devs entry for JMicron JMS566
  tty: rocket, avoid OOB access
  tty: hvc: fix buffer overflow during hvc_alloc().
  KVM: VMX: Enable machine check support for 32bit targets
  KVM: Check validity of resolved slot when searching memslots
  tpm: ibmvtpm: retry on H_CLOSED in tpm_ibmvtpm_send()
  tpm/tpm_tis: Free IRQ if probing fails
  ALSA: usb-audio: Filter out unsupported sample rates on Focusrite devices
  ALSA: usb-audio: Fix usb audio refcnt leak when getting spdif
  ALSA: hda/realtek - Add new codec supported for ALC245
  ALSA: usx2y: Fix potential NULL dereference
  tools/vm: fix cross-compile build
  mm/ksm: fix NULL pointer dereference when KSM zero page is enabled
  mm/hugetlb: fix an addressing exception caused by huge_pte_offset
  vmalloc: fix remap_vmalloc_range() bounds checks
  overflow.h: Add arithmetic shift helper
  USB: hub: Fix handling of connect changes during sleep
  USB: core: Fix free-while-in-use bug in the USB SG library
  USB: early: Handle AMD's spec-compliant identifiers, too
  USB: Add USB_QUIRK_DELAY_CTRL_MSG and USB_QUIRK_DELAY_INIT for Corsair K70 RGB RAPIDFIRE
  USB: sisusbvga: Change port variable from signed to unsigned
  fs/namespace.c: fix mountpoint reference counter race
  iio: xilinx-xadc: Fix sequencer configuration for aux channels in simultaneous mode
  iio: xilinx-xadc: Fix clearing interrupt when enabling trigger
  iio: xilinx-xadc: Fix ADC-B powerdown
  iio: adc: stm32-adc: fix sleep in atomic context
  ALSA: hda: Remove ASUS ROG Zenith from the blacklist
  KEYS: Avoid false positive ENOMEM error on key read
  vrf: Check skb for XFRM_TRANSFORMED flag
  xfrm: Always set XFRM_TRANSFORMED in xfrm{4,6}_output_finish
  net: dsa: b53: Fix ARL register definitions
  team: fix hang in team_mode_get()
  tcp: cache line align MAX_TCP_HEADER
  net/x25: Fix x25_neigh refcnt leak when receiving frame
  net: netrom: Fix potential nr_neigh refcnt leak in nr_add_node
  net: bcmgenet: correct per TX/RX ring statistics
  macvlan: fix null dereference in macvlan_device_event()
  macsec: avoid to set wrong mtu
  ipv6: fix restrict IPV6_ADDRFORM operation
  cxgb4: fix large delays in PTP synchronization
  mm, slub: restore the original intention of prefetch_freepointer()
  PCI/ASPM: Allow re-enabling Clock PM
  perf/core: Disable page faults when getting phys address
  pwm: bcm2835: Dynamically allocate base
  pwm: renesas-tpu: Fix late Runtime PM enablement
  s390/cio: avoid duplicated 'ADD' uevents
  ipc/util.c: sysvipc_find_ipc() should increase position index
  selftests: kmod: fix handling test numbers above 9
  kernel/gcov/fs.c: gcov_seq_next() should increase position index
  ASoC: Intel: atom: Take the drv->lock mutex before calling sst_send_slot_map()
  scsi: iscsi: Report unbind session event when the target has been removed
  pwm: rcar: Fix late Runtime PM enablement
  ceph: don't skip updating wanted caps when cap is stale
  ceph: return ceph_mdsc_do_request() errors from __get_parent()
  scsi: lpfc: Fix kasan slab-out-of-bounds error in lpfc_unreg_login
  watchdog: reset last_hw_keepalive time at start
  vti4: remove duplicate log message.
  crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
  drm/msm: Use the correct dma_sync calls harder
  keys: Fix the use of the C++ keyword "private" in uapi/linux/keyctl.h
  net: ipv4: avoid unused variable warning for sysctl
  net: ipv4: emulate READ_ONCE() on ->hdrincl bit-field in raw_sendmsg()
  ext4: fix extent_status fragmentation for plain files
  FROMGIT: f2fs: fix missing check for f2fs_unlock_op
  ANDROID: Fix kernel build regressions from virtio-gpu-next patches
  ANDROID: Incremental fs: Add setattr call
  ANDROID: cuttlefish_defconfig: enable LTO and CFI
  ANDROID: x86: map CFI jump tables in pti_clone_entry_text
  ANDROID: crypto: aesni: fix function types for aesni_(enc|dec)
  ANDROID: x86: disable CFI for do_syscall_*
  ANDROID: BACKPORT: x86, module: Ignore __typeid__ relocations
  ANDROID: BACKPORT: x86, relocs: Ignore __typeid__ relocations
  ANDROID: BACKPORT: x86/extable: Do not mark exception callback as CFI
  FROMLIST: crypto, x86/sha: Eliminate casts on asm implementations
  UPSTREAM: crypto: x86 - Rename functions to avoid conflict with crypto/sha256.h
  BACKPORT: x86/vmlinux: Actually use _etext for the end of the text segment
  ANDROID: x86: disable STACK_VALIDATION with LTO_CLANG
  ANDROID: x86: add support for CONFIG_LTO_CLANG
  ANDROID: x86/vdso: disable LTO only for VDSO
  ANDROID: x86/cpu/vmware: use the full form of inl in VMWARE_PORT
  UPSTREAM: x86/build/lto: Fix truncated .bss with -fdata-sections
  ANDROID: kbuild: don't select LD_DEAD_CODE_DATA_ELIMINATION with LTO
  ANDROID: kbuild: export LTO and CFI flags
  ANDROID: cfi: remove unnecessary <asm/memory.h> include
  ANDROID: drm/virtio: rebase to latest virgl/drm-misc-next (take 2)
  UPSTREAM: sysrq: Use panic() to force a crash
  ANDROID: Incremental fs: Use simple compression in log buffer
  ANDROID: dm-bow: Fix not to skip trim at fragmented range
  ANDROID: Remove VLA from uid_sys_stats.c
  ANDROID: cuttlefish_defconfig: enable CONFIG_DEBUG_LIST
  Linux 4.14.177
  KEYS: Don't write out to userspace while holding key semaphore
  KEYS: Use individual pages in big_key for crypto buffers
  mtd: phram: fix a double free issue in error path
  mtd: lpddr: Fix a double free in probe()
  locktorture: Print ratio of acquisitions, not failures
  tty: evh_bytechan: Fix out of bounds accesses
  fbdev: potential information leak in do_fb_ioctl()
  net: dsa: bcm_sf2: Fix overflow checks
  iommu/amd: Fix the configuration of GCR3 table root pointer
  libnvdimm: Out of bounds read in __nd_ioctl()
  ext2: fix debug reference to ext2_xattr_cache
  ext2: fix empty body warnings when -Wextra is used
  iommu/vt-d: Fix mm reference leak
  NFS: Fix memory leaks in nfs_pageio_stop_mirroring()
  drm/amdkfd: kfree the wrong pointer
  x86: ACPI: fix CPU hotplug deadlock
  KVM: s390: vsie: Fix possible race when shadowing region 3 tables
  compiler.h: fix error in BUILD_BUG_ON() reporting
  percpu_counter: fix a data race at vm_committed_as
  include/linux/swapops.h: correct guards for non_swap_entry()
  ext4: do not commit super on read-only bdev
  powerpc/maple: Fix declaration made after definition
  s390/cpuinfo: fix wrong output when CPU0 is offline
  NFS: direct.c: Fix memory leak of dreq when nfs_get_lock_context fails
  NFSv4/pnfs: Return valid stateids in nfs_layout_find_inode_by_stateid()
  rtc: 88pm860x: fix possible race condition
  soc: imx: gpc: fix power up sequencing
  clk: tegra: Fix Tegra PMC clock out parents
  power: supply: bq27xxx_battery: Silence deferred-probe error
  clk: at91: usb: continue if clk_hw_round_rate() return zero
  of: unittest: kmemleak in of_unittest_platform_populate()
  rbd: call rbd_dev_unprobe() after unwatching and flushing notifies
  rbd: avoid a deadlock on header_rwsem when flushing notifies
  of: fix missing kobject init for !SYSFS && OF_DYNAMIC config
  soc: qcom: smem: Use le32_to_cpu for comparison
  wil6210: abort properly in cfg suspend
  wil6210: fix length check in __wmi_send
  wil6210: add block size checks during FW load
  wil6210: fix PCIe bus mastering in case of interface down
  rpmsg: glink: smem: Ensure ordering during tx
  rpmsg: glink: Fix missing mutex_init() in qcom_glink_alloc_channel()
  rtc: pm8xxx: Fix issue in RTC write path
  rpmsg: glink: use put_device() if device_register fail
  wil6210: rate limit wil_rx_refill error
  scsi: ufs: ufs-qcom: remove broken hci version quirk
  scsi: ufs: make sure all interrupts are processed
  wil6210: fix temperature debugfs
  wil6210: increase firmware ready timeout
  arch_topology: Fix section mismatch warning due to free_raw_capacity()
  arm64: traps: Don't print stack or raw PC/LR values in backtraces
  arm64: perf: remove unsupported events for Cortex-A73
  Revert "gpio: set up initial state from .get_direction()"
  clk: Fix debugfs_create_*() usage
  drm: NULL pointer dereference [null-pointer-deref] (CWE 476) problem
  video: fbdev: sis: Remove unnecessary parentheses and commented code
  lib/raid6: use vdupq_n_u8 to avoid endianness warnings
  ALSA: hda: Don't release card at firmware loading error
  irqchip/mbigen: Free msi_desc on device teardown
  netfilter: nf_tables: report EOPNOTSUPP on unsupported flags/object type
  arm, bpf: Fix bugs with ALU64 {RSH, ARSH} BPF_K shift by 0
  ext4: use non-movable memory for superblock readahead
  scsi: sg: add sg_remove_request in sg_common_write
  objtool: Fix switch table detection in .text.unlikely
  mm/vmalloc.c: move 'area->pages' after if statement
  x86/resctrl: Fix invalid attempt at removing the default resource group
  x86/resctrl: Preserve CDP enable over CPU hotplug
  x86/intel_rdt: Enable L2 CDP in MSR IA32_L2_QOS_CFG
  x86/intel_rdt: Add two new resources for L2 Code and Data Prioritization (CDP)
  x86/intel_rdt: Enumerate L2 Code and Data Prioritization (CDP) feature
  x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
  scsi: target: fix hang when multiple threads try to destroy the same iscsi session
  scsi: target: remove boilerplate code
  kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
  dm flakey: check for null arg_name in parse_features()
  ext4: do not zeroout extents beyond i_disksize
  mac80211_hwsim: Use kstrndup() in place of kasprintf()
  btrfs: check commit root generation in should_ignore_root
  tracing: Fix the race between registering 'snapshot' event trigger and triggering 'snapshot' operation
  ALSA: usb-audio: Don't override ignore_ctl_error value from the map
  ASoC: Intel: mrfld: return error codes when an error occurs
  ASoC: Intel: mrfld: fix incorrect check on p->sink
  ext4: fix incorrect inodes per group in error message
  ext4: fix incorrect group count in ext4_fill_super error message
  pwm: pca9685: Fix PWM/GPIO inter-operation
  jbd2: improve comments about freeing data buffers whose page mapping is NULL
  scsi: ufs: Fix ufshcd_hold() caused scheduling while atomic
  net: stmmac: dwmac-sunxi: Provide TX and RX fifo sizes
  net: revert default NAPI poll timeout to 2 jiffies
  net: qrtr: send msgs from local of same id as broadcast
  net: ipv6: do not consider routes via gateways for anycast address check
  net: ipv4: devinet: Fix crash when add/del multicast IP with autojoin
  hsr: check protocol version in hsr_newlink()
  amd-xgbe: Use __napi_schedule() in BH context
  mfd: dln2: Fix sanity checking for endpoints
  misc: echo: Remove unnecessary parentheses and simplify check for zero
  powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
  ipmi: fix hung processes in __get_guid()
  ftrace/kprobe: Show the maxactive number on kprobe_events
  drm: Remove PageReserved manipulation from drm_pci_alloc
  drm/dp_mst: Fix clearing payload state on topology disable
  crypto: caam - update xts sector size for large input length
  dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
  btrfs: use nofs allocations for running delayed items
  Btrfs: fix crash during unmount due to race with delayed inode workers
  powerpc: Make setjmp/longjmp signature standard
  powerpc: Add attributes for setjmp/longjmp
  scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
  powerpc/kprobes: Ignore traps that happened in real mode
  powerpc/xive: Use XIVE_BAD_IRQ instead of zero to catch non configured IPIs
  powerpc/hash64/devmap: Use H_PAGE_THP_HUGE when setting up huge devmap PTE entries
  powerpc/64/tm: Don't let userspace set regs->trap via sigreturn
  powerpc/powernv/idle: Restore AMR/UAMOR/AMOR after idle
  libata: Return correct status in sata_pmp_eh_recover_pm() when ATA_DFLAG_DETACH is set
  hfsplus: fix crash and filesystem corruption when deleting files
  cpufreq: powernv: Fix use-after-free
  kmod: make request_module() return an error when autoloading is disabled
  Input: i8042 - add Acer Aspire 5738z to nomux list
  s390/diag: fix display of diagnose call statistics
  perf tools: Support Python 3.8+ in Makefile
  ocfs2: no need try to truncate file beyond i_size
  fs/filesystems.c: downgrade user-reachable WARN_ONCE() to pr_warn_once()
  ext4: fix a data race at inode->i_blocks
  NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
  rtc: omap: Use define directive for PIN_CONFIG_ACTIVE_HIGH
  arm64: armv8_deprecated: Fix undef_hook mask for thumb setend
  scsi: zfcp: fix missing erp_lock in port recovery trigger for point-to-point
  dm verity fec: fix memory leak in verity_fec_dtr
  mm: Use fixed constant in page_frag_alloc instead of size + 1
  tools: gpio: Fix out-of-tree build regression
  x86/speculation: Remove redundant arch_smt_update() invocation
  powerpc/pseries: Drop pointless static qualifier in vpa_debugfs_init()
  net: rtnl_configure_link: fix dev flags changes arg to __dev_notify_flags
  ALSA: hda: Initialize power_state field properly
  crypto: mxs-dcp - fix scatterlist linearization for hash
  btrfs: drop block from cache on error in relocation
  CIFS: Fix bug where the return value of asynchronous read is wrong
  KVM: VMX: fix crash cleanup when KVM wasn't used
  KVM: VMX: Always VMCLEAR in-use VMCSes during crash with kexec support
  KVM: x86: Allocate new rmap and large page tracking when moving memslot
  KVM: s390: vsie: Fix delivery of addressing exceptions
  KVM: s390: vsie: Fix region 1 ASCE sanity shadow address checks
  KVM: nVMX: Properly handle userspace interrupt window request
  x86/entry/32: Add missing ASM_CLAC to general_protection entry
  signal: Extend exec_id to 64bits
  ath9k: Handle txpower changes even when TPC is disabled
  MIPS: OCTEON: irq: Fix potential NULL pointer dereference
  irqchip/versatile-fpga: Apply clear-mask earlier
  KEYS: reaching the keys quotas correctly
  PCI: endpoint: Fix for concurrent memory allocation in OB address region
  PCI/ASPM: Clear the correct bits when enabling L1 substates
  nvme-fc: Revert "add module to ops template to allow module references"
  thermal: devfreq_cooling: inline all stubs for CONFIG_DEVFREQ_THERMAL=n
  acpi/x86: ignore unspecified bit positions in the ACPI global lock field
  media: ti-vpe: cal: fix disable_irqs to only the intended target
  ALSA: hda/realtek - Set principled PC Beep configuration for ALC256
  ALSA: doc: Document PC Beep Hidden Register on Realtek ALC256
  ALSA: pcm: oss: Fix regression by buffer overflow fix
  ALSA: ice1724: Fix invalid access for enumerated ctl items
  ALSA: hda: Fix potential access overflow in beep helper
  ALSA: hda: Add driver blacklist
  ALSA: usb-audio: Add mixer workaround for TRX40 and co
  usb: gadget: composite: Inform controller driver of self-powered
  usb: gadget: f_fs: Fix use after free issue as part of queue failure
  ASoC: topology: use name_prefix for new kcontrol
  ASoC: dpcm: allow start or stop during pause for backend
  ASoC: dapm: connect virtual mux with default value
  ASoC: fix regwmask
  slub: improve bit diffusion for freelist ptr obfuscation
  misc: rtsx: set correct pcr_ops for rts522A
  uapi: rename ext2_swab() to swab() and share globally in swab.h
  btrfs: track reloc roots based on their commit root bytenr
  btrfs: remove a BUG_ON() from merge_reloc_roots()
  block, bfq: fix use-after-free in bfq_idle_slice_timer_body
  locking/lockdep: Avoid recursion in lockdep_count_{for,back}ward_deps()
  irqchip/gic-v4: Provide irq_retrigger to avoid circular locking dependency
  usb: dwc3: core: add support for disabling SS instances in park mode
  block: Fix use-after-free issue accessing struct io_cq
  genirq/irqdomain: Check pointer in irq_domain_alloc_irqs_hierarchy()
  efi/x86: Ignore the memory attributes table on i386
  x86/boot: Use unsigned comparison for addresses
  gfs2: Don't demote a glock until its revokes are written
  libata: Remove extra scsi_host_put() in ata_scsi_add_hosts()
  PCI/switchtec: Fix init_completion race condition with poll_wait()
  selftests/x86/ptrace_syscall_32: Fix no-vDSO segfault
  sched: Avoid scale real weight down to zero
  irqchip/versatile-fpga: Handle chained IRQs properly
  block: keep bdi->io_pages in sync with max_sectors_kb for stacked devices
  x86: Don't let pgprot_modify() change the page encryption bit
  null_blk: fix spurious IO errors after failed past-wp access
  null_blk: Handle null_add_dev() failures properly
  null_blk: Fix the null_add_dev() error path
  i2c: st: fix missing struct parameter description
  qlcnic: Fix bad kzalloc null test
  cxgb4/ptp: pass the sign of offset delta in FW CMD
  hinic: fix wrong parameter of wait_for_completion_timeout
  hinic: fix a bug when waiting for IO to stop
  net: vxge: fix wrong __VA_ARGS__ usage
  bus: sunxi-rsb: Return correct data when mixing 16-bit and 8-bit reads
  ANDROID: fix wakeup reason findings
  UPSTREAM: gpu/trace: add a gpu total memory usage tracepoint
  CHROMIUM: drm/virtio: rebase zero-copy patches to virgl/drm-misc-next
  CHROMIUM: virtio-gpu: add VIRTIO_GPU_F_RESOURCE_UUID feature
  CHROMIUM: drm/virtgpu: add legacy VIRTIO_GPU_* values for non-upstream variants
  CHROMIUM: drm/virtgpu: fix various warnings
  CHROMIUM: drm/virtgpu: implement metadata allocation ioctl
  CHROMIUM: drm/virtgpu: introduce request IDRs
  CHROMIUM: drm/virtgpu: implement DRM_VIRTGPU_RESOURCE_CREATE_V2
  CHROMIUM: drm/virtgpu: add stub ioctl implementation
  CHROMIUM: drm/virtgpu: check for relevant capabilities
  CHROMIUM: drm/virtgpu: add memory type to virtio_gpu_object_params
  CHROMIUM: drm/virtgpu: make memory and resource creation opaque
  CHROMIUM: virtio-gpu api: VIRTIO_GPU_F_MEMORY
  CHROMIUM: virtwl: store plane info per virtio_gpu_object
  CHROMIUM: drm/virtgpu: expose new ioctls to userspace
  BACKPORT: drm/virtio: move virtio_gpu_object_{attach, detach} calls.
  ANDROID: drm: ttm: Add ttm_tt_create2 driver hook
  UPSTREAM: virtio-gpu api: comment feature flags
  UPSTREAM: drm/virtio: module_param_named() requires linux/moduleparam.h
  BACKPORT: drm/virtio: fix resource id creation race
  BACKPORT: drm/virtio: make resource id workaround runtime switchable.
  BACKPORT: drm/virtio: do NOT reuse resource ids
  BACKPORT: drm/virtio: Drop deprecated load/unload initialization
  f2fs: fix quota_sync failure due to f2fs_lock_op
  f2fs: support read iostat
  f2fs: Fix the accounting of dcc->undiscard_blks
  f2fs: fix to handle error path of f2fs_ra_meta_pages()
  f2fs: report the discard cmd errors properly
  f2fs: fix long latency due to discard during umount
  f2fs: add tracepoint for f2fs iostat
  f2fs: introduce sysfs/data_io_flag to attach REQ_META/FUA
  UPSTREAM: kheaders: include only headers into kheaders_data.tar.xz
  UPSTREAM: kheaders: remove meaningless -R option of 'ls'
  ANDROID: Incremental fs: Fix create_file performance
  ANDROID: Incremental fs: Fix compound page usercopy crash
  ANDROID: Incremental fs: Clean up incfs_test build process
  ANDROID: Incremental fs: make remount log buffer change atomic
  ANDROID: Incremental fs: Optimize get_filled_block
  ANDROID: Incremental fs: Fix mislabeled __user ptrs
  ANDROID: Incremental fs: Use 64-bit int for file_size when writing hash blocks
  Revert "ANDROID: Incremental fs: Fix initialization, use of bitfields"
  Linux 4.14.176
  drm/msm: Use the correct dma_sync calls in msm_gem
  rpmsg: glink: smem: Support rx peak for size less than 4 bytes
  drm_dp_mst_topology: fix broken drm_dp_sideband_parse_remote_dpcd_read()
  usb: dwc3: don't set gadget->is_otg flag
  rpmsg: glink: Remove chunk size word align warning
  arm64: Fix size of __early_cpu_boot_status
  drm/msm: stop abusing dma_map/unmap for cache
  clk: qcom: rcg: Return failure for RCG update
  acpi/nfit: Fix bus command validation
  fbcon: fix null-ptr-deref in fbcon_switch
  RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
  Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
  ceph: canonicalize server path in place
  ceph: remove the extra slashes in the server path
  IB/hfi1: Fix memory leaks in sysfs registration and unregistration
  IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
  ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
  hwrng: imx-rngc - fix an error path
  tools/accounting/getdelays.c: fix netlink attribute length
  random: always use batched entropy for get_random_u{32,64}
  mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
  slcan: Don't transmit uninitialized stack data in padding
  net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
  net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
  net: dsa: bcm_sf2: Ensure correct sub-node is parsed
  ipv6: don't auto-add link-local address to lag ports
  mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
  padata: always acquire cpu_hotplug_lock before pinst->lock
  coresight: do not use the BIT() macro in the UAPI header
  misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
  blk-mq: Allow blocking queue tag iter callbacks
  blk-mq: sync the update of nr_hw_queues with blk_mq_queue_tag_busy_iter
  drm/etnaviv: replace MMU flush marker with flush sequence
  tools/power turbostat: Fix gcc build warnings
  initramfs: restore default compression behavior
  drm/bochs: downgrade pci_request_region failure from error to warning
  sctp: fix possibly using a bad saddr with a given dst
  sctp: fix refcount bug in sctp_wfree
  net, ip_tunnel: fix interface lookup with no key
  ipv4: fix a RCU-list lock in fib_triestat_seq_show
  ANDROID: power: wakeup_reason: wake reason enhancements
  ubifs: wire up FS_IOC_GET_ENCRYPTION_NONCE
  f2fs: wire up FS_IOC_GET_ENCRYPTION_NONCE
  ext4: wire up FS_IOC_GET_ENCRYPTION_NONCE
  fscrypt: add FS_IOC_GET_ENCRYPTION_NONCE ioctl
  FROMLIST: power_supply: Add additional health properties to the header
  UPSTREAM: power: supply: core: Update sysfs-class-power ABI document
  BACKPORT: FROMGIT: kbuild: mkcompile_h: Include $LD version in /proc/version
  ANDROID: fscrypt: fall back to filesystem-layer crypto when needed
  ANDROID: block: require drivers to declare supported crypto key type(s)
  ANDROID: block: make blk_crypto_start_using_mode() properly check for support
  f2fs: keep inline_data when compression conversion
  f2fs: fix to disable compression on directory
  f2fs: add missing CONFIG_F2FS_FS_COMPRESSION
  f2fs: switch discard_policy.timeout to bool type
  f2fs: fix to verify tpage before releasing in f2fs_free_dic()
  f2fs: show compression in statx
  f2fs: clean up dic->tpages assignment
  f2fs: compress: support zstd compress algorithm
  f2fs: compress: add .{init,destroy}_decompress_ctx callback
  f2fs: compress: fix to call missing destroy_compress_ctx()
  f2fs: change default compression algorithm
  f2fs: clean up {cic,dic}.ref handling
  f2fs: fix to use f2fs_readpage_limit() in f2fs_read_multi_pages()
  f2fs: xattr.h: Make stub helpers inline
  f2fs: fix to avoid double unlock
  f2fs: fix potential .flags overflow on 32bit architecture
  f2fs: fix NULL pointer dereference in f2fs_verity_work()
  f2fs: fix to clear PG_error if fsverity failed
  f2fs: don't call fscrypt_get_encryption_info() explicitly in f2fs_tmpfile()
  f2fs: don't trigger data flush in foreground operation
  f2fs: fix NULL pointer dereference in f2fs_write_begin()
  f2fs: clean up f2fs_may_encrypt()
  f2fs: fix to avoid potential deadlock
  f2fs: don't change inode status under page lock
  f2fs: fix potential deadlock on compressed quota file
  f2fs: delete DIO read lock
  f2fs: don't mark compressed inode dirty during f2fs_iget()
  f2fs: fix to account compressed blocks in f2fs_compressed_blocks()
  f2fs: xattr.h: Replace zero-length array with flexible-array member
  f2fs: fix to update f2fs_super_block fields under sb_lock
  f2fs: Add a new CP flag to help fsck fix resize SPO issues
  f2fs: Fix mount failure due to SPO after a successful online resize FS
  f2fs: use kmem_cache pool during inline xattr lookups
  f2fs: skip migration only when BG_GC is called
  f2fs: fix to show tracepoint correctly
  f2fs: avoid __GFP_NOFAIL in f2fs_bio_alloc
  f2fs: introduce F2FS_IOC_GET_COMPRESS_BLOCKS
  f2fs: fix to avoid triggering IO in write path
  f2fs: add prefix for f2fs slab cache name
  f2fs: introduce DEFAULT_IO_TIMEOUT
  f2fs: skip GC when section is full
  f2fs: add migration count iff migration happens
  f2fs: clean up bggc mount option
  f2fs: clean up lfs/adaptive mount option
  f2fs: fix to show norecovery mount option
  f2fs: clean up parameter of macro XATTR_SIZE()
  f2fs: clean up codes with {f2fs_,}data_blkaddr()
  f2fs: show mounted time
  f2fs: Use scnprintf() for avoiding potential buffer overflow
  f2fs: allow to clear F2FS_COMPR_FL flag
  f2fs: fix to check dirty pages during compressed inode conversion
  f2fs: fix to account compressed inode correctly
  f2fs: fix wrong check on F2FS_IOC_FSSETXATTR
  f2fs: fix to avoid use-after-free in f2fs_write_multi_pages()
  f2fs: fix to avoid using uninitialized variable
  f2fs: fix inconsistent comments
  f2fs: remove i_sem lock coverage in f2fs_setxattr()
  f2fs: cover last_disk_size update with spinlock
  f2fs: fix to check i_compr_blocks correctly
  FROMLIST: kmod: make request_module() return an error when autoloading is disabled
  UPSTREAM: loop: Only freeze block queue when needed.
  UPSTREAM: loop: Only change blocksize when needed.
  ANDROID: Incremental fs: Fix remount
  ANDROID: Incremental fs: Protect get_fill_block, and add a field
  ANDROID: Incremental fs: Fix crash polling 0 size read_log
  ANDROID: Incremental fs: get_filled_blocks: better index_out
  ANDROID: Fix wq fp check for CFI builds
  ANDROID: Incremental fs: Fix four resource bugs
  ANDROID: kbuild: ensure __cfi_check is correctly aligned
  ANDROID: kbuild: fix module linker script flags for LTO
  Linux 4.14.175
  arm64: dts: ls1046ardb: set RGMII interfaces to RGMII_ID mode
  arm64: dts: ls1043a-rdb: correct RGMII delay mode to rgmii-id
  ARM: bcm2835-rpi-zero-w: Add missing pinctrl name
  ARM: dts: oxnas: Fix clear-mask property
  perf map: Fix off by one in strncpy() size argument
  arm64: alternative: fix build with clang integrated assembler
  net: ks8851-ml: Fix IO operations, again
  gpiolib: acpi: Add quirk to ignore EC wakeups on HP x2 10 CHT + AXP288 model
  bpf: Explicitly memset some bpf info structures declared on the stack
  bpf: Explicitly memset the bpf_attr structure
  platform/x86: pmc_atom: Add Lex 2I385SW to critclk_systems DMI table
  vt: vt_ioctl: fix use-after-free in vt_in_use()
  vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console
  vt: vt_ioctl: remove unnecessary console allocation checks
  vt: switch vt_dont_switch to bool
  vt: ioctl, switch VT_IS_IN_USE and VT_BUSY to inlines
  vt: selection, introduce vc_is_sel
  mac80211: fix authentication with iwlwifi/mvm
  mac80211: Check port authorization in the ieee80211_tx_dequeue() case
  media: xirlink_cit: add missing descriptor sanity checks
  media: stv06xx: add missing descriptor sanity checks
  media: dib0700: fix rc endpoint lookup
  media: ov519: add missing endpoint sanity checks
  libfs: fix infoleak in simple_attr_read()
  staging: wlan-ng: fix use-after-free Read in hfa384x_usbin_callback
  staging: wlan-ng: fix ODEBUG bug in prism2sta_disconnect_usb
  staging: rtl8188eu: Add ASUS USB-N10 Nano B1 to device table
  media: usbtv: fix control-message timeouts
  media: flexcop-usb: fix endpoint sanity check
  usb: musb: fix crash with highmem PIO and usbmon
  USB: serial: io_edgeport: fix slab-out-of-bounds read in edge_interrupt_callback
  USB: cdc-acm: restore capability check order
  USB: serial: option: add Wistron Neweb D19Q1
  USB: serial: option: add BroadMobi BM806U
  USB: serial: option: add support for ASKEY WWHC050
  afs: Fix some tracing details
  Input: raydium_i2c_ts - fix error codes in raydium_i2c_boot_trigger()
  Input: raydium_i2c_ts - use true and false for boolean values
  vti6: Fix memory leak of skb if input policy check fails
  netfilter: nft_fwd_netdev: validate family and chain type
  xfrm: policy: Fix double free in xfrm_policy_timer
  xfrm: add the missing verify_sec_ctx_len check in xfrm_add_acquire
  xfrm: fix uctx len check in verify_sec_ctx_len
  RDMA/mlx5: Block delay drop to unprivileged users
  vti[6]: fix packet tx through bpf_redirect() in XinY cases
  xfrm: handle NETDEV_UNREGISTER for xfrm device
  genirq: Fix reference leaks on irq affinity notifiers
  RDMA/core: Ensure security pkey modify is not lost
  gpiolib: acpi: Add quirk to ignore EC wakeups on HP x2 10 BYT + AXP288 model
  gpiolib: acpi: Rework honor_wakeup option into an ignore_wake option
  gpiolib: acpi: Correct comment for HP x2 10 honor_wakeup quirk
  mac80211: mark station unauthorized before key removal
  scsi: sd: Fix optimal I/O size for devices that change reported values
  scripts/dtc: Remove redundant YYLOC global declaration
  tools: Let O= make handle a relative path with -C option
  perf probe: Do not depend on dwfl_module_addrsym()
  ARM: dts: omap5: Add bus_dma_limit for L3 bus
  ARM: dts: dra7: Add bus_dma_limit for L3 bus
  Input: avoid BIT() macro usage in the serio.h UAPI header
  Input: synaptics - enable RMI on HP Envy 13-ad105ng
  i2c: hix5hd2: add missed clk_disable_unprepare in remove
  ftrace/x86: Annotate text_mutex split between ftrace_arch_code_modify_post_process() and ftrace_arch_code_modify_prepare()
  arm64: compat: map SPSR_ELx<->PSR for signals
  arm64: ptrace: map SPSR_ELx<->PSR for compat tasks
  sxgbe: Fix off by one in samsung driver strncpy size arg
  dpaa_eth: Remove unnecessary boolean expression in dpaa_get_headroom
  mac80211: Do not send mesh HWMP PREQ if HWMP is disabled
  scsi: ipr: Fix softlockup when rescanning devices in petitboot
  fsl/fman: detect FMan erratum A050385
  arm64: dts: ls1043a: FMan erratum A050385
  dt-bindings: net: FMan erratum A050385
  cgroup1: don't call release_agent when it is ""
  drivers/of/of_mdio.c: fix of_mdiobus_register()
  cpupower: avoid multiple definition with gcc -fno-common
  cgroup-v1: cgroup_pidlist_next should update position index
  net: ipv4: don't let PMTU updates increase route MTU
  hsr: set .netnsok flag
  hsr: add restart routine into hsr_get_node_list()
  hsr: use rcu_read_lock() in hsr_get_node_{list/status}()
  vxlan: check return value of gro_cells_init()
  net: dsa: mt7530: Change the LINK bit to reflect the link status
  bnxt_en: fix memory leaks in bnxt_dcbnl_ieee_getets()
  slcan: not call free_netdev before rtnl_unlock in slcan_open
  NFC: fdp: Fix a signedness bug in fdp_nci_send_patch()
  net: stmmac: dwmac-rk: fix error path in rk_gmac_probe
  net_sched: keep alloc_hash updated after hash allocation
  net_sched: cls_route: remove the right filter from hashtable
  net: qmi_wwan: add support for ASKEY WWHC050
  net/packet: tpacket_rcv: avoid a producer race condition
  net: mvneta: Fix the case where the last poll did not process all rx
  net: dsa: Fix duplicate frames flooded by learning
  macsec: restrict to ethernet devices
  hsr: fix general protection fault in hsr_addr_is_self()
  Revert "drm/dp_mst: Skip validating ports during destruction, just ref"
  staging: greybus: loopback_test: fix potential path truncations
  staging: greybus: loopback_test: fix potential path truncation
  drm/bridge: dw-hdmi: fix AVI frame colorimetry
  arm64: smp: fix crash_smp_send_stop() behaviour
  arm64: smp: fix smp_send_stop() behaviour
  ALSA: hda/realtek: Fix pop noise on ALC225
  Revert "ipv6: Fix handling of LLA with VRF and sockets bound to VRF"
  Revert "vrf: mark skb for multicast or link-local as enslaved to VRF"
  futex: Unbreak futex hashing
  futex: Fix inode life-time issue
  kbuild: Disable -Wpointer-to-enum-cast
  iio: adc: at91-sama5d2_adc: fix differential channels in triggered mode
  iio: adc: at91-sama5d2_adc: fix channel configuration for differential channels
  USB: cdc-acm: fix rounding error in TIOCSSERIAL
  USB: cdc-acm: fix close_delay and closing_wait units in TIOCSSERIAL
  x86/mm: split vmalloc_sync_all()
  page-flags: fix a crash at SetPageError(THP_SWAP)
  mm, slub: prevent kmalloc_node crashes and memory leaks
  mm: slub: be more careful about the double cmpxchg of freelist
  memcg: fix NULL pointer dereference in __mem_cgroup_usage_unregister_event
  xhci: Do not open code __print_symbolic() in xhci trace events
  rtc: max8907: add missing select REGMAP_IRQ
  intel_th: pci: Add Elkhart Lake CPU support
  intel_th: Fix user-visible error codes
  staging/speakup: fix get_word non-space look-ahead
  staging: rtl8188eu: Add device id for MERCUSYS MW150US v2
  mmc: sdhci-of-at91: fix cd-gpios for SAMA5D2
  iio: magnetometer: ak8974: Fix negative raw values in sysfs
  iio: trigger: stm32-timer: disable master mode when stopping
  ALSA: pcm: oss: Remove WARNING from snd_pcm_plug_alloc() checks
  ALSA: pcm: oss: Avoid plugin buffer overflow
  ALSA: seq: oss: Fix running status after receiving sysex
  ALSA: seq: virmidi: Fix running status after receiving sysex
  ALSA: line6: Fix endless MIDI read loop
  usb: xhci: apply XHCI_SUSPEND_DELAY to AMD XHCI controller 1022:145c
  USB: serial: pl2303: add device-id for HP LD381
  usb: host: xhci-plat: add a shutdown
  USB: serial: option: add ME910G1 ECM composition 0x110b
  usb: quirks: add NO_LPM quirk for RTL8153 based ethernet adapters
  USB: Disable LPM on WD19's Realtek Hub
  parse-maintainers: Mark as executable
  block, bfq: fix overwrite of bfq_group pointer in bfq_find_set_group()
  xenbus: req->err should be updated before req->state
  xenbus: req->body should be updated before req->state
  dm bio record: save/restore bi_end_io and bi_integrity
  altera-stapl: altera_get_note: prevent write beyond end of 'key'
  drivers/perf: arm_pmu_acpi: Fix incorrect checking of gicc pointer
  drm/exynos: dsi: fix workaround for the legacy clock name
  drm/exynos: dsi: propagate error value and silence meaningless warning
  spi/zynqmp: remove entry that causes a cs glitch
  spi: pxa2xx: Add CS control clock quirk
  ARM: dts: dra7: Add "dma-ranges" property to PCIe RC DT nodes
  powerpc: Include .BTF section
  spi: qup: call spi_qup_pm_resume_runtime before suspending
  UPSTREAM: ubifs: wire up FS_IOC_GET_ENCRYPTION_NONCE
  UPSTREAM: f2fs: wire up FS_IOC_GET_ENCRYPTION_NONCE
  UPSTREAM: ext4: wire up FS_IOC_GET_ENCRYPTION_NONCE
  UPSTREAM: fscrypt: add FS_IOC_GET_ENCRYPTION_NONCE ioctl
  UPSTREAM: usb: raw_gadget: fix compilation warnings in uapi headers
  BACKPORT: usb: gadget: add raw-gadget interface
  UPSTREAM: usb: gadget: move choice ... endchoice to legacy/Kconfig
  ANDROID: clang: update to 10.0.5
  FROMLIST: arm64: define __alloc_zeroed_user_highpage
  ANDROID: Incremental fs: Add INCFS_IOC_GET_FILLED_BLOCKS
  ANDROID: Incremental fs: Fix two typos
  f2fs: fix to avoid potential deadlock
  f2fs: add missing function name in kernel message
  f2fs: recycle unused compress_data.chksum field
  f2fs: fix to avoid NULL pointer dereference
  f2fs: fix leaking uninitialized memory in compressed clusters
  f2fs: fix the panic in do_checkpoint()
  f2fs: fix to wait all node page writeback
  mm/swapfile.c: move inode_lock out of claim_swapfile
  UPSTREAM: ipv6: ndisc: add support for 'PREF64' dns64 prefix identifier
  ANDROID: dm-bow: Fix incorrect free_show value
  UPSTREAM: coresight: Potential uninitialized variable in probe()
  ANDROID: kbuild: do not merge .section..* into .section in modules
  ANDROID: scsi: ufs: add ->map_sg_crypto() variant op
  UPSTREAM: bpf: Explicitly memset some bpf info structures declared on the stack
  UPSTREAM: bpf: Explicitly memset the bpf_attr structure
  Linux 4.14.174
  ipv4: ensure rcu_read_lock() in cipso_v4_error()
  mm: slub: add missing TID bump in kmem_cache_alloc_bulk()
  ARM: 8958/1: rename missed uaccess .fixup section
  ARM: 8957/1: VDSO: Match ARMv8 timer in cntvct_functional()
  jbd2: fix data races at struct journal_head
  net: rmnet: fix NULL pointer dereference in rmnet_newlink()
  hinic: fix a bug of setting hw_ioctxt
  slip: not call free_netdev before rtnl_unlock in slip_open
  signal: avoid double atomic counter increments for user accounting
  mac80211: rx: avoid RCU list traversal under mutex
  net: ks8851-ml: Fix IRQ handling and locking
  net: usb: qmi_wwan: restore mtu min/max values after raw_ip switch
  scsi: libfc: free response frame from GPN_ID
  cfg80211: check reg_rule for NULL in handle_channel_custom()
  HID: i2c-hid: add Trekstor Surfbook E11B to descriptor override
  HID: apple: Add support for recent firmware on Magic Keyboards
  ACPI: watchdog: Allow disabling WDAT at boot
  perf/amd/uncore: Replace manual sampling check with CAP_NO_INTERRUPT flag
  batman-adv: Don't schedule OGM for disabled interface
  batman-adv: Avoid free/alloc race when handling OGM buffer
  batman-adv: Avoid free/alloc race when handling OGM2 buffer
  batman-adv: Fix duplicated OGMs on NETDEV_UP
  batman-adv: Fix debugfs path for renamed softif
  batman-adv: Fix debugfs path for renamed hardif
  batman-adv: prevent TT request storms by not sending inconsistent TT TLVs
  batman-adv: Fix TT sync flags for intermediate TT responses
  batman-adv: Avoid race in TT TVLV allocator helper
  batman-adv: update data pointers after skb_cow()
  batman-adv: Fix internal interface indices types
  batman-adv: Fix lock for ogm cnt access in batadv_iv_ogm_calc_tq
  batman-adv: Fix check of retrieved orig_gw in batadv_v_gw_is_eligible
  batman-adv: Always initialize fragment header priority
  batman-adv: Avoid spurious warnings from bat_v neigh_cmp implementation
  efi: Add a sanity check to efivar_store_raw()
  net/smc: check for valid ib_client_data
  ipv6: restrict IPV6_ADDRFORM operation
  i2c: acpi: put device when verifying client fails
  iommu/vt-d: Ignore devices with out-of-spec domain number
  iommu/vt-d: Fix the wrong printing in RHSA parsing
  netfilter: nft_payload: add missing attribute validation for payload csum flags
  netfilter: cthelper: add missing attribute validation for cthelper
  nl80211: add missing attribute validation for channel switch
  nl80211: add missing attribute validation for beacon report scanning
  nl80211: add missing attribute validation for critical protocol indication
  pinctrl: core: Remove extra kref_get which blocks hogs being freed
  pinctrl: meson-gxl: fix GPIOX sdio pins
  iommu/vt-d: Fix a bug in intel_iommu_iova_to_phys() for huge page
  iommu/vt-d: dmar: replace WARN_TAINT with pr_warn + add_taint
  iommu/dma: Fix MSI reservation allocation
  x86/mce: Fix logic and comments around MSR_PPIN_CTL
  efi: Fix a race and a buffer overflow while reading efivars via sysfs
  ARC: define __ALIGN_STR and __ALIGN symbols for ARC
  KVM: x86: clear stale x86_emulate_ctxt->intercept value
  gfs2_atomic_open(): fix O_EXCL|O_CREAT handling on cold dcache
  cifs_atomic_open(): fix double-put on late allocation failure
  ktest: Add timeout for ssh sync testing
  drm/amd/display: remove duplicated assignment to grph_obj_type
  workqueue: don't use wq_select_unbound_cpu() for bound works
  iommu/vt-d: quirk_ioat_snb_local_iommu: replace WARN_TAINT with pr_warn + add_taint
  virtio-blk: fix hw_queue stopped on arbitrary error
  iwlwifi: mvm: Do not require PHY_SKU NVM section for 3168 devices
  cgroup: Iterate tasks that did not finish do_exit()
  cgroup: cgroup_procs_next should increase position index
  ipvlan: don't deref eth hdr before checking it's set
  ipvlan: egress mcast packets are not exceptional
  ipvlan: do not add hardware address of master to its unicast filter list
  inet_diag: return classid for all socket types
  macvlan: add cond_resched() during multicast processing
  net: fec: validate the new settings in fec_enet_set_coalesce()
  slip: make slhc_compress() more robust against malicious packets
  bonding/alb: make sure arp header is pulled before accessing it
  net: phy: fix MDIO bus PM PHY resuming
  nfc: add missing attribute validation for vendor subcommand
  nfc: add missing attribute validation for SE API
  team: add missing attribute validation for array index
  team: add missing attribute validation for port ifindex
  net: fq: add missing attribute validation for orphan mask
  macsec: add missing attribute validation for port
  can: add missing attribute validation for termination
  nl802154: add missing attribute validation for dev_type
  nl802154: add missing attribute validation
  fib: add missing attribute validation for tun_id
  net: memcg: fix lockdep splat in inet_csk_accept()
  net: memcg: late association of sock to memcg
  cgroup: memcg: net: do not associate sock with unrelated cgroup
  bnxt_en: reinitialize IRQs when MTU is modified
  sfc: detach from cb_page in efx_copy_channel()
  r8152: check disconnect status after long sleep
  net/packet: tpacket_rcv: do not increment ring index on drop
  net: nfc: fix bounds checking bugs on "pipe"
  net: macsec: update SCI upon MAC address change.
  netlink: Use netlink header as base to calculate bad attribute offset
  ipvlan: do not use cond_resched_rcu() in ipvlan_process_multicast()
  ipvlan: add cond_resched_rcu() while processing multicast backlog
  ipv6/addrconf: call ipv6_mc_up() for non-Ethernet interface
  gre: fix uninit-value in __iptunnel_pull_header
  cgroup, netclassid: periodically release file_lock on classid updating
  net: phy: Avoid multiple suspends
  phy: Revert toggling reset changes.
  ANDROID: Incremental fs: Add INCFS_IOC_PERMIT_FILL
  ANDROID: Incremental fs: Remove signature checks from kernel
  ANDROID: Incremental fs: Pad hash blocks
  ANDROID: Incremental fs: Make fill block an ioctl
  ANDROID: Incremental fs: Remove all access_ok checks
  UPSTREAM: cgroup: Iterate tasks that did not finish do_exit()
  UPSTREAM: arm64: memory: Add missing brackets to untagged_addr() macro
  UPSTREAM: mm: Avoid creating virtual address aliases in brk()/mmap()/mremap()
  ANDROID: Add TPM support and the vTPM proxy to Cuttlefish.
  ANDROID: serdev: restrict claim of platform devices
  UPSTREAM: fscrypt: don't evict dirty inodes after removing key
  fscrypt: don't evict dirty inodes after removing key
  Linux 4.14.173
  ASoC: topology: Fix memleak in soc_tplg_manifest_load()
  xhci: handle port status events for removed USB3 hcd
  dm integrity: fix a deadlock due to offloading to an incorrect workqueue
  powerpc: fix hardware PMU exception bug on PowerVM compatibility mode systems
  dmaengine: coh901318: Fix a double lock bug in dma_tc_handle()
  hwmon: (adt7462) Fix an error return in ADT7462_REG_VOLT()
  ARM: imx: build v7_cpu_resume() unconditionally
  IB/hfi1, qib: Ensure RCU is locked when accessing list
  RDMA/cm: Fix missing ib_cm_destroy_id() in ib_cm_insert_listen()
  RDMA/iwcm: Fix iwcm work deallocation
  ASoC: dapm: Correct DAPM handling of active widgets during shutdown
  ASoC: pcm512x: Fix unbalanced regulator enable call in probe error path
  ASoC: pcm: Fix possible buffer overflow in dpcm state sysfs output
  ASoC: intel: skl: Fix possible buffer overflow in debug outputs
  ASoC: intel: skl: Fix pin debug prints
  ASoC: topology: Fix memleak in soc_tplg_link_elems_load()
  ARM: dts: ls1021a: Restore MDIO compatible to gianfar
  dm cache: fix a crash due to incorrect work item cancelling
  dmaengine: tegra-apb: Prevent race conditions of tasklet vs free list
  dmaengine: tegra-apb: Fix use-after-free
  x86/pkeys: Manually set X86_FEATURE_OSPKE to preserve existing changes
  vt: selection, push sel_lock up
  vt: selection, push console lock down
  vt: selection, close sel_buffer race
  serial: 8250_exar: add support for ACCES cards
  tty: serial: mvebu-uart: fix a wrong return
  arm: dts: dra76x: Fix mmc3 max-frequency
  fat: fix uninit-memory access for partial initialized inode
  mm, numa: fix bad pmd by atomically checking for pmd_trans_huge when marking page tables prot_numa
  vgacon: Fix a UAF in vgacon_invert_region
  usb: core: port: do error out if usb_autopm_get_interface() fails
  usb: core: hub: do error out if usb_autopm_get_interface() fails
  usb: core: hub: fix unhandled return by employing a void function
  usb: quirks: add NO_LPM quirk for Logitech Screen Share
  usb: storage: Add quirk for Samsung Fit flash
  cifs: don't leak -EAGAIN for stat() during reconnect
  net: thunderx: workaround BGX TX Underflow issue
  x86/xen: Distribute switch variables for initialization
  nvme: Fix uninitialized-variable warning
  x86/boot/compressed: Don't declare __force_order in kaslr_64.c
  s390/cio: cio_ignore_proc_seq_next should increase position index
  watchdog: da9062: do not ping the hw during stop()
  net: ks8851-ml: Fix 16-bit IO operation
  net: ks8851-ml: Fix 16-bit data access
  net: ks8851-ml: Remove 8-bit bus accessors
  drm/msm/dsi: save pll state before dsi host is powered off
  drm: msm: Fix return type of dsi_mgr_connector_mode_valid for kCFI
  drm/msm/mdp5: rate limit pp done timeout warnings
  usb: gadget: serial: fix Tx stall after buffer overflow
  usb: gadget: ffs: ffs_aio_cancel(): Save/restore IRQ flags
  usb: gadget: composite: Support more than 500mA MaxPower
  selftests: fix too long argument
  serial: ar933x_uart: set UART_CS_{RX,TX}_READY_ORIDE
  kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic
  RDMA/core: Fix use of logical OR in get_new_pps
  RDMA/core: Fix pkey and port assignment in get_new_pps
  net: dsa: bcm_sf2: Forcibly configure IMP port for 1Gb/sec
  EDAC/amd64: Set grain per DIMM
  x86/mce: Handle varying MCA bank counts
  vhost: Check socket sk_family instead of calling getname
  audit: always check the netlink payload length in audit_receive_msg()
  Revert "char/random: silence a lockdep splat with printk()"
  mm, thp: fix defrag setting if newline is not used
  mm/huge_memory.c: use head to check huge zero page
  perf hists browser: Restore ESC as "Zoom out" of DSO/thread/etc
  kprobes: Set unoptimized flag after unoptimizing code
  drivers: net: xgene: Fix the order of the arguments of 'alloc_etherdev_mqs()'
  tuntap: correctly set SOCKWQ_ASYNC_NOSPACE
  KVM: Check for a bad hva before dropping into the ghc slow path
  KVM: SVM: Override default MMIO mask if memory encryption is enabled
  mwifiex: drop most magic numbers from mwifiex_process_tdls_action_frame()
  namei: only return -ECHILD from follow_dotdot_rcu()
  net: ena: make ena rxfh support ETH_RSS_HASH_NO_CHANGE
  net: atlantic: fix potential error handling
  net: netlink: cap max groups which will be considered in netlink_bind()
  include/linux/bitops.h: introduce BITS_PER_TYPE
  ecryptfs: Fix up bad backport of fe2e082f5da5b4a0a92ae32978f81507ef37ec66
  usb: charger: assign specific number for enum value
  drm/i915/gvt: Separate display reset from ALL_ENGINES reset
  i2c: jz4780: silence log flood on txabrt
  i2c: altera: Fix potential integer overflow
  MIPS: VPE: Fix a double free and a memory leak in 'release_vpe()'
  HID: hiddev: Fix race in hiddev_disconnect()
  Revert "PM / devfreq: Modify the device name as devfreq(X) for sysfs"
  tracing: Disable trace_printk() on postponed tests
  HID: core: increase HID report buffer size to 8KiB
  HID: core: fix off-by-one memset in hid_report_raw_event()
  HID: ite: Only bind to keyboard USB interface on Acer SW5-012 keyboard dock
  KVM: VMX: check descriptor table exits on instruction emulation
  ACPI: watchdog: Fix gas->access_width usage
  ACPICA: Introduce ACPI_ACCESS_BYTE_WIDTH() macro
  audit: fix error handling in audit_data_to_entry()
  ext4: potential crash on allocation error in ext4_alloc_flex_bg_array()
  net: sched: correct flower port blocking
  qede: Fix race between rdma destroy workqueue and link change event
  ipv6: Fix route replacement with dev-only route
  ipv6: Fix nlmsg_flags when splitting a multipath route
  sctp: move the format error check out of __sctp_sf_do_9_1_abort
  nfc: pn544: Fix occasional HW initialization failure
  net: phy: restore mdio regs in the iproc mdio driver
  net: fib_rules: Correctly set table field when table number exceeds 8 bits
  sysrq: Remove duplicated sysrq message
  sysrq: Restore original console_loglevel when sysrq disabled
  cfg80211: add missing policy for NL80211_ATTR_STATUS_CODE
  cifs: Fix mode output in debugging statements
  net: ena: ena-com.c: prevent NULL pointer dereference
  net: ena: ethtool: use correct value for crc32 hash
  net: ena: fix incorrectly saving queue numbers when setting RSS indirection table
  net: ena: rss: store hash function as values and not bits
  net: ena: rss: fix failure to get indirection table
  net: ena: fix incorrect default RSS key
  net: ena: add missing ethtool TX timestamping indication
  net: ena: fix uses of round_jiffies()
  net: ena: fix potential crash when rxfh key is NULL
  qmi_wwan: unconditionally reject 2 ep interfaces
  qmi_wwan: re-add DW5821e pre-production variant
  cfg80211: check wiphy driver existence for drvinfo report
  mac80211: consider more elements in parsing CRC
  dax: pass NOWAIT flag to iomap_apply
  drm/msm: Set dma maximum segment size for mdss
  ipmi:ssif: Handle a possible NULL pointer reference
  ext4: fix potential race between s_group_info online resizing and access
  ext4: fix potential race between s_flex_groups online resizing and access
  ext4: fix potential race between online resizing and write operations
  netfilter: nf_conntrack: resolve clash for matching conntracks
  iwlwifi: pcie: fix rb_allocator workqueue allocation
  FROMLIST: f2fs: fix wrong check on F2FS_IOC_FSSETXATTR
  UPSTREAM: binder: prevent UAF for binderfs devices II
  UPSTREAM: binder: prevent UAF for binderfs devices
  FROMLIST: lib: test_stackinit.c: XFAIL switch variable init tests
  ANDROID: cuttlefish: disable KPROBES
  ANDROID: scsi: ufs: allow ufs variants to override sg entry size
  FROMLIST: ufs: fix a bug on printing PRDT
  BACKPORT: loop: Add LOOP_SET_BLOCK_SIZE in compat ioctl
  ANDROID: fix build issue in security/selinux/avc.c
  ANDROID: cuttlefish_defconfig: Disable CONFIG_RT_GROUP_SCHED
  ANDROID: Enable HID_NINTENDO as y
  FROMLIST: HID: nintendo: add nintendo switch controller driver
  Linux 4.14.172
  s390/mm: Explicitly compare PAGE_DEFAULT_KEY against zero in storage_key_init_range
  xen: Enable interrupts when calling _cond_resched()
  ata: ahci: Add shutdown to freeze hardware resources of ahci
  netfilter: xt_hashlimit: limit the max size of hashtable
  ALSA: seq: Fix concurrent access to queue current tick/time
  ALSA: seq: Avoid concurrent access to queue flags
  ALSA: rawmidi: Avoid bit fields for state flags
  genirq/proc: Reject invalid affinity masks (again)
  iommu/vt-d: Fix compile warning from intel-svm.h
  ecryptfs: replace BUG_ON with error handling code
  staging: greybus: use after free in gb_audio_manager_remove_all()
  staging: rtl8723bs: fix copy of overlapping memory
  usb: gadget: composite: Fix bMaxPower for SuperSpeedPlus
  scsi: Revert "target: iscsi: Wait for all commands to finish before freeing a session"
  scsi: Revert "RDMA/isert: Fix a recently introduced regression related to logout"
  Btrfs: fix btrfs_wait_ordered_range() so that it waits for all ordered extents
  btrfs: do not check delayed items are empty for single transaction cleanup
  btrfs: fix bytes_may_use underflow in prealloc error condtition
  KVM: apic: avoid calculating pending eoi from an uninitialized val
  KVM: nVMX: handle nested posted interrupts when apicv is disabled for L1
  KVM: nVMX: Check IO instruction VM-exit conditions
  KVM: nVMX: Refactor IO bitmap checks into helper function
  ext4: fix race between writepages and enabling EXT4_EXTENTS_FL
  ext4: rename s_journal_flag_rwsem to s_writepages_rwsem
  ext4: fix mount failure with quota configured as module
  ext4: add cond_resched() to __ext4_find_entry()
  ext4: fix a data race in EXT4_I(inode)->i_disksize
  KVM: nVMX: Don't emulate instructions in guest mode
  lib/stackdepot.c: fix global out-of-bounds in stack_slabs
  serial: 8250: Check UPF_IRQ_SHARED in advance
  vt: vt_ioctl: fix race in VT_RESIZEX
  VT_RESIZEX: get rid of field-by-field copyin
  xhci: apply XHCI_PME_STUCK_QUIRK to Intel Comet Lake platforms
  KVM: x86: don't notify userspace IOAPIC on edge-triggered interrupt EOI
  drm/amdgpu/soc15: fix xclk for raven
  mm/vmscan.c: don't round up scan size for online memory cgroup
  Revert "ipc,sem: remove uneeded sem_undo_list lock usage in exit_sem()"
  MAINTAINERS: Update drm/i915 bug filing URL
  serdev: ttyport: restore client ops on deregistration
  tty: serial: imx: setup the correct sg entry for tx dma
  tty/serial: atmel: manage shutdown in case of RS485 or ISO7816 mode
  x86/mce/amd: Fix kobject lifetime
  x86/mce/amd: Publish the bank pointer only after setup has succeeded
  staging: rtl8723bs: Fix potential overuse of kernel memory
  staging: rtl8723bs: Fix potential security hole
  staging: rtl8188eu: Fix potential overuse of kernel memory
  staging: rtl8188eu: Fix potential security hole
  USB: hub: Fix the broken detection of USB3 device in SMSC hub
  USB: hub: Don't record a connect-change event during reset-resume
  USB: Fix novation SourceControl XL after suspend
  usb: uas: fix a plug & unplug racing
  usb: host: xhci: update event ring dequeue pointer on purpose
  xhci: fix runtime pm enabling for quirky Intel hosts
  xhci: Force Maximum Packet size for Full-speed bulk devices to valid range.
  staging: vt6656: fix sign of rx_dbm to bb_pre_ed_rssi.
  staging: android: ashmem: Disallow ashmem memory from being remapped
  vt: selection, handle pending signals in paste_selection
  floppy: check FDC index for errors before assigning it
  USB: misc: iowarrior: add support for the 100 device
  USB: misc: iowarrior: add support for the 28 and 28L devices
  USB: misc: iowarrior: add support for 2 OEMed devices
  thunderbolt: Prevent crash if non-active NVMem file is read
  net/smc: fix leak of kernel memory to user space
  net/sched: flower: add missing validation of TCA_FLOWER_FLAGS
  net/sched: matchall: add missing validation of TCA_MATCHALL_FLAGS
  net: dsa: tag_qca: Make sure there is headroom for tag
  enic: prevent waking up stopped tx queues over watchdog reset
  selinux: ensure we cleanup the internal AVC counters on error in avc_update()
  mlxsw: spectrum_dpipe: Add missing error path
  virtio_balloon: prevent pfn array overflow
  help_next should increase position index
  brd: check and limit max_part par
  microblaze: Prevent the overflow of the start
  iwlwifi: mvm: Fix thermal zone registration
  irqchip/gic-v3-its: Reference to its_invall_cmd descriptor when building INVALL
  bcache: explicity type cast in bset_bkey_last()
  reiserfs: prevent NULL pointer dereference in reiserfs_insert_item()
  lib/scatterlist.c: adjust indentation in __sg_alloc_table
  ocfs2: fix a NULL pointer dereference when call ocfs2_update_inode_fsync_trans()
  radeon: insert 10ms sleep in dce5_crtc_load_lut
  trigger_next should increase position index
  ftrace: fpid_next() should increase position index
  drm/nouveau/disp/nv50-: prevent oops when no channel method map provided
  irqchip/gic-v3: Only provision redistributors that are enabled in ACPI
  ceph: check availability of mds cluster on mount after wait timeout
  cifs: fix NULL dereference in match_prepath
  iwlegacy: ensure loop counter addr does not wrap and cause an infinite loop
  hostap: Adjust indentation in prism2_hostapd_add_sta
  ARM: 8951/1: Fix Kexec compilation issue.
  jbd2: make sure ESHUTDOWN to be recorded in the journal superblock
  jbd2: switch to use jbd2_journal_abort() when failed to submit the commit record
  powerpc/sriov: Remove VF eeh_dev state when disabling SR-IOV
  ALSA: hda - Add docking station support for Lenovo Thinkpad T420s
  driver core: platform: fix u32 greater or equal to zero comparison
  s390/ftrace: generate traced function stack frame
  x86/decoder: Add TEST opcode to Group3-2
  ALSA: hda/hdmi - add retry logic to parse_intel_hdmi()
  irqchip/mbigen: Set driver .suppress_bind_attrs to avoid remove problems
  remoteproc: Initialize rproc_class before use
  btrfs: device stats, log when stats are zeroed
  btrfs: safely advance counter when looking up bio csums
  btrfs: fix possible NULL-pointer dereference in integrity checks
  pwm: Remove set but not set variable 'pwm'
  ide: serverworks: potential overflow in svwks_set_pio_mode()
  cmd64x: potential buffer overflow in cmd64x_program_timings()
  pwm: omap-dmtimer: Remove PWM chip in .remove before making it unfunctional
  x86/mm: Fix NX bit clearing issue in kernel_map_pages_in_pgd
  f2fs: fix memleak of kobject
  watchdog/softlockup: Enforce that timestamp is valid on boot
  arm64: fix alternatives with LLVM's integrated assembler
  scsi: iscsi: Don't destroy session if there are outstanding connections
  f2fs: free sysfs kobject
  iommu/arm-smmu-v3: Use WRITE_ONCE() when changing validity of an STE
  usb: musb: omap2430: Get rid of musb .set_vbus for omap2430 glue
  drm/vmwgfx: prevent memory leak in vmw_cmdbuf_res_add
  drm/nouveau: Fix copy-paste error in nouveau_fence_wait_uevent_handler
  drm/nouveau/gr/gk20a,gm200-: add terminators to method lists read from fw
  drm/nouveau/secboot/gm20b: initialize pointer in gm20b_secboot_new()
  vme: bridges: reduce stack usage
  driver core: Print device when resources present in really_probe()
  driver core: platform: Prevent resouce overflow from causing infinite loops
  tty: synclink_gt: Adjust indentation in several functions
  tty: synclinkmp: Adjust indentation in several functions
  ASoC: atmel: fix build error with CONFIG_SND_ATMEL_SOC_DMA=m
  wan: ixp4xx_hss: fix compile-testing on 64-bit
  Input: edt-ft5x06 - work around first register access error
  rcu: Use WRITE_ONCE() for assignments to ->pprev for hlist_nulls
  efi/x86: Don't panic or BUG() on non-critical error conditions
  soc/tegra: fuse: Correct straps' address for older Tegra124 device trees
  IB/hfi1: Add software counter for ctxt0 seq drop
  udf: Fix free space reporting for metadata and virtual partitions
  usbip: Fix unsafe unaligned pointer usage
  drm: remove the newline for CRC source name.
  tools lib api fs: Fix gcc9 stringop-truncation compilation error
  ALSA: sh: Fix compile warning wrt const
  ALSA: sh: Fix unused variable warnings
  clk: sunxi-ng: add mux and pll notifiers for A64 CPU clock
  RDMA/rxe: Fix error type of mmap_offset
  pinctrl: sh-pfc: sh7269: Fix CAN function GPIOs
  PM / devfreq: rk3399_dmc: Add COMPILE_TEST and HAVE_ARM_SMCCC dependency
  x86/vdso: Provide missing include file
  dmaengine: Store module owner in dma_device struct
  ARM: dts: r8a7779: Add device node for ARM global timer
  drm/mediatek: handle events when enabling/disabling crtc
  scsi: aic7xxx: Adjust indentation in ahc_find_syncrate
  scsi: ufs: Complete pending requests in host reset and restore path
  ACPICA: Disassembler: create buffer fields in ACPI_PARSE_LOAD_PASS1
  orinoco: avoid assertion in case of NULL pointer
  rtlwifi: rtl_pci: Fix -Wcast-function-type
  iwlegacy: Fix -Wcast-function-type
  ipw2x00: Fix -Wcast-function-type
  b43legacy: Fix -Wcast-function-type
  ALSA: usx2y: Adjust indentation in snd_usX2Y_hwdep_dsp_status
  fore200e: Fix incorrect checks of NULL pointer dereference
  reiserfs: Fix spurious unlock in reiserfs_fill_super() error handling
  media: v4l2-device.h: Explicitly compare grp{id,mask} to zero in v4l2_device macros
  ARM: dts: imx6: rdu2: Disable WP for USDHC2 and USDHC3
  arm64: dts: qcom: msm8996: Disable USB2 PHY suspend by core
  NFC: port100: Convert cpu_to_le16(le16_to_cpu(E1) + E2) to use le16_add_cpu().
  PCI/IOV: Fix memory leak in pci_iov_add_virtfn()
  net/wan/fsl_ucc_hdlc: reject muram offsets above 64K
  regulator: rk808: Lower log level on optional GPIOs being not available
  drm/amdgpu: remove 4 set but not used variable in amdgpu_atombios_get_connector_info_from_object_table
  clk: qcom: rcg2: Don't crash if our parent can't be found; return an error
  kconfig: fix broken dependency in randconfig-generated .config
  KVM: s390: ENOTSUPP -> EOPNOTSUPP fixups
  nbd: add a flush_workqueue in nbd_start_device
  ext4, jbd2: ensure panic when aborting with zero errno
  tracing: Fix very unlikely race of registering two stat tracers
  tracing: Fix tracing_stat return values in error handling paths
  x86/sysfb: Fix check for bad VRAM size
  jbd2: clear JBD2_ABORT flag before journal_reset to update log tail info when load journal
  kselftest: Minimise dependency of get_size on C library interfaces
  clocksource/drivers/bcm2835_timer: Fix memory leak of timer
  usb: dwc2: Fix IN FIFO allocation
  usb: gadget: udc: fix possible sleep-in-atomic-context bugs in gr_probe()
  uio: fix a sleep-in-atomic-context bug in uio_dmem_genirq_irqcontrol()
  sparc: Add .exit.data section.
  MIPS: Loongson: Fix potential NULL dereference in loongson3_platform_init()
  efi/x86: Map the entire EFI vendor string before copying it
  pinctrl: baytrail: Do not clear IRQ flags on direct-irq enabled pins
  media: sti: bdisp: fix a possible sleep-in-atomic-context bug in bdisp_device_run()
  char/random: silence a lockdep splat with printk()
  gpio: gpio-grgpio: fix possible sleep-in-atomic-context bugs in grgpio_irq_map/unmap()
  powerpc/powernv/iov: Ensure the pdn for VFs always contains a valid PE number
  media: i2c: mt9v032: fix enum mbus codes and frame sizes
  pxa168fb: Fix the function used to release some memory in an error handling path
  pinctrl: sh-pfc: sh7264: Fix CAN function GPIOs
  gianfar: Fix TX timestamping with a stacked DSA driver
  ALSA: ctl: allow TLV read operation for callback type of element in locked case
  ext4: fix ext4_dax_read/write inode locking sequence for IOCB_NOWAIT
  leds: pca963x: Fix open-drain initialization
  brcmfmac: Fix use after free in brcmf_sdio_readframes()
  cpu/hotplug, stop_machine: Fix stop_machine vs hotplug order
  drm/gma500: Fixup fbdev stolen size usage evaluation
  KVM: nVMX: Use correct root level for nested EPT shadow page tables
  Revert "KVM: VMX: Add non-canonical check on writes to RTIT address MSRs"
  Revert "KVM: nVMX: Use correct root level for nested EPT shadow page tables"
  scsi: qla2xxx: fix a potential NULL pointer dereference
  jbd2: do not clear the BH_Mapped flag when forgetting a metadata buffer
  jbd2: move the clearing of b_modified flag to the journal_unmap_buffer()
  hwmon: (pmbus/ltc2978) Fix PMBus polling of MFR_COMMON definitions.
  perf/x86/intel: Fix inaccurate period in context switch for auto-reload
  s390/time: Fix clk type in get_tod_clock
  RDMA/core: Fix protection fault in get_pkey_idx_qp_list
  IB/hfi1: Close window for pq and request coliding
  serial: imx: Only handle irqs that are actually enabled
  serial: imx: ensure that RX irqs are off if RX is off
  padata: Remove broken queue flushing
  perf/x86/amd: Add missing L2 misses event spec to AMD Family 17h's event map
  KVM: nVMX: Use correct root level for nested EPT shadow page tables
  arm64: ssbs: Fix context-switch when SSBS is present on all CPUs
  btrfs: log message when rw remount is attempted with unclean tree-log
  btrfs: print message when tree-log replay starts
  Btrfs: fix race between using extent maps and merging them
  ext4: improve explanation of a mount failure caused by a misconfigured kernel
  ext4: fix checksum errors with indexed dirs
  ext4: fix support for inode sizes > 1024 bytes
  ext4: don't assume that mmp_nodename/bdevname have NUL
  ARM: 8723/2: always assume the "unified" syntax for assembly code
  arm64: nofpsimd: Handle TIF_FOREIGN_FPSTATE flag cleanly
  arm64: ptrace: nofpsimd: Fail FP/SIMD regset operations
  arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly
  ALSA: usb-audio: Apply sample rate quirk for Audioengine D1
  Input: synaptics - remove the LEN0049 dmi id from topbuttonpad list
  Input: synaptics - enable SMBus on ThinkPad L470
  Input: synaptics - switch T470s to RMI4 by default
  ecryptfs: fix a memory leak bug in ecryptfs_init_messaging()
  ecryptfs: fix a memory leak bug in parse_tag_1_packet()
  ASoC: sun8i-codec: Fix setting DAI data format
  ALSA: hda: Use scnprintf() for printing texts for sysfs/procfs
  iommu/qcom: Fix bogus detach logic
  KVM: x86: emulate RDPID
  UPSTREAM: sched/psi: Fix OOB write when writing 0 bytes to PSI files
  UPSTREAM: psi: Fix a division error in psi poll()
  UPSTREAM: sched/psi: Fix sampling error and rare div0 crashes with cgroups and high uptime
  UPSTREAM: sched/psi: Correct overly pessimistic size calculation
  FROMLIST: f2fs: Handle casefolding with Encryption
  FROMLIST: fscrypt: Have filesystems handle their d_ops
  FROMLIST: ext4: Use generic casefolding support
  FROMLIST: f2fs: Use generic casefolding support
  FROMLIST: Add standard casefolding support
  FROMLIST: unicode: Add utf8_casefold_hash
  ANDROID: cuttlefish_defconfig: Add CONFIG_UNICODE
  ANDROID: sdcardfs: fix -ENOENT lookup race issue
  ANDROID: gki_defconfig: Enable CONFIG_RD_LZ4
  ANDROID: dm: Add wrapped key support in dm-default-key
  ANDROID: dm: add support for passing through derive_raw_secret
  ANDROID: block: Prevent crypto fallback for wrapped keys
  ANDROID: Disable wq fp check in CFI builds
  ANDROID: increase limit on sched-tune boost groups
  ANDROID: ufs, block: fix crypto power management and move into block layer
  ANDROID: Incremental fs: Support xattrs
  ANDROID: test_stackinit: work around LLVM PR44916
  ANDROID: clang: update to 10.0.4
  fs-verity: use u64_to_user_ptr()
  fs-verity: use mempool for hash requests
  fs-verity: implement readahead of Merkle tree pages
  ext4: readpages() should submit IO as read-ahead
  fs-verity: implement readahead for FS_IOC_ENABLE_VERITY
  fscrypt: improve format of no-key names
  ubifs: allow both hash and disk name to be provided in no-key names
  ubifs: don't trigger assertion on invalid no-key filename
  fscrypt: clarify what is meant by a per-file key
  fscrypt: derive dirhash key for casefolded directories
  fscrypt: don't allow v1 policies with casefolding
  fscrypt: add "fscrypt_" prefix to fname_encrypt()
  fscrypt: don't print name of busy file when removing key
  fscrypt: document gfp_flags for bounce page allocation
  fscrypt: optimize fscrypt_zeroout_range()
  fscrypt: remove redundant bi_status check
  fscrypt: Allow modular crypto algorithms
  fscrypt: include <linux/ioctl.h> in UAPI header
  fscrypt: don't check for ENOKEY from fscrypt_get_encryption_info()
  fscrypt: remove fscrypt_is_direct_key_policy()
  fscrypt: move fscrypt_valid_enc_modes() to policy.c
  fscrypt: check for appropriate use of DIRECT_KEY flag earlier
  fscrypt: split up fscrypt_supported_policy() by policy version
  fscrypt: introduce fscrypt_needs_contents_encryption()
  fscrypt: move fscrypt_d_revalidate() to fname.c
  fscrypt: constify inode parameter to filename encryption functions
  fscrypt: constify struct fscrypt_hkdf parameter to fscrypt_hkdf_expand()
  fscrypt: verify that the crypto_skcipher has the correct ivsize
  fscrypt: use crypto_skcipher_driver_name()
  fscrypt: support passing a keyring key to FS_IOC_ADD_ENCRYPTION_KEY
  keys: Export lookup_user_key to external users
  f2fs: fix build error on PAGE_KERNEL_RO

 Conflicts:
	arch/arm64/kernel/smp.c
	arch/arm64/kernel/traps.c
	block/blk-crypto-fallback.c
	block/keyslot-manager.c
	drivers/base/power/wakeup.c
	drivers/clk/clk.c
	drivers/clk/qcom/clk-rcg2.c
	drivers/gpu/Makefile
	drivers/gpu/drm/msm/msm_drv.c
	drivers/gpu/drm/msm/msm_gem.c
	drivers/hwtracing/coresight/coresight-funnel.c
	drivers/irqchip/irq-gic-v3.c
	drivers/md/dm.c
	drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
	drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
	drivers/net/macsec.c
	drivers/net/phy/micrel.c
	drivers/net/wireless/ath/wil6210/cfg80211.c
	drivers/net/wireless/ath/wil6210/fw_inc.c
	drivers/net/wireless/ath/wil6210/pcie_bus.c
	drivers/net/wireless/ath/wil6210/pm.c
	drivers/net/wireless/ath/wil6210/wil6210.h
	drivers/of/base.c
	drivers/power/supply/power_supply_sysfs.c
	drivers/rpmsg/qcom_glink_smem.c
	drivers/scsi/sd.c
	drivers/scsi/ufs/ufshcd-crypto.c
	drivers/scsi/ufs/ufshcd.c
	drivers/scsi/ufs/ufshcd.h
	drivers/scsi/ufs/ufshci.h
	drivers/usb/dwc3/core.c
	drivers/usb/dwc3/gadget.c
	drivers/usb/gadget/Kconfig
	drivers/usb/gadget/composite.c
	drivers/usb/gadget/function/f_fs.c
	drivers/usb/gadget/legacy/Makefile
	drivers/usb/host/xhci-mem.c
	fs/ext4/readpage.c
	fs/sdcardfs/lookup.c
	include/linux/key.h
	include/linux/keyslot-manager.h
	include/linux/power_supply.h
	include/uapi/linux/coresight-stm.h
	net/qrtr/qrtr.c

Change-Id: Iaa9fcbe987e721f02596e167249a519781ed3888
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
2020-08-05 11:41:56 +05:30

/*
* mm/mmap.c
*
* Written by obz.
*
* Address space accounting code <alan@lxorguk.ukuu.org.uk>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include "internal.h"
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
/* Description of the effects of mapping type and prot in the current
* implementation. This is due to the limited x86 page protection
* hardware. The expected behavior is in parens:
*
* map_type     prot
*              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
* MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
*              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
*              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
*
* MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
*              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
*              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
*/
pgprot_t protection_map[16] __ro_after_init = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
return __pgprot(pgprot_val(protection_map[vm_flags &
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);
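/*
* Illustrative sketch (not part of the original file, compiled out):
* how vm_flags indexes protection_map[] above. Only the low four
* bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) participate, so a
* shared read-write mapping selects __S011, while the same
* permissions on a private mapping select __P011, where a write
* means copy-on-write.
*/
#if 0
static pgprot_t example_shared_rw_prot(void)
{
/* VM_READ|VM_WRITE|VM_SHARED == 0xb, i.e. protection_map[11] == __S011 */
return vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED);
}
#endif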
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
unsigned long vm_flags = vma->vm_flags;
pgprot_t vm_page_prot;
vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
if (vma_wants_writenotify(vma, vm_page_prot)) {
vm_flags &= ~VM_SHARED;
vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
}
/* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
/*
* Requires inode->i_mapping->i_mmap_rwsem
*/
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
struct file *file, struct address_space *mapping)
{
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&file_inode(file)->i_writecount);
if (vma->vm_flags & VM_SHARED)
mapping_unmap_writable(mapping);
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
/*
* Unlink a file-based vm structure from its interval tree, to hide
* vma from rmap and vmtruncate before freeing its page tables.
*/
void unlink_file_vma(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
i_mmap_lock_write(mapping);
__remove_shared_vm_struct(vma, file, mapping);
i_mmap_unlock_write(mapping);
}
}
static void __free_vma(struct vm_area_struct *vma)
{
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
}
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
void put_vma(struct vm_area_struct *vma)
{
if (atomic_dec_and_test(&vma->vm_ref_count))
__free_vma(vma);
}
#else
static inline void put_vma(struct vm_area_struct *vma)
{
__free_vma(vma);
}
#endif
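/*
* Note on the split above (informal): with
* CONFIG_SPECULATIVE_PAGE_FAULT the vma is reference counted via
* vm_ref_count, so a speculative fault can keep it alive across a
* concurrent unmap; without it, put_vma() frees immediately because
* mmap_sem already serialises all access to the vma.
*/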
/*
* Close a vm structure and free it, returning the next.
*/
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *next = vma->vm_next;
might_sleep();
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
put_vma(vma);
return next;
}
static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
#ifdef CONFIG_COMPAT_BRK
/*
* CONFIG_COMPAT_BRK can still be overridden by setting
* randomize_va_space to 2, which will still cause mm->start_brk
* to be arbitrarily shifted
*/
if (current->brk_randomized)
min_brk = mm->start_brk;
else
min_brk = mm->end_data;
#else
min_brk = mm->start_brk;
#endif
if (brk < min_brk)
goto out;
/*
* Check against rlimit here. If this check is done later, after the
* test of oldbrk against newbrk, it can escape that test and let the
* data segment grow beyond its set limit in the case where the limit
* is not page aligned -Ram Gupta
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm->end_data, mm->start_data))
goto out;
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk)
goto set_brk;
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
if (!do_munmap(mm, newbrk, oldbrk-newbrk, &uf))
goto set_brk;
goto out;
}
/* Check against existing mmap mappings. */
next = find_vma(mm, oldbrk);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
goto out;
set_brk:
mm->brk = brk;
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
retval = mm->brk;
up_write(&mm->mmap_sem);
return retval;
}
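/*
* Worked example (hypothetical values, 4 KiB pages) for the brk()
* syscall above: with mm->brk == 0x10001800 and a request of
* brk == 0x10001c00, both PAGE_ALIGN() to 0x10002000, so
* oldbrk == newbrk and only mm->brk is updated; nothing is mapped
* or unmapped until the page-aligned break actually moves.
*/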
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
unsigned long max, prev_end, subtree_gap;
/*
* Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
* allow two stack_guard_gaps between them here, and when choosing
* an unmapped area; whereas when expanding we only require one.
* That's a little inconsistent, but keeps the code here simpler.
*/
max = vm_start_gap(vma);
if (vma->vm_prev) {
prev_end = vm_end_gap(vma->vm_prev);
if (max > prev_end)
max -= prev_end;
else
max = 0;
}
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
if (subtree_gap > max)
max = subtree_gap;
}
if (vma->vm_rb.rb_right) {
subtree_gap = rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
if (subtree_gap > max)
max = subtree_gap;
}
return max;
}
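/*
* Worked example (hypothetical addresses) for the helper above: if
* vm_start_gap(vma) == 0x4000 and the previous vma's vm_end_gap() is
* 0x1000, the gap in front of this vma is 0x3000; the result is the
* maximum of that and the cached rb_subtree_gap of both children, so
* each node summarises the largest free gap in its subtree.
*/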
#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
int i = 0, j, bug = 0;
struct rb_node *nd, *pn = NULL;
unsigned long prev = 0, pend = 0;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
if (vma->vm_start < prev) {
pr_emerg("vm_start %lx < prev %lx\n",
vma->vm_start, prev);
bug = 1;
}
if (vma->vm_start < pend) {
pr_emerg("vm_start %lx < pend %lx\n",
vma->vm_start, pend);
bug = 1;
}
if (vma->vm_start > vma->vm_end) {
pr_emerg("vm_start %lx > vm_end %lx\n",
vma->vm_start, vma->vm_end);
bug = 1;
}
spin_lock(&mm->page_table_lock);
if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
pr_emerg("free gap %lx, correct %lx\n",
vma->rb_subtree_gap,
vma_compute_subtree_gap(vma));
bug = 1;
}
spin_unlock(&mm->page_table_lock);
i++;
pn = nd;
prev = vma->vm_start;
pend = vma->vm_end;
}
j = 0;
for (nd = pn; nd; nd = rb_prev(nd))
j++;
if (i != j) {
pr_emerg("backwards %d, forwards %d\n", j, i);
bug = 1;
}
return bug ? -1 : i;
}
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
struct rb_node *nd;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct vm_area_struct *vma;
vma = rb_entry(nd, struct vm_area_struct, vm_rb);
VM_BUG_ON_VMA(vma != ignore &&
vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
vma);
}
}
static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
int i = 0;
unsigned long highest_address = 0;
struct vm_area_struct *vma = mm->mmap;
while (vma) {
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
if (anon_vma) {
anon_vma_lock_read(anon_vma);
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
anon_vma_unlock_read(anon_vma);
}
highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
if (i != mm->map_count) {
pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
bug = 1;
}
if (highest_address != mm->highest_vm_end) {
pr_emerg("mm->highest_vm_end %lx, found %lx\n",
mm->highest_vm_end, highest_address);
bug = 1;
}
i = browse_rb(mm);
if (i != mm->map_count) {
if (i != -1)
pr_emerg("map_count %d rb %d\n", mm->map_count, i);
bug = 1;
}
VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
#define mm_rb_write_lock(mm) write_lock(&(mm)->mm_rb_lock)
#define mm_rb_write_unlock(mm) write_unlock(&(mm)->mm_rb_lock)
#else
#define mm_rb_write_lock(mm) do { } while (0)
#define mm_rb_write_unlock(mm) do { } while (0)
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
/*
* Update augmented rbtree rb_subtree_gap values after vma->vm_start or
* vma->vm_prev->vm_end values changed, without modifying the vma's position
* in the rbtree.
*/
static void vma_gap_update(struct vm_area_struct *vma)
{
/*
* As it turns out, RB_DECLARE_CALLBACKS() already created a callback
* function that does exactly what we want.
*/
vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
static inline void vma_rb_insert(struct vm_area_struct *vma,
struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
/* All rb_subtree_gap values must be consistent prior to insertion */
validate_mm_rb(root, NULL);
rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
/*
* Note rb_erase_augmented is a fairly large inline function,
* so make sure we instantiate it only once with our desired
* augmented rbtree callbacks.
*/
mm_rb_write_lock(mm);
rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
mm_rb_write_unlock(mm); /* wmb */
/*
* Ensure the removal is complete before clearing the node.
* Matched by vma_has_changed()/handle_speculative_fault().
*/
RB_CLEAR_NODE(&vma->vm_rb);
}
static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
struct mm_struct *mm,
struct vm_area_struct *ignore)
{
/*
* All rb_subtree_gap values must be consistent prior to erase,
* with the possible exception of the "next" vma being erased if
* next->vm_start was reduced.
*/
validate_mm_rb(&mm->mm_rb, ignore);
__vma_rb_erase(vma, mm);
}
static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
struct mm_struct *mm)
{
/*
* All rb_subtree_gap values must be consistent prior to erase,
* with the possible exception of the vma being erased.
*/
validate_mm_rb(&mm->mm_rb, vma);
__vma_rb_erase(vma, mm);
}
/*
* vma has some anon_vma assigned, and is already inserted on that
* anon_vma's interval trees.
*
* Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
* vma must be removed from the anon_vma's interval trees using
* anon_vma_interval_tree_pre_update_vma().
*
* After the update, the vma will be reinserted using
* anon_vma_interval_tree_post_update_vma().
*
* The entire update must be protected by exclusive mmap_sem and by
* the root anon_vma's mutex.
*/
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc;
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}
static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc;
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
unsigned long end, struct vm_area_struct **pprev,
struct rb_node ***rb_link, struct rb_node **rb_parent)
{
struct rb_node **__rb_link, *__rb_parent, *rb_prev;
__rb_link = &mm->mm_rb.rb_node;
rb_prev = __rb_parent = NULL;
while (*__rb_link) {
struct vm_area_struct *vma_tmp;
__rb_parent = *__rb_link;
vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
if (vma_tmp->vm_end > addr) {
/* Fail if an existing vma overlaps the area */
if (vma_tmp->vm_start < end)
return -ENOMEM;
__rb_link = &__rb_parent->rb_left;
} else {
rb_prev = __rb_parent;
__rb_link = &__rb_parent->rb_right;
}
}
*pprev = NULL;
if (rb_prev)
*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
*rb_link = __rb_link;
*rb_parent = __rb_parent;
return 0;
}
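/*
* Usage sketch for find_vma_links() above: on success, *rb_link and
* *rb_parent name the empty rbtree slot where a vma spanning
* [addr, end) can be linked, and *pprev is the vma preceding the
* range (NULL if the new vma would be first). -ENOMEM means an
* existing vma already overlaps the range; see __insert_vm_struct()
* below for a caller that treats such overlap as a BUG().
*/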
static unsigned long count_vma_pages_range(struct mm_struct *mm,
unsigned long addr, unsigned long end)
{
unsigned long nr_pages = 0;
struct vm_area_struct *vma;
/* Find the first overlapping mapping */
vma = find_vma_intersection(mm, addr, end);
if (!vma)
return 0;
nr_pages = (min(end, vma->vm_end) -
max(addr, vma->vm_start)) >> PAGE_SHIFT;
/* Iterate over the rest of the overlaps */
for (vma = vma->vm_next; vma; vma = vma->vm_next) {
unsigned long overlap_len;
if (vma->vm_start > end)
break;
overlap_len = min(end, vma->vm_end) - vma->vm_start;
nr_pages += overlap_len >> PAGE_SHIFT;
}
return nr_pages;
}
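/*
* Worked example (hypothetical, 4 KiB pages): with vmas covering
* [0x1000, 0x3000) and [0x5000, 0x8000), a query for [0x2000, 0x6000)
* counts (0x3000 - 0x2000) from the first overlap plus
* (0x6000 - 0x5000) from the second, i.e. 2 pages.
*/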
void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
struct rb_node **rb_link, struct rb_node *rb_parent)
{
/* Update tracking information for the gap following the new vma. */
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
* correct insertion point for that vma. As a result, we could not
* update the rb_subtree_gap values of the vma's rbtree ancestors on
* the way down.
* So, we first insert the vma with a zero rb_subtree_gap value
* (to be consistent with what we did on the way down), and then
* immediately update the gap to the correct value. Finally we
* rebalance the rbtree after all augmented values have been set.
*/
mm_rb_write_lock(mm);
rb_link_node(&vma->vm_rb, rb_parent, rb_link);
vma->rb_subtree_gap = 0;
vma_gap_update(vma);
vma_rb_insert(vma, mm);
mm_rb_write_unlock(mm);
}
static void __vma_link_file(struct vm_area_struct *vma)
{
struct file *file;
file = vma->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&file_inode(file)->i_writecount);
if (vma->vm_flags & VM_SHARED)
atomic_inc(&mapping->i_mmap_writable);
flush_dcache_mmap_lock(mapping);
vma_interval_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
}
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
__vma_link_list(mm, vma, prev, rb_parent);
__vma_link_rb(mm, vma, rb_link, rb_parent);
}
static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
struct address_space *mapping = NULL;
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
i_mmap_lock_write(mapping);
}
__vma_link(mm, vma, prev, rb_link, rb_parent);
__vma_link_file(vma);
if (mapping)
i_mmap_unlock_write(mapping);
mm->map_count++;
validate_mm(mm);
}
/*
* Helper for vma_adjust() in the split_vma insert case: insert a vma into the
* mm's list and rbtree. It has already been inserted into the interval tree.
*/
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
&prev, &rb_link, &rb_parent))
BUG();
__vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++;
}
static __always_inline void __vma_unlink_common(struct mm_struct *mm,
struct vm_area_struct *vma,
struct vm_area_struct *prev,
bool has_prev,
struct vm_area_struct *ignore)
{
struct vm_area_struct *next;
vma_rb_erase_ignore(vma, mm, ignore);
next = vma->vm_next;
if (has_prev)
prev->vm_next = next;
else {
prev = vma->vm_prev;
if (prev)
prev->vm_next = next;
else
mm->mmap = next;
}
if (next)
next->vm_prev = prev;
/* Kill the cache */
vmacache_invalidate(mm);
}
static inline void __vma_unlink_prev(struct mm_struct *mm,
struct vm_area_struct *vma,
struct vm_area_struct *prev)
{
__vma_unlink_common(mm, vma, prev, true, vma);
}
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
* The following helper function should be used when such adjustments
* are necessary. The "insert" vma (if any) is to be inserted
* before we drop the necessary locks.
*/
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand, bool keep_locked)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
struct address_space *mapping = NULL;
struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool start_changed = false, end_changed = false;
long adjust_next = 0;
int remove_next = 0;
/*
* Why use the vm_raw_write*() functions here? To avoid lockdep's
* warning.
*
* Lockdep complains about a theoretical lock dependency involving
* 3 locks:
* mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
*
* Here are the major paths leading to this dependency:
* 1. __vma_adjust() mmap_sem -> vm_sequence -> i_mmap_rwsem
* 2. move_vmap() mmap_sem -> vm_sequence -> fs_reclaim
* 3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem
* 4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence
*
* There is no easy way to break this cycle, especially because in
* unmap_mapping_range() the i_mmap_rwsem is grabbed while the
* impacted VMAs are not yet known.
* However, the way vm_seq is used guarantees that we will never
* block on it: we only check its value and never wait for it to
* change, see vma_has_changed() and handle_speculative_fault().
*/
vm_raw_write_begin(vma);
if (next)
vm_raw_write_begin(next);
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
* perhaps the one after too (mprotect case 6).
* The only other cases that gets here are
* case 1, case 7 and case 8.
*/
if (next == expand) {
/*
* The only case where we don't expand "vma"
* and we expand "next" instead is case 8.
*/
VM_WARN_ON(end != next->vm_end);
/*
* remove_next == 3 means we're
* removing "vma" and that to do so we
* swapped "vma" and "next".
*/
remove_next = 3;
VM_WARN_ON(file != next->vm_file);
swap(vma, next);
} else {
VM_WARN_ON(expand != vma);
/*
* case 1, 6, 7, remove_next == 2 is case 6,
* remove_next == 1 is case 1 or 7.
*/
remove_next = 1 + (end > next->vm_end);
VM_WARN_ON(remove_next == 2 &&
end != next->vm_next->vm_end);
VM_WARN_ON(remove_next == 1 &&
end != next->vm_end);
/* trim end to next, for case 6 first pass */
end = next->vm_end;
}
exporter = next;
importer = vma;
/*
* If next doesn't have anon_vma, import from vma after
* next, if the vma overlaps with it.
*/
if (remove_next == 2 && !next->anon_vma)
exporter = next->vm_next;
} else if (end > next->vm_start) {
/*
* vma expands, overlapping part of the next:
* mprotect case 5 shifting the boundary up.
*/
adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
exporter = next;
importer = vma;
VM_WARN_ON(expand != importer);
} else if (end < vma->vm_end) {
/*
* vma shrinks, and !insert tells it's not
* split_vma inserting another: so it must be
* mprotect case 4 shifting the boundary down.
*/
adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
exporter = vma;
importer = next;
VM_WARN_ON(expand != importer);
}
/*
* Easily overlooked: when mprotect shifts the boundary,
* make sure the expanding vma has anon_vma set if the
* shrinking vma had, to cover any anon pages imported.
*/
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
if (error) {
if (next && next != vma)
vm_raw_write_end(next);
vm_raw_write_end(vma);
return error;
}
}
}
again:
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {
mapping = file->f_mapping;
root = &mapping->i_mmap;
uprobe_munmap(vma, vma->vm_start, vma->vm_end);
if (adjust_next)
uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
if (insert) {
/*
* Put into interval tree now, so instantiated pages
* are visible to arm/parisc __flush_dcache_page
* throughout; but we cannot insert into address
* space until vma start or end is updated.
*/
__vma_link_file(insert);
}
}
anon_vma = vma->anon_vma;
if (!anon_vma && adjust_next)
anon_vma = next->anon_vma;
if (anon_vma) {
VM_WARN_ON(adjust_next && next->anon_vma &&
anon_vma != next->anon_vma);
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_pre_update_vma(next);
}
if (root) {
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, root);
if (adjust_next)
vma_interval_tree_remove(next, root);
}
if (start != vma->vm_start) {
WRITE_ONCE(vma->vm_start, start);
start_changed = true;
}
if (end != vma->vm_end) {
WRITE_ONCE(vma->vm_end, end);
end_changed = true;
}
WRITE_ONCE(vma->vm_pgoff, pgoff);
if (adjust_next) {
WRITE_ONCE(next->vm_start,
next->vm_start + (adjust_next << PAGE_SHIFT));
WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);
}
if (root) {
if (adjust_next)
vma_interval_tree_insert(next, root);
vma_interval_tree_insert(vma, root);
flush_dcache_mmap_unlock(mapping);
}
if (remove_next) {
/*
* vma_merge has merged next into vma, and needs
* us to remove next before dropping the locks.
*/
if (remove_next != 3)
__vma_unlink_prev(mm, next, vma);
else
/*
* vma is not before next if they've been
* swapped.
*
* pre-swap() next->vm_start was reduced so
* tell validate_mm_rb to ignore pre-swap()
* "next" (which is stored in post-swap()
* "vma").
*/
__vma_unlink_common(mm, next, NULL, false, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
/*
* split_vma has split insert from vma, and needs
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
__insert_vm_struct(mm, insert);
} else {
if (start_changed)
vma_gap_update(vma);
if (end_changed) {
if (!next)
mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
}
if (anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_post_update_vma(next);
anon_vma_unlock_write(anon_vma);
}
if (mapping)
i_mmap_unlock_write(mapping);
if (root) {
uprobe_mmap(vma);
if (adjust_next)
uprobe_mmap(next);
}
if (remove_next) {
if (file)
uprobe_munmap(next, next->vm_start, next->vm_end);
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
vm_raw_write_end(next);
put_vma(next);
/*
* In mprotect's case 6 (see comments on vma_merge),
* we must remove another next too. It would clutter
* up the code too much to do both in one go.
*/
if (remove_next != 3) {
/*
* If "next" was removed and vma->vm_end was
* expanded (up) over it, in turn
* "next->vm_prev->vm_end" changed and the
* "vma->vm_next" gap must be updated.
*/
next = vma->vm_next;
if (next)
vm_raw_write_begin(next);
} else {
/*
* For the scope of the comment "next" and
* "vma" considered pre-swap(): if "vma" was
* removed, next->vm_start was expanded (down)
* over it and the "next" gap must be updated.
* Because of the swap() the post-swap() "vma"
* actually points to pre-swap() "next"
* (post-swap() "next" as opposed is now a
* dangling pointer).
*/
next = vma;
}
if (remove_next == 2) {
remove_next = 1;
end = next->vm_end;
goto again;
}
else if (next)
vma_gap_update(next);
else {
/*
* If remove_next == 2 we obviously can't
* reach this path.
*
* If remove_next == 3 we can't reach this
* path because pre-swap() next is always not
* NULL. pre-swap() "next" is not being
* removed and its next->vm_end is not altered
* (and furthermore "end" already matches
* next->vm_end in remove_next == 3).
*
* We reach this only in the remove_next == 1
* case if the "next" vma that was removed was
* the highest vma of the mm. However in such
* case next->vm_end == "end" and the extended
* "vma" has vma->vm_end == next->vm_end so
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
uprobe_mmap(insert);
if (next && next != vma)
vm_raw_write_end(next);
if (!keep_locked)
vm_raw_write_end(vma);
validate_mm(mm);
return 0;
}
/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
*/
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name)
{
/*
* VM_SOFTDIRTY should not prevent VMA merging if the VMAs match
* on all flags except the dirty bit -- the caller should mark the
* merged VMA as dirty. If the dirty bit were not excluded from the
* comparison, we would increase pressure on the memory system by
* forcing the kernel to generate new VMAs where old ones could be
* extended instead.
*/
if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
return 0;
if (vma->vm_file != file)
return 0;
if (vma->vm_ops && vma->vm_ops->close)
return 0;
if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
return 0;
if (vma_get_anon_name(vma) != anon_name)
return 0;
return 1;
}
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
struct anon_vma *anon_vma2,
struct vm_area_struct *vma)
{
/*
* The list_is_singular() test is to avoid merging VMAs cloned from
* parents. This can improve scalability by reducing anon_vma lock
* contention.
*/
if ((!anon_vma1 || !anon_vma2) && (!vma ||
list_is_singular(&vma->anon_vma_chain)))
return 1;
return anon_vma1 == anon_vma2;
}
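/*
* Example of the rule above (informal): merging is allowed when one
* side has no anon_vma yet -- provided the vma's anon_vma_chain is
* singular, i.e. it has not inherited extra links across fork() --
* or when both sides already point at the same anon_vma.
*/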
/*
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
* in front of (at a lower virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
* anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
*
* We don't check here for the merged mmap wrapping around the end of pagecache
* indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
* wrap, nor mmaps which cover the final page at index -1UL.
*/
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name)
{
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
return 1;
}
return 0;
}
/*
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
* beyond (at a higher virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
* anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
*/
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name)
{
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
vm_pglen = vma_pages(vma);
if (vma->vm_pgoff + vm_pglen == vm_pgoff)
return 1;
}
return 0;
}
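/*
* Worked example (hypothetical) of the pgoff arithmetic above: a
* file-backed vma mapping file pages [4, 8) has vm_pgoff == 4 and
* vma_pages() == 4, so a region adjacent to its end merges after it
* only if that region continues the file at page 8, i.e. its
* vm_pgoff == 4 + 4. Address adjacency itself is checked by the
* caller, __vma_merge().
*/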
/*
* Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
* figure out whether that can be merged with its predecessor or its
* successor. Or both (it neatly fills a hole).
*
* In most cases - when called for mmap, brk or mremap - [addr,end) is
* certain not to be mapped by the time vma_merge is called; but when
* called for mprotect, it is certain to be already mapped (either at
* an offset within prev, or at the start of next), and the flags of
* this area are about to be changed to vm_flags - and the no-change
* case has already been eliminated.
*
* The following mprotect cases have to be considered, where AAAA is
* the area passed down from mprotect_fixup, never extending beyond one
* vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
*
*     AAAA             AAAA              AAAA              AAAA
*    PPPPPPNNNNNN    PPPPPPNNNNNN     PPPPPPNNNNNN      PPPPNNNNXXXX
*    cannot merge    might become     might become      might become
*    PPNNNNNNNNNN    PPPPPPPPPPNN     PPPPPPPPPPPP 6 or
*    mmap, brk or    case 4 below     case 5 below      PPPPPPPPXXXX 7 or
*    mremap move:                                       PPPPXXXXXXXX 8
*        AAAA
*    PPPP    NNNN    PPPPPPPPPPPP     PPPPPPPPNNNN      PPPPNNNNNNNN
*    might become    case 1 below     case 2 below      case 3 below
*
* It is important for case 8 that the vma NNNN overlapping the
* region AAAA is never going to be extended over XXXX. Instead XXXX must
* be extended in region AAAA and NNNN must be removed. This way in
* all cases where vma_merge succeeds, the moment vma_adjust drops the
* rmap_locks, the properties of the merged vma will be already
* correct for the whole merged range. Some of those properties like
* vm_page_prot/vm_flags may be accessed by rmap_walks and they must
* be correct for the whole merged range immediately after the
* rmap_locks are released. Otherwise if XXXX would be removed and
* NNNN would be extended over the XXXX range, remove_migration_ptes
* or other rmap walkers (if working on addresses beyond the "end"
* parameter) may establish ptes with the wrong permissions of NNNN
* instead of the right permissions of XXXX.
*/
struct vm_area_struct *__vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t pgoff, struct mempolicy *policy,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
const char __user *anon_name, bool keep_locked)
{
pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
struct vm_area_struct *area, *next;
int err;
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
*/
if (vm_flags & VM_SPECIAL)
return NULL;
if (prev)
next = prev->vm_next;
else
next = mm->mmap;
area = next;
if (area && area->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
/* verify some invariants that must be enforced by the caller */
VM_WARN_ON(prev && addr <= prev->vm_start);
VM_WARN_ON(area && end > area->vm_end);
VM_WARN_ON(addr >= end);
/*
* Can it merge with the predecessor?
*/
if (prev && prev->vm_end == addr &&
mpol_equal(vma_policy(prev), policy) &&
can_vma_merge_after(prev, vm_flags,
anon_vma, file, pgoff,
vm_userfaultfd_ctx,
anon_name)) {
/*
* OK, it can. Can we now merge in the successor as well?
*/
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file,
pgoff+pglen,
vm_userfaultfd_ctx,
anon_name) &&
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) {
/* cases 1, 6 */
err = __vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL,
prev, keep_locked);
} else /* cases 2, 5, 7 */
err = __vma_adjust(prev, prev->vm_start,
end, prev->vm_pgoff, NULL, prev,
keep_locked);
if (err)
return NULL;
khugepaged_enter_vma_merge(prev, vm_flags);
return prev;
}
/*
* Can this new request be merged in front of next?
*/
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen,
vm_userfaultfd_ctx,
anon_name)) {
if (prev && addr < prev->vm_end) /* case 4 */
err = __vma_adjust(prev, prev->vm_start,
addr, prev->vm_pgoff, NULL, next,
keep_locked);
else { /* cases 3, 8 */
err = __vma_adjust(area, addr, next->vm_end,
next->vm_pgoff - pglen, NULL, next,
keep_locked);
/*
* In case 3 area is already equal to next and
* this is a noop, but in case 8 "area" has
* been removed and next was expanded over it.
*/
area = next;
}
if (err)
return NULL;
khugepaged_enter_vma_merge(area, vm_flags);
return area;
}
return NULL;
}
/*
* Rough compatibility check to quickly see if it's even worth looking
* at sharing an anon_vma.
*
* They need to have the same vm_file, and the flags can only differ
* in things that mprotect may change.
*
* NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
* we can merge the two vma's. For example, we refuse to merge a vma if
* there is a vm_ops->close() function, because that indicates that the
* driver is doing some kind of reference counting. But that doesn't
* really matter for the anon_vma sharing case.
*/
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
return a->vm_end == b->vm_start &&
mpol_equal(vma_policy(a), vma_policy(b)) &&
a->vm_file == b->vm_file &&
!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
/*
* Do some basic sanity checking to see if we can re-use the anon_vma
* from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
* the same as 'old', the other will be the new one that is trying
* to share the anon_vma.
*
* NOTE! This runs with mmap_sem held for reading, so it is possible that
* the anon_vma of 'old' is concurrently in the process of being set up
* by another page fault trying to merge _that_. But that's ok: if it
* is being set up, that automatically means that it will be a singleton
* acceptable for merging, so we can do all of this optimistically. But
* we do that READ_ONCE() to make sure that we never re-load the pointer.
*
* IOW: that the "list_is_singular()" test on the anon_vma_chain only
* matters for the 'stable anon_vma' case (ie the thing we want to avoid
* is to return an anon_vma that is "complex" due to having gone through
* a fork).
*
* We also make sure that the two vma's are compatible (adjacent,
* and with the same memory policies). That's all stable, even with just
* a read lock on the mmap_sem.
*/
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
if (anon_vma_compatible(a, b)) {
struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
if (anon_vma && list_is_singular(&old->anon_vma_chain))
return anon_vma;
}
return NULL;
}
/*
* find_mergeable_anon_vma is used by anon_vma_prepare, to check
* neighbouring vmas for a suitable anon_vma, before it goes off
* to allocate a new anon_vma. It checks because a repetitive
* sequence of mprotects and faults may otherwise lead to distinct
* anon_vmas being allocated, preventing vma merge in subsequent
* mprotect.
*/
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma;
struct vm_area_struct *near;
near = vma->vm_next;
if (!near)
goto try_prev;
anon_vma = reusable_anon_vma(near, vma, near);
if (anon_vma)
return anon_vma;
try_prev:
near = vma->vm_prev;
if (!near)
goto none;
anon_vma = reusable_anon_vma(near, near, vma);
if (anon_vma)
return anon_vma;
none:
/*
* There's no absolute need to look only at touching neighbours:
* we could search further afield for "compatible" anon_vmas.
* But it would probably just be a waste of time searching,
* or lead to too many vmas hanging off the same anon_vma.
* We're trying to allow mprotect remerging later on,
* not trying to minimize memory used for anon_vmas.
*/
return NULL;
}
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
*/
static inline unsigned long round_hint_to_min(unsigned long hint)
{
hint &= PAGE_MASK;
if (((void *)hint != NULL) &&
(hint < mmap_min_addr))
return PAGE_ALIGN(mmap_min_addr);
return hint;
}
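/*
* Worked example (hypothetical, 4 KiB pages, mmap_min_addr ==
* 0x10000): a hint of 0x1234 is masked down to 0x1000, which is
* below mmap_min_addr, so PAGE_ALIGN(0x10000) == 0x10000 is returned
* instead. A NULL hint is returned unchanged, preserving the "map
* anywhere" request.
*/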
static inline int mlock_future_check(struct mm_struct *mm,
unsigned long flags,
unsigned long len)
{
unsigned long locked, lock_limit;
/* mlock MCL_FUTURE? */
if (flags & VM_LOCKED) {
locked = len >> PAGE_SHIFT;
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
return 0;
}
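/*
* Worked example (hypothetical, 4 KiB pages): with RLIMIT_MEMLOCK ==
* 64 KiB (16 pages), mm->locked_vm == 10 pages, and a VM_LOCKED
* request for 32 KiB (8 pages), locked == 18 > 16, so the request
* fails with -EAGAIN unless the caller has CAP_IPC_LOCK.
*/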
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return MAX_LFS_FILESIZE;
if (S_ISBLK(inode->i_mode))
return MAX_LFS_FILESIZE;
/* Special "we do even unsigned file positions" case */
if (file->f_mode & FMODE_UNSIGNED_OFFSET)
return 0;
/* Yes, random drivers might want more. But I'm tired of buggy drivers */
return ULONG_MAX;
}
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
unsigned long pgoff, unsigned long len)
{
u64 maxsize = file_mmap_size_max(file, inode);
if (maxsize && len > maxsize)
return false;
maxsize -= len;
if (pgoff > maxsize >> PAGE_SHIFT)
return false;
return true;
}
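/*
* Worked example (hypothetical 1 MiB cap, 4 KiB pages): mapping
* len == 8 KiB leaves maxsize == 1 MiB - 8 KiB, so pgoff may be at
* most 254; pgoff == 255 would make the mapping run 4 KiB past the
* cap and is rejected.
*/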
/*
* The caller must hold down_write(&current->mm->mmap_sem).
*/
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, vm_flags_t vm_flags,
unsigned long pgoff, unsigned long *populate,
struct list_head *uf)
{
struct mm_struct *mm = current->mm;
int pkey = 0;
*populate = 0;
if (!len)
return -EINVAL;
/*
* Does the application expect PROT_READ to imply PROT_EXEC?
*
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we don't add PROT_EXEC.)
*/
if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
if (!(file && path_noexec(&file->f_path)))
prot |= PROT_EXEC;
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
if (!len)
return -ENOMEM;
/* offset overflow? */
if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
return -EOVERFLOW;
/* Too many mappings? */
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
/* Obtain the address to map to. We verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
addr = get_unmapped_area(file, addr, len, pgoff, flags);
if (offset_in_page(addr))
return addr;
if (prot == PROT_EXEC) {
pkey = execute_only_pkey(mm);
if (pkey < 0)
pkey = 0;
}
/* Do simple checking here so the lower-level routines won't have
* to. We assume access permissions have been handled by the open
* of the memory object, so we don't do any here.
*/
vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
if (mlock_future_check(mm, vm_flags, len))
return -EAGAIN;
if (file) {
struct inode *inode = file_inode(file);
if (!file_mmap_ok(file, inode, pgoff, len))
return -EOVERFLOW;
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if (prot & PROT_WRITE) {
if (!(file->f_mode & FMODE_WRITE))
return -EACCES;
if (IS_SWAPFILE(file->f_mapping->host))
return -ETXTBSY;
}
/*
* Make sure we don't allow writing to an append-only
* file..
*/
if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
return -EACCES;
/*
* Make sure there are no mandatory locks on the file.
*/
if (locks_verify_locked(file))
return -EAGAIN;
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
/* fall through */
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
return -EACCES;
if (path_noexec(&file->f_path)) {
if (vm_flags & VM_EXEC)
return -EPERM;
vm_flags &= ~VM_MAYEXEC;
}
if (!file->f_op->mmap)
return -ENODEV;
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
return -EINVAL;
break;
default:
return -EINVAL;
}
} else {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
return -EINVAL;
/*
* Ignore pgoff.
*/
pgoff = 0;
vm_flags |= VM_SHARED | VM_MAYSHARE;
break;
case MAP_PRIVATE:
/*
* Set pgoff according to addr for anon_vma.
*/
pgoff = addr >> PAGE_SHIFT;
break;
default:
return -EINVAL;
}
}
/*
* Set 'VM_NORESERVE' if we should not account for the
* memory use of this mapping.
*/
if (flags & MAP_NORESERVE) {
/* We honor MAP_NORESERVE if allowed to overcommit */
if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
vm_flags |= VM_NORESERVE;
/* hugetlb applies strict overcommit unless MAP_NORESERVE */
if (file && is_file_hugepages(file))
vm_flags |= VM_NORESERVE;
}
addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
(flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
*populate = len;
return addr;
}
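/*
 * Userspace view (illustrative sketch, not part of this file): the
 * *populate logic above is what makes MAP_POPULATE prefault a mapping:
 *
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
 *
 * vm_mmap_pgoff() then calls mm_populate() with the length that
 * do_mmap() stored in *populate.
 */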
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, pgoff)
{
struct file *file = NULL;
unsigned long retval;
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
file = fget(fd);
if (!file)
return -EBADF;
if (is_file_hugepages(file))
len = ALIGN(len, huge_page_size(hstate_file(file)));
retval = -EINVAL;
if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
goto out_fput;
} else if (flags & MAP_HUGETLB) {
struct user_struct *user = NULL;
struct hstate *hs;
hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (!hs)
return -EINVAL;
len = ALIGN(len, huge_page_size(hs));
/*
* VM_NORESERVE is used because the reservations will be
* taken when vm_ops->mmap() is called.
* A dummy user value is used because we are not locking
* memory, so no accounting is necessary.
*/
file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
&user, HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (IS_ERR(file))
return PTR_ERR(file);
}
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
if (file)
fput(file);
return retval;
}
#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
struct mmap_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
if (offset_in_page(a.offset))
return -EINVAL;
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
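/*
 * Illustrative example (hypothetical values): legacy callers pass a
 * byte offset rather than a page offset, so
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 8192, .prot = PROT_READ,
 *		.flags = MAP_PRIVATE, .fd = fd, .offset = 4096,
 *	};
 *
 * maps from file offset 4096, while an offset that is not page-aligned
 * is rejected with -EINVAL by the offset_in_page() check above.
 */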
/*
* Some shared mappings will want the pages marked read-only
* to track write events. If so, we'll downgrade vm_page_prot
* to the private version (using protection_map[] without the
* VM_SHARED bit).
*/
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
vm_flags_t vm_flags = vma->vm_flags;
const struct vm_operations_struct *vm_ops = vma->vm_ops;
/* If it was private or non-writable, the write bit is already clear */
if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
return 0;
/* The backer wishes to know when pages are first written to? */
if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
return 1;
/* The open routine did something to the protections that pgprot_modify
* won't preserve? */
if (pgprot_val(vm_page_prot) !=
pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
return 0;
/* Do we need to track softdirty? */
if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
return 1;
/* Specialty mapping? */
if (vm_flags & VM_PFNMAP)
return 0;
/* Can the mapping track the dirty pages? */
return vma->vm_file && vma->vm_file->f_mapping &&
mapping_cap_account_dirty(vma->vm_file->f_mapping);
}
/*
* We account for memory if it's a private writeable mapping,
* not hugepages and VM_NORESERVE wasn't set.
*/
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
/*
* hugetlb has its own accounting separate from the core VM
* VM_HUGETLB may not be set yet so we cannot check for that flag.
*/
if (file && is_file_hugepages(file))
return 0;
return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
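/*
 * Example (illustrative): a MAP_PRIVATE PROT_READ|PROT_WRITE anonymous
 * mapping has (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) ==
 * VM_WRITE and is therefore charged; adding MAP_SHARED or
 * MAP_NORESERVE (with overcommit allowed) makes it exempt.
 */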
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
/* Check against address space limit. */
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
/*
* MAP_FIXED may remove pages of mappings that intersect with the
* requested mapping. Account for the pages it would unmap.
*/
nr_pages = count_vma_pages_range(mm, addr, addr + len);
if (!may_expand_vm(mm, vm_flags,
(len >> PAGE_SHIFT) - nr_pages))
return -ENOMEM;
}
/* Clear old maps */
while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
&rb_parent)) {
if (do_munmap(mm, addr, len, uf))
return -ENOMEM;
}
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;
vm_flags |= VM_ACCOUNT;
}
/*
* Can we just expand an old mapping?
*/
vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
if (vma)
goto out;
/*
* Determine the object being mapped and call the appropriate
* specific mapper. The address has already been validated, but
* not unmapped; the old maps, however, have been removed from the list.
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
error = -ENOMEM;
goto unacct_error;
}
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
INIT_VMA(vma);
if (file) {
if (vm_flags & VM_DENYWRITE) {
error = deny_write_access(file);
if (error)
goto free_vma;
}
if (vm_flags & VM_SHARED) {
error = mapping_map_writable(file->f_mapping);
if (error)
goto allow_write_and_free_vma;
}
/* ->mmap() can change vma->vm_file, but must guarantee that
* vma_link() below can deny write-access if VM_DENYWRITE is set
* and map writably if VM_SHARED is set. This usually means the
* new file must not have been exposed to user-space, yet.
*/
vma->vm_file = get_file(file);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
* Bug: If addr is changed, prev, rb_link, rb_parent should
* be updated for vma_link()
*/
WARN_ON_ONCE(addr != vma->vm_start);
addr = vma->vm_start;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
}
vma_link(mm, vma, prev, rb_link, rb_parent);
/* Once vma denies write, undo our temporary denial count */
if (file) {
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
}
file = vma->vm_file;
out:
perf_event_mmap(vma);
vm_write_begin(vma);
vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
mm->locked_vm += (len >> PAGE_SHIFT);
else
WRITE_ONCE(vma->vm_flags,
vma->vm_flags & VM_LOCKED_CLEAR_MASK);
}
if (file)
uprobe_mmap(vma);
/*
* A new (or expanded) vma always gets soft-dirty status.
* Otherwise the user-space soft-dirty page tracker would not
* be able to distinguish the situation where a vma is unmapped
* and a new one is then mapped in place (which must be treated
* as a completely new data area).
*/
WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY);
vma_set_page_prot(vma);
vm_write_end(vma);
return addr;
unmap_and_free_vma:
vma->vm_file = NULL;
fput(file);
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
vm_unacct_memory(charged);
return error;
}
unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
/*
* We implement the search by looking for an rbtree node that
* immediately follows a suitable gap. That is,
* - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
* - gap_end = vma->vm_start >= info->low_limit + length;
* - gap_end - gap_start >= length
*/
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/* Adjust search limits by the desired length */
if (info->high_limit < length)
return -ENOMEM;
high_limit = info->high_limit - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
goto check_highest;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
goto check_highest;
while (true) {
/* Visit left subtree if it looks promising */
gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
if (gap_end >= low_limit &&
gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
if (vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
goto check_highest;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
gap_start = vm_end_gap(vma->vm_prev);
gap_end = vm_start_gap(vma);
goto check_current;
}
}
}
check_highest:
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
if (gap_start > high_limit)
return -ENOMEM;
found:
/* We found a suitable gap. Clip it with the original low_limit. */
if (gap_start < info->low_limit)
gap_start = info->low_limit;
/* Adjust gap address to the desired alignment */
gap_start += (info->align_offset - gap_start) & info->align_mask;
VM_BUG_ON(gap_start + info->length > info->high_limit);
VM_BUG_ON(gap_start + info->length > gap_end);
return gap_start;
}
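/*
 * Alignment example (illustrative): with align_mask = 0xffff (64K - 1)
 * and align_offset = 0, a gap starting at 0x12345000 is advanced by
 * (0 - 0x12345000) & 0xffff = 0xb000, landing on 0x12350000, the next
 * 64K-aligned address within the gap.
 */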
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
*/
gap_end = info->high_limit;
if (gap_end < length)
return -ENOMEM;
high_limit = gap_end - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
if (gap_start <= high_limit)
goto found_highest;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
return -ENOMEM;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
return -ENOMEM;
while (true) {
/* Visit right subtree if it looks promising */
gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
check_current:
/* Check if current node has a suitable gap */
gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit &&
gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
if (vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
return -ENOMEM;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
}
found:
/* We found a suitable gap. Clip it with the original high_limit. */
if (gap_end > info->high_limit)
gap_end = info->high_limit;
found_highest:
/* Compute highest gap address at the desired alignment */
gap_end -= info->length;
gap_end -= (gap_end - info->align_offset) & info->align_mask;
VM_BUG_ON(gap_end < info->low_limit);
VM_BUG_ON(gap_end < gap_start);
return gap_end;
}
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
* Ugly calling convention alert:
* Return value with the low bits set means error value,
* ie
* if (ret & ~PAGE_MASK)
* error = ret;
*
* This function "knows" that -ENOMEM has the bits set.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
return vm_unmapped_area(&info);
}
#endif
/*
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (offset_in_page(addr)) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
return addr;
}
#endif
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
unsigned long error = arch_mmap_check(addr, len, flags);
if (error)
return error;
/* Careful about overflows.. */
if (len > TASK_SIZE)
return -ENOMEM;
get_area = current->mm->get_unmapped_area;
if (file) {
if (file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
} else if (flags & MAP_SHARED) {
/*
* mmap_region() will call shmem_zero_setup() to create a file,
* so use shmem's get_unmapped_area in case it can be huge.
* do_mmap_pgoff() will clear pgoff, so match alignment.
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
}
addr = get_area(file, addr, len, pgoff, flags);
if (IS_ERR_VALUE(addr))
return addr;
if (addr > TASK_SIZE - len)
return -ENOMEM;
if (offset_in_page(addr))
return -EINVAL;
error = security_mmap_addr(addr);
return error ? error : addr;
}
EXPORT_SYMBOL(get_unmapped_area);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
static struct vm_area_struct *__find_vma(struct mm_struct *mm,
unsigned long addr)
{
struct rb_node *rb_node;
struct vm_area_struct *vma = NULL;
rb_node = mm->mm_rb.rb_node;
while (rb_node) {
struct vm_area_struct *tmp;
tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
if (tmp->vm_end > addr) {
vma = tmp;
if (tmp->vm_start <= addr)
break;
rb_node = rb_node->rb_left;
} else
rb_node = rb_node->rb_right;
}
return vma;
}
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
/* Check the cache first. */
vma = vmacache_find(mm, addr);
if (likely(vma))
return vma;
vma = __find_vma(mm, addr);
if (vma)
vmacache_update(addr, vma);
return vma;
}
EXPORT_SYMBOL(find_vma);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = NULL;
read_lock(&mm->mm_rb_lock);
vma = __find_vma(mm, addr);
if (vma)
atomic_inc(&vma->vm_ref_count);
read_unlock(&mm->mm_rb_lock);
return vma;
}
#endif
/*
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
struct vm_area_struct *vma;
vma = find_vma(mm, addr);
if (vma) {
*pprev = vma->vm_prev;
} else {
struct rb_node *rb_node = mm->mm_rb.rb_node;
*pprev = NULL;
while (rb_node) {
*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
rb_node = rb_node->rb_right;
}
}
return vma;
}
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
static int acct_stack_growth(struct vm_area_struct *vma,
unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
if (size > rlimit(RLIMIT_STACK))
return -ENOMEM;
/* mlock limit tests */
if (vma->vm_flags & VM_LOCKED) {
unsigned long locked;
unsigned long limit;
locked = mm->locked_vm + grow;
limit = rlimit(RLIMIT_MEMLOCK);
limit >>= PAGE_SHIFT;
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
/* Check to ensure the stack will not grow into a hugetlb-only region */
new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
vma->vm_end - size;
if (is_hugepage_only_range(vma->vm_mm, new_start, size))
return -EFAULT;
/*
* Overcommit.. This must be the final test, as it will
* update security statistics.
*/
if (security_vm_enough_memory_mm(mm, grow))
return -ENOMEM;
return 0;
}
#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next;
unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
address += PAGE_SIZE;
/* Enforce stack_guard_gap */
gap_addr = address + stack_guard_gap;
/* Guard against overflow */
if (gap_addr < address || gap_addr > TASK_SIZE)
gap_addr = TASK_SIZE;
next = vma->vm_next;
if (next && next->vm_start < gap_addr &&
(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
if (!(next->vm_flags & VM_GROWSUP))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
}
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
unsigned long size, grow;
size = address - vma->vm_start;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
error = -ENOMEM;
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
* against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
anon_vma_interval_tree_post_update_vma(vma);
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
}
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
/*
* vma is the first one with address < vma->vm_start. Have to extend vma.
*/
int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
int error = 0;
address &= PAGE_MASK;
if (address < mmap_min_addr)
return -EPERM;
/* Enforce stack_guard_gap */
prev = vma->vm_prev;
/* Check that both stack segments have the same anon_vma? */
if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
if (address - prev->vm_end < stack_guard_gap)
return -ENOMEM;
}
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
unsigned long size, grow;
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
error = -ENOMEM;
if (grow <= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
* against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
WRITE_ONCE(vma->vm_start, address);
WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow);
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
}
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
}
/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
static int __init cmdline_parse_stack_guard_gap(char *p)
{
unsigned long val;
char *endptr;
val = simple_strtoul(p, &endptr, 10);
if (!*endptr)
stack_guard_gap = val << PAGE_SHIFT;
return 0;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
return expand_upwards(vma, address);
}
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma, *prev;
addr &= PAGE_MASK;
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
/* don't alter vm_end if the coredump is running */
if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
populate_vma_page_range(prev, addr, prev->vm_end, NULL);
return prev;
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
return expand_downwards(vma, address);
}
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
unsigned long start;
addr &= PAGE_MASK;
vma = find_vma(mm, addr);
if (!vma)
return NULL;
if (vma->vm_start <= addr)
return vma;
if (!(vma->vm_flags & VM_GROWSDOWN))
return NULL;
/* don't alter vm_start if the coredump is running */
if (!mmget_still_valid(mm))
return NULL;
start = vma->vm_start;
if (expand_stack(vma, addr))
return NULL;
if (vma->vm_flags & VM_LOCKED)
populate_vma_page_range(vma, addr, start, NULL);
return vma;
}
#endif
EXPORT_SYMBOL_GPL(find_extend_vma);
/*
* Ok - we have the memory areas we should free on the vma list,
* so release them, and do the vma updates.
*
* Called with the mm semaphore held.
*/
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
unsigned long nr_accounted = 0;
/* Update high watermark before we lower total_vm */
update_hiwater_vm(mm);
do {
long nrpages = vma_pages(vma);
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, -nrpages);
vma = remove_vma(vma);
} while (vma);
vm_unacct_memory(nr_accounted);
validate_mm(mm);
}
/*
* Get rid of page table information in the indicated region.
*
* Called with the mm semaphore held.
*/
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end)
{
struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
struct mmu_gather tlb;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, start, end);
}
/*
* Create a list of vma's touched by the unmap, removing them from the mm's
* vma list as we go..
*/
static void
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, unsigned long end)
{
struct vm_area_struct **insertion_point;
struct vm_area_struct *tail_vma = NULL;
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
vma_rb_erase(vma, mm);
mm->map_count--;
tail_vma = vma;
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
*insertion_point = vma;
if (vma) {
vma->vm_prev = prev;
vma_gap_update(vma);
} else
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
vmacache_invalidate(mm);
}
/*
* __split_vma() bypasses sysctl_max_map_count checking. We use this where it
* has already been checked or doesn't make sense to fail.
*/
int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
struct vm_area_struct *new;
int err;
if (vma->vm_ops && vma->vm_ops->split) {
err = vma->vm_ops->split(vma, addr);
if (err)
return err;
}
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new)
return -ENOMEM;
/* most fields are the same, copy all, and then fixup */
*new = *vma;
INIT_VMA(new);
if (new_below)
new->vm_end = addr;
else {
new->vm_start = addr;
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
err = anon_vma_clone(new, vma);
if (err)
goto out_free_mpol;
if (new->vm_file)
get_file(new->vm_file);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
if (new_below)
err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
((addr - new->vm_start) >> PAGE_SHIFT), new);
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
/* Success. */
if (!err)
return 0;
/* Clean everything up if vma_adjust failed. */
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
unlink_anon_vmas(new);
out_free_mpol:
mpol_put(vma_policy(new));
out_free_vma:
kmem_cache_free(vm_area_cachep, new);
return err;
}
/*
* Split a vma into two pieces at address 'addr', a new vma is allocated
* either for the first part or the tail.
*/
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
return __split_vma(mm, vma, addr, new_below);
}
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{
unsigned long end;
struct vm_area_struct *vma, *prev, *last;
if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
len = PAGE_ALIGN(len);
if (len == 0)
return -EINVAL;
/* Find the first overlapping VMA */
vma = find_vma(mm, start);
if (!vma)
return 0;
prev = vma->vm_prev;
/* we have start < vma->vm_end */
/* if it doesn't overlap, we have nothing.. */
end = start + len;
if (vma->vm_start >= end)
return 0;
/*
* If we need to split any vma, do it now to save pain later.
*
* Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
* unmapped vm_area_struct will remain in use: so lower split_vma
* places tmp vma above, and higher split_vma places tmp vma below.
*/
if (start > vma->vm_start) {
int error;
/*
* Make sure that map_count on return from munmap() will
* not exceed its limit; but let map_count go just above
* its limit temporarily, to help free resources as expected.
*/
if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
error = __split_vma(mm, vma, start, 0);
if (error)
return error;
prev = vma;
}
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
int error = __split_vma(mm, last, end, 1);
if (error)
return error;
}
vma = prev ? prev->vm_next : mm->mmap;
if (unlikely(uf)) {
/*
* If userfaultfd_unmap_prep returns an error the vmas
* will remain split, but userland will get a
* highly unexpected error anyway. This is no
* different from the case where the first of the two
* __split_vma calls fails, where we likewise don't undo the
* first split even though we could. This failure is unlikely
* enough that it's not worth optimizing for.
*/
int error = userfaultfd_unmap_prep(vma, start, end, uf);
if (error)
return error;
}
/*
* unlock any mlock()ed ranges before detaching vmas
*/
if (mm->locked_vm) {
struct vm_area_struct *tmp = vma;
while (tmp && tmp->vm_start < end) {
if (tmp->vm_flags & VM_LOCKED) {
mm->locked_vm -= vma_pages(tmp);
munlock_vma_pages_all(tmp);
}
tmp = tmp->vm_next;
}
}
/*
* Remove the vma's, and unmap the actual pages
*/
detach_vmas_to_be_unmapped(mm, vma, prev, end);
unmap_region(mm, vma, prev, start, end);
arch_unmap(mm, vma, start, end);
/* Fix up all other VM information */
remove_vma_list(mm, vma);
return 0;
}
EXPORT_SYMBOL(do_munmap);
int vm_munmap(unsigned long start, size_t len)
{
int ret;
struct mm_struct *mm = current->mm;
LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
ret = do_munmap(mm, start, len, &uf);
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
return ret;
}
EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
addr = untagged_addr(addr);
profile_munmap(addr);
return vm_munmap(addr, len);
}
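/*
 * Userspace view of a partial unmap (illustrative sketch):
 *
 *	char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	munmap(p + 4096, 4096);		// punch out the middle page
 *
 * do_munmap() calls __split_vma() twice here, leaving two one-page
 * vmas around the hole.
 */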
/*
* Emulation of deprecated remap_file_pages() syscall.
*/
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long populate = 0;
unsigned long ret = -EINVAL;
struct file *file;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
current->comm, current->pid);
if (prot)
return ret;
start = start & PAGE_MASK;
size = size & PAGE_MASK;
if (start + size <= start)
return ret;
/* Does pgoff wrap? */
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return ret;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
vma = find_vma(mm, start);
if (!vma || !(vma->vm_flags & VM_SHARED))
goto out;
if (start < vma->vm_start)
goto out;
if (start + size > vma->vm_end) {
struct vm_area_struct *next;
for (next = vma->vm_next; next; next = next->vm_next) {
/* hole between vmas ? */
if (next->vm_start != next->vm_prev->vm_end)
goto out;
if (next->vm_file != vma->vm_file)
goto out;
if (next->vm_flags != vma->vm_flags)
goto out;
if (start + size <= next->vm_end)
break;
}
if (!next)
goto out;
}
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
flags &= MAP_NONBLOCK;
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
if (vma->vm_flags & VM_LOCKED) {
struct vm_area_struct *tmp;
flags |= MAP_LOCKED;
/* drop PG_Mlocked flag for over-mapped range */
for (tmp = vma; tmp && tmp->vm_start < start + size;
tmp = tmp->vm_next) {
/*
* Split pmd and munlock page on the border
* of the range.
*/
vma_adjust_trans_huge(tmp, start, start + size, 0);
munlock_vma_pages_range(tmp,
max(tmp->vm_start, start),
min(tmp->vm_end, start + size));
}
}
file = get_file(vma->vm_file);
ret = do_mmap_pgoff(vma->vm_file, start, size,
prot, flags, pgoff, &populate, NULL);
fput(file);
out:
up_write(&mm->mmap_sem);
if (populate)
mm_populate(ret, populate);
if (!IS_ERR_VALUE(ret))
ret = 0;
return ret;
}
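/*
 * Illustrative mapping of the emulation (hypothetical values): the
 * deprecated call
 *
 *	remap_file_pages(start, size, 0, pgoff, 0);
 *
 * ends up, per the code above, roughly as
 *
 *	mmap(start, size, prot-of-vma,
 *	     MAP_SHARED | MAP_FIXED | MAP_POPULATE, vma->vm_file,
 *	     pgoff << PAGE_SHIFT);
 *
 * which is why only VM_SHARED mappings are accepted.
 */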
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
if (unlikely(down_read_trylock(&mm->mmap_sem))) {
WARN_ON(1);
up_read(&mm->mmap_sem);
}
#endif
}
/*
* This is really a simplified "do_mmap". It only handles
* anonymous maps. Eventually we may be able to do some
* brk-specific accounting here.
*/
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
if (offset_in_page(error))
return error;
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;
/*
* mm->mmap_sem is required to protect against another thread
* changing the mappings in case we sleep.
*/
verify_mm_writelocked(mm);
/*
* Clear old maps. this also does some error checking for us
*/
while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
&rb_parent)) {
if (do_munmap(mm, addr, len, uf))
return -ENOMEM;
}
/* Check against address space limits *after* clearing old maps... */
if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
return -ENOMEM;
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
vma = vma_merge(mm, prev, addr, addr + len, flags,
NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
if (vma)
goto out;
/*
* create a vma struct for an anonymous mapping
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
}
INIT_VMA(vma);
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
mm->data_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
return 0;
}
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
struct mm_struct *mm = current->mm;
unsigned long len;
int ret;
bool populate;
LIST_HEAD(uf);
len = PAGE_ALIGN(request);
if (len < request)
return -ENOMEM;
if (!len)
return 0;
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
ret = do_brk_flags(addr, len, flags, &uf);
populate = ((mm->def_flags & VM_LOCKED) != 0);
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
return ret;
}
EXPORT_SYMBOL(vm_brk_flags);
int vm_brk(unsigned long addr, unsigned long len)
{
return vm_brk_flags(addr, len, 0);
}
EXPORT_SYMBOL(vm_brk);
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
if (unlikely(mm_is_oom_victim(mm))) {
/*
* Manually reap the mm to free as much memory as possible.
* Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
* this mm from further consideration. Taking mm->mmap_sem for
* write after setting MMF_OOM_SKIP will guarantee that the oom
* reaper will not run on this mm again after mmap_sem is
* dropped.
*
* Nothing can be holding mm->mmap_sem here and the above call
* to mmu_notifier_release(mm) ensures mmu notifier callbacks in
* __oom_reap_task_mm() will not block.
*
* This needs to be done before calling munlock_vma_pages_all(),
* which clears VM_LOCKED, otherwise the oom reaper cannot
* reliably test it.
*/
__oom_reap_task_mm(mm);
set_bit(MMF_OOM_SKIP, &mm->flags);
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
}
if (mm->locked_vm) {
vma = mm->mmap;
while (vma) {
if (vma->vm_flags & VM_LOCKED)
munlock_vma_pages_all(vma);
vma = vma->vm_next;
}
}
arch_exit_mmap(mm);
vma = mm->mmap;
if (!vma) /* Can happen if dup_mmap() received an OOM */
return;
lru_add_drain();
flush_cache_mm(mm);
tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, 0, -1);
/*
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
}
/* Insert vm structure into process list sorted by address
* and into the inode's i_mmap tree. If vm_file is non-NULL
* then i_mmap_rwsem is taken here.
*/
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
if (find_vma_links(mm, vma->vm_start, vma->vm_end,
&prev, &rb_link, &rb_parent))
return -ENOMEM;
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
* are set. But now set the vm_pgoff it will almost certainly
* end up with (unless mremap moves it elsewhere before that
* first write fault), so /proc/pid/maps tells a consistent story.
*
* By setting it to reflect the virtual start address of the
* vma, merges and splits can happen in a seamless way, just
* using the existing file pgoff checks and manipulations.
* Similarly in do_mmap_pgoff and in do_brk.
*/
if (vma_is_anonymous(vma)) {
BUG_ON(vma->anon_vma);
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}
/*
* Copy the vma structure to a new location in the same mm,
* prior to moving page table entries, to effect an mremap move.
*/
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks)
{
struct vm_area_struct *vma = *vmap;
unsigned long vma_start = vma->vm_start;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
*/
if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
pgoff = addr >> PAGE_SHIFT;
faulted_in_anon_vma = false;
}
if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
return NULL; /* should never get here */
/* There are 3 cases to manage here:
* AAAA AAAA AAAA AAAA
* PPPP.... PPPP......NNNN PPPP....NNNN PP........NN
* PPPPPPPP(A) PPPP..NNNNNNNN(B) PPPPPPPPPPPP(1) NULL
* PPPPPPPPNNNN(2)
* PPPPNNNNNNNN(3)
*
* new_vma == prev in case A,1,2
* new_vma == next in case B,3
*/
new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
vma_policy(vma), vma->vm_userfaultfd_ctx,
vma_get_anon_name(vma), true);
if (new_vma) {
/*
* Source vma may have been merged into new_vma
*/
if (unlikely(vma_start >= new_vma->vm_start &&
vma_start < new_vma->vm_end)) {
/*
* The only way we can get a vma_merge with
* self during an mremap is if the vma hasn't
* been faulted in yet and we were allowed to
* reset the dst vma->vm_pgoff to the
* destination address of the mremap to allow
* the merge to happen. mremap must change the
* vm_pgoff linearity between src and dst vmas
* (in turn preventing a vma_merge) to be
* safe. It is only safe to keep the vm_pgoff
* linear if there are no pages mapped yet.
*/
VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
*vmap = vma = new_vma;
}
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
} else {
new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new_vma)
goto out;
*new_vma = *vma;
new_vma->vm_start = addr;
new_vma->vm_end = addr + len;
new_vma->vm_pgoff = pgoff;
if (vma_dup_policy(vma, new_vma))
goto out_free_vma;
INIT_VMA(new_vma);
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
get_file(new_vma->vm_file);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
/*
* As the VMA is linked right now, it may be hit by the
* speculative page fault handler. But we don't want it
* to start mapping pages in this area until the caller has
* moved the PTEs from the old VMA. To prevent
* that we protect it right now, and let the caller unprotect
* it once the move is done.
*/
vm_raw_write_begin(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
}
return new_vma;
out_free_mempol:
mpol_put(vma_policy(new_vma));
out_free_vma:
kmem_cache_free(vm_area_cachep, new_vma);
out:
return NULL;
}
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
*/
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
return false;
if (is_data_mapping(flags) &&
mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
/* Workaround for Valgrind */
if (rlimit(RLIMIT_DATA) == 0 &&
mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
return true;
if (!ignore_rlimit_data) {
pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits or use boot option ignore_rlimit_data.\n",
current->comm, current->pid,
(mm->data_vm + npages) << PAGE_SHIFT,
rlimit(RLIMIT_DATA));
return false;
}
}
return true;
}
void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
mm->total_vm += npages;
if (is_exec_mapping(flags))
mm->exec_vm += npages;
else if (is_stack_mapping(flags))
mm->stack_vm += npages;
else if (is_data_mapping(flags))
mm->data_vm += npages;
}
static int special_mapping_fault(struct vm_fault *vmf);
/*
* Having a close hook prevents vma merging regardless of flags.
*/
static void special_mapping_close(struct vm_area_struct *vma)
{
}
static const char *special_mapping_name(struct vm_area_struct *vma)
{
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}
static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
struct vm_special_mapping *sm = new_vma->vm_private_data;
if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
return -EFAULT;
if (sm->mremap)
return sm->mremap(sm, new_vma);
return 0;
}
static const struct vm_operations_struct special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
.mremap = special_mapping_mremap,
.name = special_mapping_name,
};
static const struct vm_operations_struct legacy_special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
};
static int special_mapping_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
struct page **pages;
if (vma->vm_ops == &legacy_special_mapping_vmops) {
pages = vma->vm_private_data;
} else {
struct vm_special_mapping *sm = vma->vm_private_data;
if (sm->fault)
return sm->fault(sm, vmf->vma, vmf);
pages = sm->pages;
}
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pgoff--;
if (*pages) {
struct page *page = *pages;
get_page(page);
vmf->page = page;
return 0;
}
return VM_FAULT_SIGBUS;
}
static struct vm_area_struct *__install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, void *priv,
const struct vm_operations_struct *ops)
{
int ret;
struct vm_area_struct *vma;
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
INIT_VMA(vma);
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = ops;
vma->vm_private_data = priv;
ret = insert_vm_struct(mm, vma);
if (ret)
goto out;
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
perf_event_mmap(vma);
return vma;
out:
kmem_cache_free(vm_area_cachep, vma);
return ERR_PTR(ret);
}
bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm)
{
return vma->vm_private_data == sm &&
(vma->vm_ops == &special_mapping_vmops ||
vma->vm_ops == &legacy_special_mapping_vmops);
}
/*
* Called with mm->mmap_sem held for writing.
* Insert a new vma covering the given region, with the given flags.
* Its pages are supplied by the given array of struct page *.
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
* The region past the last page supplied will always produce SIGBUS.
* The array pointer and the pages it points to are assumed to stay alive
* for as long as this mapping might exist.
*/
struct vm_area_struct *_install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, const struct vm_special_mapping *spec)
{
return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
&special_mapping_vmops);
}
int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, struct page **pages)
{
struct vm_area_struct *vma = __install_special_mapping(
mm, addr, len, vm_flags, (void *)pages,
&legacy_special_mapping_vmops);
return PTR_ERR_OR_ZERO(vma);
}
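/*
 * Typical use (illustrative sketch, loosely modelled on arch vdso
 * setup code; the names below are examples only):
 *
 *	static struct page *pages[3];	// 2 pages + NULL terminator
 *	...
 *	err = install_special_mapping(mm, addr, 2 * PAGE_SIZE,
 *				      VM_READ | VM_EXEC |
 *				      VM_MAYREAD | VM_MAYEXEC,
 *				      pages);
 */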
static DEFINE_MUTEX(mm_all_locks_mutex);
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
/*
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
/*
* We can safely modify head.next after taking the
* anon_vma->root->rwsem. If some other vma in this mm shares
* the same anon_vma we won't take it again.
*
* No need of atomic instructions here, head.next
* can't change from under us thanks to the
* anon_vma->root->rwsem.
*/
if (__test_and_set_bit(0, (unsigned long *)
&anon_vma->root->rb_root.rb_root.rb_node))
BUG();
}
}
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
/*
* AS_MM_ALL_LOCKS can't change from under us because
* we hold the mm_all_locks_mutex.
*
* Operations on ->flags have to be atomic because
* even if AS_MM_ALL_LOCKS is stable thanks to the
* mm_all_locks_mutex, there may be other cpus
* changing other bitflags in parallel to us.
*/
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
BUG();
down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
}
}
/*
* This operation locks against the VM for all pte/vma/mm related
* operations that could ever happen on a certain mm. This includes
* vmtruncate, try_to_unmap, and all page faults.
*
* The caller must take the mmap_sem in write mode before calling
* mm_take_all_locks(). The caller isn't allowed to release the
* mmap_sem until mm_drop_all_locks() returns.
*
* mmap_sem in write mode is required in order to block all operations
* that could modify pagetables and free pages without need of
* altering the vma layout. It's also needed in write mode to avoid new
* anon_vmas to be associated with existing vmas.
*
* A single task can't take more than one mm_take_all_locks() in a row
* or it would deadlock.
*
* The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
* mapping->flags ensure the same lock is not taken twice if more than one
* vma in this mm is backed by the same anon_vma or address_space.
*
* We take locks in the following order, according to the comment at the
* beginning of mm/rmap.c:
* - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
* hugetlb mapping);
* - all i_mmap_rwsem locks;
* - all anon_vma->rwsem locks.
*
* We can take all locks within these types randomly because the VM code
* doesn't nest them and we are protected from parallel mm_take_all_locks() by
* mm_all_locks_mutex.
*
* mm_take_all_locks() and mm_drop_all_locks are expensive operations
* that may have to take thousands of locks.
*
* mm_take_all_locks() can fail if it's interrupted by signals.
*/
int mm_take_all_locks(struct mm_struct *mm)
{
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
BUG_ON(down_read_trylock(&mm->mmap_sem));
mutex_lock(&mm_all_locks_mutex);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
if (vma->vm_file && vma->vm_file->f_mapping &&
is_vm_hugetlb_page(vma))
vm_lock_mapping(mm, vma->vm_file->f_mapping);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
if (vma->vm_file && vma->vm_file->f_mapping &&
!is_vm_hugetlb_page(vma))
vm_lock_mapping(mm, vma->vm_file->f_mapping);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (signal_pending(current))
goto out_unlock;
if (vma->anon_vma)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vm_lock_anon_vma(mm, avc->anon_vma);
}
return 0;
out_unlock:
mm_drop_all_locks(mm);
return -EINTR;
}
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
/*
* The LSB of head.next can't change to 0 from under
* us because we hold the mm_all_locks_mutex.
*
* We must however clear the bitflag before unlocking
* the vma so the users using the anon_vma->rb_root will
* never see our bitflag.
*
* No need of atomic instructions here, head.next
* can't change from under us until we release the
* anon_vma->root->rwsem.
*/
if (!__test_and_clear_bit(0, (unsigned long *)
&anon_vma->root->rb_root.rb_root.rb_node))
BUG();
anon_vma_unlock_write(anon_vma);
}
}
static void vm_unlock_mapping(struct address_space *mapping)
{
if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
/*
* AS_MM_ALL_LOCKS can't change to 0 from under us
* because we hold the mm_all_locks_mutex.
*/
i_mmap_unlock_write(mapping);
if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
&mapping->flags))
BUG();
}
}
/*
* The mmap_sem cannot be released by the caller until
* mm_drop_all_locks() returns.
*/
void mm_drop_all_locks(struct mm_struct *mm)
{
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
BUG_ON(down_read_trylock(&mm->mmap_sem));
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vm_unlock_anon_vma(avc->anon_vma);
if (vma->vm_file && vma->vm_file->f_mapping)
vm_unlock_mapping(vma->vm_file->f_mapping);
}
mutex_unlock(&mm_all_locks_mutex);
}
/*
* initialise the percpu counter for VM
*/
void __init mmap_init(void)
{
int ret;
ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
VM_BUG_ON(ret);
}
/*
* Initialise sysctl_user_reserve_kbytes.
*
* This is intended to prevent a user from starting a single memory hogging
* process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
* mode.
*
* The default value is min(3% of free memory, 128MB)
* 128MB is enough to recover with sshd/login, bash, and top/kill.
*/
static int init_user_reserve(void)
{
unsigned long free_kbytes;
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
return 0;
}
subsys_initcall(init_user_reserve);
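/*
 * Worked example for the formula above (illustrative): with 8GB free,
 * free_kbytes is 8388608 and free_kbytes / 32 is 262144 kB (256MB),
 * so the 1UL << 17 cap (131072 kB, i.e. 128MB) applies; with 1GB free,
 * the ~3% figure of 32768 kB (32MB) is used instead.
 */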
/*
* Initialise sysctl_admin_reserve_kbytes.
*
* The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
* to log in and kill a memory hogging process.
*
* Systems with more than 256MB will reserve 8MB, enough to recover
* with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
* only reserve 3% of free pages by default.
*/
static int init_admin_reserve(void)
{
unsigned long free_kbytes;
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
return 0;
}
subsys_initcall(init_admin_reserve);
/*
* Reinitialise user and admin reserves if memory is added or removed.
*
* The default user reserve max is 128MB, and the default max for the
* admin reserve is 8MB. These are usually, but not always, enough to
* enable recovery from a memory hogging process using login/sshd, a shell,
* and tools like top. It may make sense to increase or even disable the
* reserve depending on the existence of swap or variations in the recovery
* tools. So, the admin may have changed them.
*
* If memory is added and the reserves have been eliminated or increased above
* the default max, then we'll trust the admin.
*
* If memory is removed and there isn't enough free memory, then we
* need to reset the reserves.
*
* Otherwise keep the reserve set by the admin.
*/
static int reserve_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long tmp, free_kbytes;
switch (action) {
case MEM_ONLINE:
/* Default max is 128MB. Leave alone if modified by operator. */
tmp = sysctl_user_reserve_kbytes;
if (0 < tmp && tmp < (1UL << 17))
init_user_reserve();
/* Default max is 8MB. Leave alone if modified by operator. */
tmp = sysctl_admin_reserve_kbytes;
if (0 < tmp && tmp < (1UL << 13))
init_admin_reserve();
break;
case MEM_OFFLINE:
free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
if (sysctl_user_reserve_kbytes > free_kbytes) {
init_user_reserve();
pr_info("vm.user_reserve_kbytes reset to %lu\n",
sysctl_user_reserve_kbytes);
}
if (sysctl_admin_reserve_kbytes > free_kbytes) {
init_admin_reserve();
pr_info("vm.admin_reserve_kbytes reset to %lu\n",
sysctl_admin_reserve_kbytes);
}
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block reserve_mem_nb = {
.notifier_call = reserve_mem_notifier,
};
static int __meminit init_reserve_notifier(void)
{
if (register_hotmemory_notifier(&reserve_mem_nb))
pr_err("Failed registering memory add/remove notifier for admin reserve\n");
return 0;
}
subsys_initcall(init_reserve_notifier);