mirror of https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>	/* kmemleak_free_part(), kmemleak_ignore_phys() */
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

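/*
 * Bitmap helpers. Each bit in cma->bitmap tracks 2^order_per_bit pages,
 * so page counts and alignments must be converted to bitmap granularity.
 * For example, with order_per_bit == 0 an alignment request of
 * align_order == 4 (16 pages) yields the mask (1 << 4) - 1 == 0xf below.
 */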
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

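/*
 * Mark the bitmap slice backing @count pages at @pfn free again. Takes
 * cma->lock so it serialises against the find/set pair in cma_alloc().
 */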
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

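/*
 * Hand a reserved area over to the page allocator: allocate the tracking
 * bitmap and release every pageblock as MIGRATE_CMA. All pages must sit
 * in a single zone, since alloc_contig_range() cannot cross zones.
 */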
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	if (!PageHighMem(pfn_to_page(cma->base_pfn)))
		kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
				cma->count << PAGE_SHIFT);

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

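/*
 * Activation runs at core_initcall() time: the areas themselves were
 * carved out of memblock during early boot, but their pageblocks can
 * only be handed to the buddy allocator once the core MM is up.
 */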
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If NULL, a name of the form "cmaN" is
 *        generated from the area index.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

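/*
 * Illustrative use (not from this file): a reserved-memory handler that
 * has already placed a suitably aligned block into memblock.reserved
 * could register it as a CMA area like so:
 *
 *	struct cma *cma;
 *	int err = cma_init_reserved_mem(rmem_base, rmem_size, 0,
 *					"example-pool", &cma);
 *
 * "rmem_base"/"rmem_size" and the name are placeholders for the example.
 */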
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @name: The name of the area. See cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages both ends in CMA area could be merged into adjacent unmovable
	 * migratetype page by page allocator's buddy algorithm. In the case,
	 * you couldn't get a contiguous memory, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

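/*
 * Illustrative use (not from this file): early arch code might carve out
 * a 64 MiB area, letting memblock pick the base and using the default
 * limit (0, i.e. end of memory):
 *
 *	struct cma *cma;
 *	int err = cma_declare_contiguous(0, SZ_64M, 0, 0, 0,
 *					 false, "example", &cma);
 *
 * The size and name here are placeholders for the example.
 */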
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the allocation.
 *
 * This function allocates a physically contiguous range of pages from the
 * specified contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

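/*
 * Illustrative pairing (not from this file): a driver holding a struct
 * cma pointer could allocate and later free a 16-page buffer with:
 *
 *	struct page *page = cma_alloc(cma, 16, 0, GFP_KERNEL);
 *
 *	if (page) {
 *		...
 *		cma_release(cma, page, 16);
 *	}
 */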
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

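/*
 * Walk every registered CMA area, calling @it for each until a callback
 * returns non-zero; that value is then propagated to the caller. A
 * debugfs consumer, for instance, could pass a callback that registers
 * one entry per area.
 */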
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}