Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/vmstat: fix overflow in mod_zone_page_state()
  ocfs2/dlm: clear migration_pending when migration target goes down
  mm/memory_hotplug.c: check for missing sections in test_pages_in_a_zone()
  ocfs2: fix flock panic issue
  m32r: add io*_rep helpers
  m32r: fix build failure
  arch/x86/xen/suspend.c: include xen/xen.h
  mm: memcontrol: fix possible memcg leak due to interrupted reclaim
  ocfs2: fix BUG when calculate new backup super
commit 866be88a1a
arch/m32r/include/asm/Kbuild

@@ -3,6 +3,7 @@ generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += module.h
arch/m32r/include/asm/io.h

@@ -168,13 +168,21 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define writew_relaxed writew
 #define writel_relaxed writel
 
-#define ioread8 read
+#define ioread8 readb
 #define ioread16 readw
 #define ioread32 readl
 #define iowrite8 writeb
 #define iowrite16 writew
 #define iowrite32 writel
 
+#define ioread8_rep(p, dst, count) insb((unsigned long)(p), (dst), (count))
+#define ioread16_rep(p, dst, count) insw((unsigned long)(p), (dst), (count))
+#define ioread32_rep(p, dst, count) insl((unsigned long)(p), (dst), (count))
+
+#define iowrite8_rep(p, src, count) outsb((unsigned long)(p), (src), (count))
+#define iowrite16_rep(p, src, count) outsw((unsigned long)(p), (src), (count))
+#define iowrite32_rep(p, src, count) outsl((unsigned long)(p), (src), (count))
+
 #define ioread16be(addr) be16_to_cpu(readw(addr))
 #define ioread32be(addr) be32_to_cpu(readl(addr))
 #define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
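The io*_rep helpers added above wrap m32r's string-I/O primitives (insb/insw/insl and outsb/outsw/outsl), which transfer count items to or from a single register address. A minimal driver-style sketch of how such a helper is typically used follows; the device, register offset, and function name are hypothetical, not part of this patch:

#include <linux/io.h>

/* Hypothetical example: drain a 16-bit data FIFO into a buffer.
 * "regs" and the 0x10 offset are invented for illustration;
 * ioread16_rep() performs "count" reads of the same register,
 * matching the insw()-based definition added above. */
static void example_drain_fifo(void __iomem *regs, u16 *buf, size_t count)
{
        ioread16_rep(regs + 0x10, buf, count);
}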
arch/x86/xen/suspend.c

@@ -1,6 +1,7 @@
 #include <linux/types.h>
 #include <linux/tick.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>
fs/ocfs2/dlm/dlmmaster.c

@@ -2843,6 +2843,8 @@ again:
         res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
         if (!ret)
                 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+        else
+                res->migration_pending = 0;
         spin_unlock(&res->spinlock);
 
         /*
fs/ocfs2/locks.c

@@ -67,7 +67,10 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
                  */
 
                 locks_lock_file_wait(file,
-                                &(struct file_lock){.fl_type = F_UNLCK});
+                                     &(struct file_lock) {
+                                                .fl_type = F_UNLCK,
+                                                .fl_flags = FL_FLOCK
+                                     });
 
                 ocfs2_file_unlock(file);
         }
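The unlock request above is built as a C99 compound literal, and the fix adds FL_FLOCK to fl_flags so the VFS lock-wait path can classify the request; fields omitted from a compound literal are zero-initialized, which is how the original call ended up with empty flags. A standalone sketch of that initialization behavior, using hypothetical stand-in types rather than the kernel's struct file_lock:

#include <stdio.h>

struct file_lock_demo {
        int fl_type;    /* stand-in for F_UNLCK etc. */
        int fl_flags;   /* stand-in for FL_FLOCK; zero if omitted */
};

static void show(const struct file_lock_demo *fl)
{
        printf("type=%d flags=%d\n", fl->fl_type, fl->fl_flags);
}

int main(void)
{
        /* Omitted members of a compound literal are zeroed. */
        show(&(struct file_lock_demo){ .fl_type = 2 });
        show(&(struct file_lock_demo){ .fl_type = 2, .fl_flags = 1 });
        return 0;
}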
fs/ocfs2/resize.c

@@ -54,11 +54,12 @@
 static u16 ocfs2_calc_new_backup_super(struct inode *inode,
                                        struct ocfs2_group_desc *gd,
                                        u16 cl_cpg,
+                                       u16 old_bg_clusters,
                                        int set)
 {
         int i;
         u16 backups = 0;
-        u32 cluster;
+        u32 cluster, lgd_cluster;
         u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);
 
         for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
@@ -71,6 +72,12 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode,
                 else if (gd_blkno > lgd_blkno)
                         break;
 
+                /* check if already done backup super */
+                lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
+                lgd_cluster += old_bg_clusters;
+                if (lgd_cluster >= cluster)
+                        continue;
+
                 if (set)
                         ocfs2_set_bit(cluster % cl_cpg,
                                       (unsigned long *)gd->bg_bitmap);
@@ -99,6 +106,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
         u16 chain, num_bits, backups = 0;
         u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
         u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
+        u16 old_bg_clusters;
 
         trace_ocfs2_update_last_group_and_inode(new_clusters,
                                                 first_new_cluster);
@@ -112,6 +120,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
 
         group = (struct ocfs2_group_desc *)group_bh->b_data;
 
+        old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
         /* update the group first. */
         num_bits = new_clusters * cl_bpc;
         le16_add_cpu(&group->bg_bits, num_bits);
@@ -125,7 +134,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
                         OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
                 backups = ocfs2_calc_new_backup_super(bm_inode,
                                                       group,
-                                                      cl_cpg, 1);
+                                                      cl_cpg, old_bg_clusters, 1);
                 le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
         }
 
@@ -163,7 +172,7 @@ out_rollback:
         if (ret < 0) {
                 ocfs2_calc_new_backup_super(bm_inode,
                                             group,
-                                            cl_cpg, 0);
+                                            cl_cpg, old_bg_clusters, 0);
                 le16_add_cpu(&group->bg_free_bits_count, backups);
                 le16_add_cpu(&group->bg_bits, -1 * num_bits);
                 le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
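The new old_bg_clusters bookkeeping converts the last group's starting block into a cluster offset and skips backup superblocks that already fell within the group's previous size, so they are not reserved (or rolled back) twice. A small standalone sketch of that check, with made-up numbers rather than values from a real ocfs2 volume:

#include <stdio.h>

/* Illustration only: the real code derives lgd_cluster from the
 * group descriptor's block number and old_bg_clusters from the
 * group's old bg_bits / cl_bpc. */
static int already_backed_up(unsigned int lgd_cluster,
                             unsigned int old_bg_clusters,
                             unsigned int backup_cluster)
{
        return lgd_cluster + old_bg_clusters >= backup_cluster;
}

int main(void)
{
        /* Group starts at cluster 4000 and previously spanned 96. */
        printf("%d\n", already_backed_up(4000, 96, 4080)); /* 1: skip */
        printf("%d\n", already_backed_up(4000, 96, 4100)); /* 0: new area */
        return 0;
}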
include/linux/vmstat.h

@@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
 #ifdef CONFIG_SMP
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
 void __inc_zone_page_state(struct page *, enum zone_stat_item);
 void __dec_zone_page_state(struct page *, enum zone_stat_item);
 
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
 void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
@@ -205,7 +205,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * The functions directly modify the zone and global counters.
  */
 static inline void __mod_zone_page_state(struct zone *zone,
-                        enum zone_stat_item item, int delta)
+                        enum zone_stat_item item, long delta)
 {
         zone_page_state_add(delta, zone, item);
 }
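Widening delta from int to long matters on 64-bit kernels, where a single update can legitimately exceed INT_MAX pages (the overflow this series fixes); an int parameter silently truncates such values. A standalone demonstration of the truncation, with an arbitrary example value:

#include <stdio.h>

/* Mimics the old and new prototypes: passing a long through an
 * int parameter wraps for values outside int's range. */
static long add_int(long total, int delta)   { return total + delta; }
static long add_long(long total, long delta) { return total + delta; }

int main(void)
{
        long delta = 3L * 1024 * 1024 * 1024;   /* > INT_MAX */

        printf("int  parameter: %ld\n", add_int(0, delta));  /* wrapped */
        printf("long parameter: %ld\n", add_long(0, delta)); /* exact */
        return 0;
}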
mm/memcontrol.c

@@ -903,14 +903,20 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                 if (prev && reclaim->generation != iter->generation)
                         goto out_unlock;
 
-                do {
+                while (1) {
                         pos = READ_ONCE(iter->position);
+                        if (!pos || css_tryget(&pos->css))
+                                break;
                         /*
-                         * A racing update may change the position and
-                         * put the last reference, hence css_tryget(),
-                         * or retry to see the updated position.
+                         * css reference reached zero, so iter->position will
+                         * be cleared by ->css_released. However, we should not
+                         * rely on this happening soon, because ->css_released
+                         * is called from a work queue, and by busy-waiting we
+                         * might block it. So we clear iter->position right
+                         * away.
                          */
-                } while (pos && !css_tryget(&pos->css));
+                        (void)cmpxchg(&iter->position, pos, NULL);
+                }
         }
 
         if (pos)
@@ -956,17 +962,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                 }
 
                 if (reclaim) {
-                        if (cmpxchg(&iter->position, pos, memcg) == pos) {
-                                if (memcg)
-                                        css_get(&memcg->css);
-                                if (pos)
-                                        css_put(&pos->css);
-                        }
-
                         /*
-                         * pairs with css_tryget when dereferencing iter->position
-                         * above.
+                         * The position could have already been updated by a competing
+                         * thread, so check that the value hasn't changed since we read
+                         * it to avoid reclaiming from the same cgroup twice.
                          */
+                        (void)cmpxchg(&iter->position, pos, memcg);
+
                         if (pos)
                                 css_put(&pos->css);
 
@@ -999,6 +1001,28 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
                 css_put(&prev->css);
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+        struct mem_cgroup *memcg = dead_memcg;
+        struct mem_cgroup_reclaim_iter *iter;
+        struct mem_cgroup_per_zone *mz;
+        int nid, zid;
+        int i;
+
+        while ((memcg = parent_mem_cgroup(memcg))) {
+                for_each_node(nid) {
+                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+                                mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+                                for (i = 0; i <= DEF_PRIORITY; i++) {
+                                        iter = &mz->iter[i];
+                                        cmpxchg(&iter->position,
+                                                dead_memcg, NULL);
+                                }
+                        }
+                }
+        }
+}
+
 /*
  * Iteration constructs for visiting all cgroups (under a tree). If
  * loops are exited prematurely (break), mem_cgroup_iter_break() must
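invalidate_reclaim_iterators() relies on compare-and-swap semantics: each cached iterator slot is cleared only if it still points at the dying memcg, so a position that a concurrent reclaimer has already advanced is left untouched. A standalone C11-atomics sketch of that pattern, with hypothetical types in place of the kernel's structures:

#include <stdatomic.h>
#include <stdio.h>

struct obj { int id; };

/* Clear a cached pointer only if it still refers to the dying
 * object; any concurrently updated value is left alone. */
static void invalidate(struct obj *_Atomic *slot, struct obj *dead)
{
        struct obj *expected = dead;

        atomic_compare_exchange_strong(slot, &expected, NULL);
}

int main(void)
{
        struct obj a = { 1 }, b = { 2 };
        struct obj *_Atomic slot = &a;

        invalidate(&slot, &a);  /* matches: slot becomes NULL */
        printf("%p\n", (void *)atomic_load(&slot));

        slot = &b;
        invalidate(&slot, &a);  /* no match: slot keeps &b */
        printf("%p\n", (void *)atomic_load(&slot));
        return 0;
}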
@@ -4324,6 +4348,13 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
         wb_memcg_offline(memcg);
 }
 
+static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
+{
+        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+        invalidate_reclaim_iterators(memcg);
+}
+
 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
@@ -5185,6 +5216,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
         .css_alloc = mem_cgroup_css_alloc,
         .css_online = mem_cgroup_css_online,
         .css_offline = mem_cgroup_css_offline,
+        .css_released = mem_cgroup_css_released,
         .css_free = mem_cgroup_css_free,
         .css_reset = mem_cgroup_css_reset,
         .can_attach = mem_cgroup_can_attach,
mm/memory_hotplug.c

@@ -1375,23 +1375,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
  */
 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 {
-        unsigned long pfn;
+        unsigned long pfn, sec_end_pfn;
         struct zone *zone = NULL;
         struct page *page;
         int i;
-        for (pfn = start_pfn;
+        for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
              pfn < end_pfn;
-             pfn += MAX_ORDER_NR_PAGES) {
-                i = 0;
-                /* This is just a CONFIG_HOLES_IN_ZONE check.*/
-                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
-                        i++;
-                if (i == MAX_ORDER_NR_PAGES)
+             pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+                /* Make sure the memory section is present first */
+                if (!present_section_nr(pfn_to_section_nr(pfn)))
                         continue;
-                page = pfn_to_page(pfn + i);
-                if (zone && page_zone(page) != zone)
-                        return 0;
-                zone = page_zone(page);
+                for (; pfn < sec_end_pfn && pfn < end_pfn;
+                     pfn += MAX_ORDER_NR_PAGES) {
+                        i = 0;
+                        /* This is just a CONFIG_HOLES_IN_ZONE check.*/
+                        while ((i < MAX_ORDER_NR_PAGES) &&
+                                !pfn_valid_within(pfn + i))
+                                i++;
+                        if (i == MAX_ORDER_NR_PAGES)
+                                continue;
+                        page = pfn_to_page(pfn + i);
+                        if (zone && page_zone(page) != zone)
+                                return 0;
+                        zone = page_zone(page);
+                }
         }
         return 1;
 }
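The rewritten loop walks one sparsemem section at a time and consults present_section_nr() before touching any pfn, because page metadata may simply not exist for sections that were never populated. A standalone sketch of the two-level walk, with made-up constants and a stubbed presence check instead of the kernel's sparsemem data:

#include <stdio.h>

/* Hypothetical values; the real ones come from the kernel's
 * sparsemem configuration. */
#define PAGES_PER_SECTION 32768UL
#define SECTION_ALIGN_UP(pfn) \
        (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

static int section_present(unsigned long pfn)
{
        return (pfn / PAGES_PER_SECTION) != 3;  /* pretend #3 is absent */
}

int main(void)
{
        unsigned long pfn, sec_end_pfn;
        unsigned long start_pfn = 100000, end_pfn = 140000;

        for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
             pfn < end_pfn;
             pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                if (!section_present(pfn)) {
                        printf("skip absent section at pfn %lu\n", pfn);
                        continue;
                }
                printf("scan pfns %lu..%lu\n", pfn,
                       (sec_end_pfn < end_pfn ? sec_end_pfn : end_pfn) - 1);
        }
        return 0;
}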
mm/vmstat.c

@@ -219,7 +219,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * particular counter cannot be updated from interrupt context.
  */
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-                           int delta)
+                           long delta)
 {
         struct per_cpu_pageset __percpu *pcp = zone->pageset;
         s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -318,8 +318,8 @@ EXPORT_SYMBOL(__dec_zone_page_state);
  * 1 Overstepping half of threshold
  * -1 Overstepping minus half of threshold
  */
-static inline void mod_state(struct zone *zone,
-        enum zone_stat_item item, int delta, int overstep_mode)
+static inline void mod_state(struct zone *zone, enum zone_stat_item item,
+                             long delta, int overstep_mode)
 {
         struct per_cpu_pageset __percpu *pcp = zone->pageset;
         s8 __percpu *p = pcp->vm_stat_diff + item;
|
|||||||
}
|
}
|
||||||
|
|
||||||
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
|
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
|
||||||
int delta)
|
long delta)
|
||||||
{
|
{
|
||||||
mod_state(zone, item, delta, 0);
|
mod_state(zone, item, delta, 0);
|
||||||
}
|
}
|
||||||
@@ -384,7 +384,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * Use interrupt disable to serialize counter updates
  */
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-                         int delta)
+                         long delta)
 {
         unsigned long flags;
 