Mirror of https://github.com/rd-stuffs/msm-4.14.git
ANDROID: ashmem: Add shmem_set_file to mm/shmem.c
NOT FOR STAGING

This patch re-adds the original shmem_set_file to mm/shmem.c and converts
ashmem.c back to using it.

Change-Id: Ie604c9f8f4d0ee6bc2aae1a96d261c8373a1a2dc
CC: Brian Swetland <swetland@google.com>
CC: Colin Cross <ccross@android.com>
CC: Arve Hjønnevåg <arve@android.com>
CC: Dima Zavin <dima@android.com>
CC: Robert Love <rlove@google.com>
CC: Greg KH <greg@kroah.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
commit 5e1676886d
parent 72818893ef
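For orientation before the diff itself: shmem_set_file() hands an already-referenced shmem struct file to a VMA and installs shmem_vm_ops, so a driver's ->mmap handler can back shared mappings with its own shmem file instead of routing through shmem_zero_setup(). The sketch below mirrors the ashmem conversion in this commit; "struct my_obj" and my_mmap() are hypothetical names, while shmem_set_file(), get_file() and the vm_file handling come straight from the patch.

/*
 * Minimal sketch, not from this patch: a driver ->mmap handler using the
 * re-added shmem_set_file(). struct my_obj and my_mmap() are hypothetical;
 * the get_file()/fput()/vm_file handling mirrors the ashmem hunk below.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

struct my_obj {
        struct file *shmem_file;        /* e.g. obtained from shmem_file_setup() */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_obj *obj = file->private_data;

        /* shmem_set_file() takes over the reference, so grab one first. */
        get_file(obj->shmem_file);

        if (vma->vm_flags & VM_SHARED) {
                /* Back the shared mapping with the shmem file + shmem_vm_ops. */
                shmem_set_file(vma, obj->shmem_file);
        } else {
                /* Private mapping: only swap the backing file on the VMA. */
                if (vma->vm_file)
                        fput(vma->vm_file);
                vma->vm_file = obj->shmem_file;
        }
        return 0;
}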
drivers/staging/android/ashmem.c

@@ -414,22 +414,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 	get_file(asma->file);
 
-	/*
-	 * XXX - Reworked to use shmem_zero_setup() instead of
-	 * shmem_set_file while we're in staging. -jstultz
-	 */
-	if (vma->vm_flags & VM_SHARED) {
-		ret = shmem_zero_setup(vma);
-		if (ret) {
-			fput(asma->file);
-			goto out;
-		}
+	if (vma->vm_flags & VM_SHARED)
+		shmem_set_file(vma, asma->file);
+	else {
+		if (vma->vm_file)
+			fput(vma->vm_file);
+		vma->vm_file = asma->file;
 	}
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = asma->file;
-
 out:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
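From userspace, the VM_SHARED branch above is exactly what a MAP_SHARED mapping of an ashmem fd exercises. A minimal illustration follows; it assumes the ashmem UAPI header is reachable as <linux/ashmem.h> (on Android it ships with the platform headers), and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>       /* assumed header location for ASHMEM_SET_* */

int main(void)
{
        size_t size = 4096;
        int fd = open("/dev/ashmem", O_RDWR);
        char *p;

        if (fd < 0)
                return 1;
        ioctl(fd, ASHMEM_SET_NAME, "example-region");   /* name is optional */
        ioctl(fd, ASHMEM_SET_SIZE, size);               /* must precede mmap */

        /* MAP_SHARED sets VM_SHARED, so ashmem_mmap() takes the
         * shmem_set_file() path shown above. */
        p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "hello ashmem");
        printf("%s\n", p);

        munmap(p, size);
        close(fd);
        return 0;
}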
include/linux/mm.h

@@ -356,7 +356,7 @@ enum page_entry_size {
 /*
  * These are the virtual MM functions - opening of an area, closing and
  * unmapping it (needed to keep files on disk up-to-date etc), pointer
  * to the functions called when a no-page or a wp-page exception occurs.
  */
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
@@ -1181,6 +1181,7 @@ extern void pagefault_out_of_memory(void);
 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
 
 extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 
 extern bool can_do_mlock(void);
 extern int user_shm_lock(size_t, struct user_struct *);
mm/shmem.c

@@ -4216,6 +4216,14 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -4235,10 +4243,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = file;
-	vma->vm_ops = &shmem_vm_ops;
+	shmem_set_file(vma, file);
 
 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
 	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
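Callers that only need an anonymous shared mapping keep calling shmem_zero_setup(); after this hunk it allocates its zero-backed file as before and delegates the VMA wiring to shmem_set_file(). A minimal caller, sketched after the classic /dev/zero mmap pattern (my_zero_mmap() is a hypothetical name):

/*
 * Hedged sketch, not part of the patch: the shared case funnels through
 * shmem_zero_setup(), which in turn now uses shmem_set_file().
 */
#include <linux/mm.h>
#include <linux/shmem_fs.h>

static int my_zero_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);   /* shared: shmem-backed zero file */
        return 0;                               /* private: plain anonymous memory */
}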