bpf: Avoid allocating small buffers for map keys and values

Most, if not all, BPF map keys and values are small enough to fit on
the stack. Reserve small stack buffers for them so the common case
avoids dynamic memory allocation entirely, falling back to the heap
only when a key or value exceeds the reserved buffer size.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: azrim <mirzaspc@gmail.com>
This commit is contained in:
Sultan Alsawaf 2021-02-05 00:08:23 -08:00 committed by azrim
parent 621166e994
commit 86b3127828
No known key found for this signature in database
GPG Key ID: 497F8FB059B45D1C

View File

@ -500,6 +500,8 @@ static int map_lookup_elem(union bpf_attr *attr)
int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value, *ptr;
u8 key_onstack[SZ_16] __aligned(sizeof(long));
u8 value_onstack[SZ_64] __aligned(sizeof(long));
u32 value_size;
struct fd f;
int err;
@ -517,10 +519,18 @@ static int map_lookup_elem(union bpf_attr *attr)
goto err_put;
}
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
if (map->key_size <= sizeof(key_onstack)) {
key = key_onstack;
if (copy_from_user(key, ukey, map->key_size)) {
err = -EFAULT;
goto err_put;
}
} else {
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
}
}
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
@ -533,9 +543,13 @@ static int map_lookup_elem(union bpf_attr *attr)
value_size = map->value_size;
err = -ENOMEM;
value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
if (value_size <= sizeof(value_onstack)) {
value = value_onstack;
} else {
value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
}
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
@ -571,9 +585,11 @@ static int map_lookup_elem(union bpf_attr *attr)
err = 0;
free_value:
kfree(value);
if (value != value_onstack)
kfree(value);
free_key:
kfree(key);
if (key != key_onstack)
kfree(key);
err_put:
fdput(f);
return err;
@ -599,6 +615,8 @@ static int map_update_elem(union bpf_attr *attr)
int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u8 key_onstack[SZ_16] __aligned(sizeof(long));
u8 value_onstack[SZ_64] __aligned(sizeof(long));
u32 value_size;
struct fd f;
int err;
@ -616,10 +634,18 @@ static int map_update_elem(union bpf_attr *attr)
goto err_put;
}
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
if (map->key_size <= sizeof(key_onstack)) {
key = key_onstack;
if (copy_from_user(key, ukey, map->key_size)) {
err = -EFAULT;
goto err_put;
}
} else {
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
}
}
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
@ -629,10 +655,14 @@ static int map_update_elem(union bpf_attr *attr)
else
value_size = map->value_size;
err = -ENOMEM;
value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
if (value_size <= sizeof(value_onstack)) {
value = value_onstack;
} else {
err = -ENOMEM;
value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
if (!value)
goto free_key;
}
err = -EFAULT;
if (copy_from_user(value, uvalue, value_size) != 0)
@ -673,9 +703,11 @@ static int map_update_elem(union bpf_attr *attr)
if (!err)
trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
kfree(value);
if (value != value_onstack)
kfree(value);
free_key:
kfree(key);
if (key != key_onstack)
kfree(key);
err_put:
fdput(f);
return err;
@ -690,6 +722,7 @@ static int map_delete_elem(union bpf_attr *attr)
struct bpf_map *map;
struct fd f;
void *key;
u8 key_onstack[SZ_16] __aligned(sizeof(long));
int err;
if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
@ -705,10 +738,18 @@ static int map_delete_elem(union bpf_attr *attr)
goto err_put;
}
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
if (map->key_size <= sizeof(key_onstack)) {
key = key_onstack;
if (copy_from_user(key, ukey, map->key_size)) {
err = -EFAULT;
goto err_put;
}
} else {
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
}
}
preempt_disable();
@ -722,7 +763,8 @@ static int map_delete_elem(union bpf_attr *attr)
if (!err)
trace_bpf_map_delete_elem(map, ufd, key);
kfree(key);
if (key != key_onstack)
kfree(key);
err_put:
fdput(f);
return err;
@ -738,6 +780,8 @@ static int map_get_next_key(union bpf_attr *attr)
int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *next_key;
u8 key_onstack[SZ_16] __aligned(sizeof(long));
u8 next_key_onstack[SZ_64] __aligned(sizeof(long));
struct fd f;
int err;
@ -755,19 +799,31 @@ static int map_get_next_key(union bpf_attr *attr)
}
if (ukey) {
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
if (map->key_size <= sizeof(key_onstack)) {
key = key_onstack;
if (copy_from_user(key, ukey, map->key_size)) {
err = -EFAULT;
goto err_put;
}
} else {
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
}
}
} else {
key = NULL;
}
err = -ENOMEM;
next_key = kmalloc(map->key_size, GFP_USER);
if (!next_key)
goto free_key;
if (map->key_size <= sizeof(next_key_onstack)) {
next_key = next_key_onstack;
} else {
next_key = kmalloc(map->key_size, GFP_USER);
if (!next_key)
goto free_key;
}
rcu_read_lock();
err = map->ops->map_get_next_key(map, key, next_key);
@ -783,9 +839,11 @@ static int map_get_next_key(union bpf_attr *attr)
err = 0;
free_next_key:
kfree(next_key);
if (next_key != next_key_onstack)
kfree(next_key);
free_key:
kfree(key);
if (key != key_onstack)
kfree(key);
err_put:
fdput(f);
return err;