Mirror of https://github.com/rd-stuffs/msm-4.14.git
drm/msm: Recycle atomic state allocations to speed up atomic commits
Constantly allocating and freeing all of the data structures associated with atomic commits adds up and incurs a lot of latency not only when allocating, but also when freeing. Since we know what the maximum number of CRTCs, planes, and connectors is, we can skip the constant allocation-and-free for the same structures and instead just recycle them via a lockless list. This also moves the commit cleanup so that it comes after CRTC waiters are woken up, allowing the ioctl to proceed without waiting around for some housekeeping to finish.

Since it's difficult to audit which parameters, if any, could exceed the defined maximums in the msm_kms driver, dynamic allocations are retained as a fallback so that userspace can't craft a malicious ioctl that results in buffer overflows.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: KenHV <yo@kenharris.xyz>
Signed-off-by: Forenche <prahul2003@gmail.com>
parent 9f6abbf1da
commit 37c1db3881
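The core of the change is a small recycling cache for atomic-state objects. Below is a minimal sketch of that pattern, not taken from the patch itself; foo_state, foo_state_get() and foo_state_put() are made-up names for illustration. Freed objects are pushed onto a lockless llist instead of being kfree()'d, and the allocator pops from that list before falling back to kzalloc(). llist_add() needs no locking; llist_del_first() only has to be serialized against other consumers, hence the spinlock around the pop.

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* Hypothetical object type; stands in for msm_kms_state. */
struct foo_state {
        int payload;
        struct llist_node llist;
};

static LLIST_HEAD(foo_state_cache);
static DEFINE_SPINLOCK(foo_cache_lock);

static struct foo_state *foo_state_get(void)
{
        struct llist_node *n;

        /* Try to reuse a previously freed object first. */
        spin_lock(&foo_cache_lock);
        n = llist_del_first(&foo_state_cache);
        spin_unlock(&foo_cache_lock);
        if (n) {
                struct foo_state *s = container_of(n, struct foo_state, llist);

                memset(s, 0, sizeof(*s));
                return s;
        }

        /* Cache empty: fall back to a fresh allocation. */
        return kzalloc(sizeof(struct foo_state), GFP_KERNEL);
}

static void foo_state_put(struct foo_state *s)
{
        /* Lockless push; concurrent producers need no lock at all. */
        llist_add(&s->llist, &foo_state_cache);
}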
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1097,9 +1097,21 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
 		struct __drm_connnectors_state *c;
 		int alloc = max(index + 1, config->num_connector);
 
-		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
-		if (!c)
-			return ERR_PTR(-ENOMEM);
+		if (state->connectors_preallocated) {
+			state->connectors_preallocated = false;
+			c = kmalloc(alloc * sizeof(*state->connectors),
+				    GFP_KERNEL);
+			if (!c)
+				return ERR_PTR(-ENOMEM);
+			memcpy(c, state->connectors,
+			       sizeof(*state->connectors) * state->num_connector);
+		} else {
+			c = krealloc(state->connectors,
+				     alloc * sizeof(*state->connectors),
+				     GFP_KERNEL);
+			if (!c)
+				return ERR_PTR(-ENOMEM);
+		}
 
 		state->connectors = c;
 		memset(&state->connectors[state->num_connector], 0,
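One detail worth noting about the hunk above: when the connector array is the one preallocated inside msm_kms_state, it was never a standalone heap allocation, so it must not be handed to krealloc(). The first time it overflows, a fresh buffer is kmalloc()'d and the old contents copied over, and only later growth goes through krealloc(). A hypothetical helper showing that pattern in isolation (grow_buf and its parameters are invented for illustration, not part of the patch):

#include <linux/slab.h>
#include <linux/string.h>

static void *grow_buf(void *old, size_t old_len, size_t new_len,
                      bool *preallocated)
{
        void *n;

        if (*preallocated) {
                /* Embedded storage: copy out instead of krealloc()ing it. */
                n = kmalloc(new_len, GFP_KERNEL);
                if (!n)
                        return NULL;
                memcpy(n, old, old_len);
                *preallocated = false;
                return n;
        }

        /* Ordinary heap buffer: krealloc() can grow or move it. */
        return krealloc(old, new_len, GFP_KERNEL);
}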
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -28,16 +28,9 @@
 
 #define MULTIPLE_CONN_DETECTED(x) (x > 1)
 
-struct msm_commit {
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	uint32_t crtc_mask;
-	uint32_t plane_mask;
-	bool nonblock;
-	struct kthread_work commit_work;
-};
-
 static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
+static LLIST_HEAD(msm_atomic_state_cache);
+static DEFINE_SPINLOCK(msm_atomic_cache_lock);
 
 /**
  * msm_drm_register_client - register a client notifier
@@ -126,13 +119,6 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask,
 	spin_unlock(&priv->pending_crtcs_event.lock);
 }
 
-static void commit_destroy(struct msm_commit *c)
-{
-	end_atomic(c->dev->dev_private, c->crtc_mask, c->plane_mask);
-	if (c->nonblock)
-		kfree(c);
-}
-
 static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
 			struct drm_crtc_state *crtc_state, bool enable)
 {
@@ -576,12 +562,21 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 	SDE_ATRACE_END("msm_enable");
 }
 
+static void complete_commit_cleanup(struct msm_commit *c)
+{
+	struct msm_kms_state *state = container_of(c, typeof(*state), commit);
+	struct drm_atomic_state *s = &state->base;
+
+	drm_atomic_state_put(s);
+}
+
 /* The (potentially) asynchronous part of the commit. At this point
  * nothing can fail short of armageddon.
  */
 static void complete_commit(struct msm_commit *c)
 {
-	struct drm_atomic_state *state = c->state;
+	struct msm_kms_state *kstate = container_of(c, typeof(*kstate), commit);
+	struct drm_atomic_state *state = &kstate->base;
 	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
@@ -615,14 +610,12 @@ static void complete_commit(struct msm_commit *c)
 
 	kms->funcs->complete_commit(kms, state);
 
-	drm_atomic_state_put(state);
-
-	commit_destroy(c);
+	end_atomic(priv, c->crtc_mask, c->plane_mask);
 }
 
 static void _msm_drm_commit_work_cb(struct kthread_work *work)
 {
 	struct msm_commit *commit = container_of(work, typeof(*commit),
 						 commit_work);
 	struct pm_qos_request req = {
 		.type = PM_QOS_REQ_AFFINE_CORES,
@@ -639,37 +632,19 @@ static void _msm_drm_commit_work_cb(struct kthread_work *work)
 	complete_commit(commit);
 	SDE_ATRACE_END("complete_commit");
 	pm_qos_remove_request(&req);
-}
-
-static struct msm_commit *commit_init(struct drm_atomic_state *state,
-		bool nonblock)
-{
-	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
-	if (!c)
-		return NULL;
-
-	c->dev = state->dev;
-	c->state = state;
-	c->nonblock = nonblock;
-
-	kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
-
-	return c;
+	complete_commit_cleanup(commit);
 }
 
 /* Start display thread function */
 static void msm_atomic_commit_dispatch(struct drm_device *dev,
-		struct drm_atomic_state *state, struct msm_commit *commit)
+		struct drm_atomic_state *state, struct msm_commit *commit,
+		bool nonblock)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
 	struct drm_crtc_state *crtc_state = NULL;
 	int ret = -EINVAL, i = 0, j = 0;
-	bool nonblock;
 
-	/* cache since work will kfree commit in non-blocking case */
-	nonblock = commit->nonblock;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		for (j = 0; j < priv->num_crtcs; j++) {
@@ -709,13 +684,10 @@ static void msm_atomic_commit_dispatch(struct drm_device *dev,
 		 */
 		DRM_ERROR("failed to dispatch commit to any CRTC\n");
-		complete_commit(commit);
+		complete_commit_cleanup(commit);
 	} else if (!nonblock) {
 		kthread_flush_work(&commit->commit_work);
 	}
 
-	/* free nonblocking commits in this context, after processing */
-	if (!nonblock)
-		kfree(commit);
 }
 
 /**
@@ -753,11 +725,7 @@ int msm_atomic_commit(struct drm_device *dev,
 		return ret;
 	}
 
-	c = commit_init(state, nonblock);
-	if (!c) {
-		ret = -ENOMEM;
-		goto error;
-	}
+	c = &to_kms_state(state)->commit;
 
 	/*
 	 * Figure out what crtcs we have:
@@ -795,7 +763,7 @@ retry:
 	 */
 	ret = start_atomic(dev->dev_private, c->crtc_mask, c->plane_mask);
 	if (ret)
-		goto err_free;
+		goto error;
 
 	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
 
@@ -803,6 +771,7 @@ retry:
 	pm_qos_update_request(&priv->pm_irq_req, 100);
 	mod_delayed_work(system_unbound_wq, &priv->pm_unreq_dwork, HZ / 10);
 
+#ifdef CONFIG_DRM_MSM_MDP5
 	/*
 	 * This is the point of no return - everything below never fails except
 	 * when the hw goes bonghits. Which means we can commit the new state on
@@ -812,6 +781,7 @@ retry:
 	 */
 	if (to_kms_state(state)->state)
 		priv->kms->funcs->swap_state(priv->kms, state);
+#endif
 
 	/*
 	 * Provide the driver a chance to prepare for output fences. This is
@@ -839,13 +809,11 @@ retry:
 	 */
 
 	drm_atomic_state_get(state);
-	msm_atomic_commit_dispatch(dev, state, c);
+	msm_atomic_commit_dispatch(dev, state, c, nonblock);
 
 	SDE_ATRACE_END("atomic_commit");
 	return 0;
 
-err_free:
-	kfree(c);
 error:
 	drm_atomic_helper_cleanup_planes(dev, state);
 	SDE_ATRACE_END("atomic_commit");
@@ -854,13 +822,54 @@ error:
 
 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
 {
-	struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+	struct msm_kms_state *state;
+	struct drm_atomic_state *s;
+	struct llist_node *n;
 
-	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
-		kfree(state);
-		return NULL;
+	if (unlikely(dev->mode_config.num_crtc > MAX_CRTCS ||
+		     dev->mode_config.num_total_plane > MAX_PLANES)) {
+		/*
+		 * Don't include the large preallocated arrays. Allocate up to
+		 * the offset of `crtcs` and then one more byte so that the
+		 * address of where `crtcs` would be if the struct were fully
+		 * allocated is reserved, to make the msm_atomic_state_free()
+		 * check guaranteed to be reliable.
+		 */
+		state = kzalloc(offsetof(typeof(*state), crtcs) + 1, GFP_KERNEL);
+		if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+			kfree(state);
+			return NULL;
+		}
+
+		goto init_commit_work;
 	}
 
+	spin_lock(&msm_atomic_cache_lock);
+	n = llist_del_first(&msm_atomic_state_cache);
+	spin_unlock(&msm_atomic_cache_lock);
+	if (likely(n)) {
+		state = container_of(n, typeof(*state), llist);
+		memset(state, 0, sizeof(*state));
+	} else {
+		state = kzalloc(sizeof(*state), GFP_KERNEL);
+		if (!state)
+			return NULL;
+	}
+
+	s = &state->base;
+	s->connectors = state->connectors;
+	s->num_connector = MAX_CONNECTORS;
+	s->connectors_preallocated = true;
+
+	/* Perform the generic init done in drm_atomic_state_init() */
+	kref_init(&s->ref);
+	s->allow_modeset = true;
+	s->crtcs = state->crtcs;
+	s->planes = state->planes;
+	s->dev = dev;
+
+init_commit_work:
+	kthread_init_work(&state->commit.commit_work, _msm_drm_commit_work_cb);
 	return &state->base;
 }
 
@@ -868,13 +877,30 @@ void msm_atomic_state_clear(struct drm_atomic_state *s)
 {
 	struct msm_kms_state *state = to_kms_state(s);
 	drm_atomic_state_default_clear(&state->base);
+#ifdef CONFIG_DRM_MSM_MDP5
 	kfree(state->state);
 	state->state = NULL;
+#endif
 }
 
-void msm_atomic_state_free(struct drm_atomic_state *state)
+void msm_atomic_state_free(struct drm_atomic_state *s)
 {
-	kfree(to_kms_state(state)->state);
-	drm_atomic_state_default_release(state);
-	kfree(state);
+	struct msm_kms_state *state = to_kms_state(s);
+
+#ifdef CONFIG_DRM_MSM_MDP5
+	kfree(state->state);
+#endif
+	/*
+	 * Check if this was a kms struct with preallocated arrays by looking at
+	 * the address of `crtcs` and seeing if it points inside the kms struct.
+	 */
+	if (likely(s->crtcs == state->crtcs)) {
+		if (!s->connectors_preallocated)
+			kfree(s->connectors);
+		kfree(s->private_objs);
+		llist_add(&state->llist, &msm_atomic_state_cache);
+	} else {
+		drm_atomic_state_default_release(s);
+		kfree(state);
+	}
 }
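The pointer check above relies on a layout trick that is easy to miss: when the compile-time maximums are exceeded, msm_atomic_state_alloc() allocates the struct only up to offsetof(..., crtcs) plus one byte, presumably so that the address where the embedded `crtcs` array would live still lies inside that allocation and can never coincide with the start of the array that drm_atomic_state_init() sets up separately. A hypothetical standalone sketch of the same idea (demo_state and its helpers are invented names, not from the patch):

#include <linux/slab.h>
#include <linux/stddef.h>

/* Hypothetical container mirroring the msm_kms_state layout trick. */
struct demo_state {
        long header;
        long crtcs[16];         /* large optional tail */
};

static struct demo_state *demo_alloc(bool full)
{
        /*
         * The truncated allocation still owns the first byte at the address
         * of `crtcs`, so no other allocation can ever start exactly there.
         */
        size_t sz = full ? sizeof(struct demo_state)
                         : offsetof(struct demo_state, crtcs) + 1;

        return kzalloc(sz, GFP_KERNEL);
}

/*
 * `crtcs_ptr` is whatever pointer the rest of the code uses for the array:
 * it equals &d->crtcs[0] only when the tail was really allocated, so the
 * comparison reliably tells the two layouts apart at free time.
 */
static bool demo_is_full(struct demo_state *d, long *crtcs_ptr)
{
        return crtcs_ptr == d->crtcs;
}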
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -137,6 +137,12 @@ struct msm_kms {
 	struct msm_gem_address_space *aspace;
 };
 
+struct msm_commit {
+	uint32_t crtc_mask;
+	uint32_t plane_mask;
+	struct kthread_work commit_work;
+};
+
 /**
  * Subclass of drm_atomic_state, to allow kms backend to have driver
  * private global state. The kms backend can do whatever it wants
@@ -145,7 +151,20 @@ struct msm_kms {
  */
 struct msm_kms_state {
 	struct drm_atomic_state base;
+#ifdef CONFIG_DRM_MSM_MDP5
 	void *state;
+#endif
+	struct msm_commit commit;
+	/*
+	 * Everything below `commit` may not be allocated in the struct. The
+	 * `crtcs` member must come right after `commit`, as its placement is
+	 * used to determine if the struct was partially allocated or fully
+	 * allocated.
+	 */
+	struct __drm_crtcs_state crtcs[MAX_CRTCS];
+	struct __drm_connnectors_state connectors[MAX_CONNECTORS];
+	struct __drm_planes_state planes[MAX_PLANES];
+	struct llist_node llist;
 };
 #define to_kms_state(x) container_of(x, struct msm_kms_state, base)
 
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -227,6 +227,7 @@ struct drm_atomic_state {
 	bool allow_modeset : 1;
 	bool legacy_cursor_update : 1;
 	bool async_update : 1;
+	bool connectors_preallocated : 1;
 	struct __drm_planes_state *planes;
 	struct __drm_crtcs_state *crtcs;
 	int num_connector;