Merge "dfc: fix null pointer access"

This commit is contained in:
qctecmdr 2020-06-29 12:49:22 -07:00 committed by Gerrit - the friendly Code Review server
commit 0a5474b451
6 changed files with 168 additions and 12 deletions

View File

@ -126,7 +126,7 @@ static void rmnet_vnd_uninit(struct net_device *dev)
gro_cells_destroy(&priv->gro_cells);
free_percpu(priv->pcpu_stats);
qos = priv->qos_info;
qos = rcu_dereference(priv->qos_info);
RCU_INIT_POINTER(priv->qos_info, NULL);
qmi_rmnet_qos_exit_pre(qos);
}
@ -370,7 +370,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
priv->mux_id = id;
priv->qos_info = qmi_rmnet_qos_init(real_dev, id);
rcu_assign_pointer(priv->qos_info,
qmi_rmnet_qos_init(real_dev, rmnet_dev, id));
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}

View File

@ -1034,14 +1034,18 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
itm->grant_size = adjusted_grant;
/* No further query if the adjusted grant is less
* than 20% of the original grant
* than 20% of the original grant. Add to watch to
* recover if no indication is received.
*/
if (dfc_qmap && is_query &&
itm->grant_size < (fc_info->num_bytes / 5))
itm->grant_size < (fc_info->num_bytes / 5)) {
itm->grant_thresh = itm->grant_size;
else
qmi_rmnet_watchdog_add(itm);
} else {
itm->grant_thresh =
qmi_rmnet_grant_per(itm->grant_size);
qmi_rmnet_watchdog_remove(itm);
}
itm->seq = fc_info->seq_num;
itm->ack_req = ack_req;
@ -1143,6 +1147,7 @@ static void dfc_update_tx_link_status(struct net_device *dev,
itm->grant_size = 0;
itm->tcp_bidir = false;
itm->bytes_in_flight = 0;
qmi_rmnet_watchdog_remove(itm);
dfc_bearer_flow_ctl(dev, itm, qos);
} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
itm->grant_size = DEFAULT_GRANT;

View File

@ -55,6 +55,7 @@ MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check in ms");
1 : rmnet_wq_frequency/10) * (HZ/100))
#define NO_DELAY (0x0000 * HZ)
#define PS_INTERVAL_KT (ms_to_ktime(1000))
#define WATCHDOG_EXPIRE_JF (msecs_to_jiffies(50))
#ifdef CONFIG_QCOM_QMI_DFC
static unsigned int qmi_rmnet_scale_factor = 5;
@ -235,6 +236,89 @@ static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
}
}
/**
 * qmi_rmnet_watchdog_fn - watchdog timer func
 *
 * @t: the timer_list embedded in the watched rmnet_bearer_map.
 *
 * Fires WATCHDOG_EXPIRE_JF after the bearer was added to watch. If the
 * watch was not quit in the meantime, treats the missing DFC indication
 * as a possible stall and attempts recovery under qos_lock.
 */
static void qmi_rmnet_watchdog_fn(struct timer_list *t)
{
struct rmnet_bearer_map *bearer;
bearer = container_of(t, struct rmnet_bearer_map, watchdog);
trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 2);
spin_lock_bh(&bearer->qos->qos_lock);
/* Watch was cancelled while the timer was in flight: do nothing */
if (bearer->watchdog_quit)
goto done;
/*
 * Possible stall, try to recover. Enable 80% query and jumpstart
 * the bearer if disabled.
 */
bearer->watchdog_expire_cnt++;
bearer->bytes_in_flight = 0;
if (!bearer->grant_size) {
/* Grant exhausted: restore a default grant and re-enable flow */
bearer->grant_size = DEFAULT_GRANT;
bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
dfc_bearer_flow_ctl(bearer->qos->vnd_dev, bearer, bearer->qos);
} else {
/* Still granted: just lower the query threshold */
bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
}
done:
/* One-shot: a new qmi_rmnet_watchdog_add() is required to re-arm */
bearer->watchdog_started = false;
spin_unlock_bh(&bearer->qos->qos_lock);
}
/**
 * qmi_rmnet_watchdog_add - add the bearer to watch
 * Needs to be called with qos_lock
 *
 * Arms the per-bearer watchdog timer unless it is already pending.
 */
void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer)
{
	/* Cancel any prior quit request before (re)arming the watch */
	bearer->watchdog_quit = false;

	if (!bearer->watchdog_started) {
		bearer->watchdog_started = true;
		mod_timer(&bearer->watchdog, jiffies + WATCHDOG_EXPIRE_JF);
		trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 1);
	}
}
/**
 * qmi_rmnet_watchdog_remove - remove the bearer from watch
 * Needs to be called with qos_lock
 *
 * @bearer: bearer map to stop watching.
 *
 * Sets watchdog_quit first so a timer callback that already started
 * becomes a no-op, then tries to deactivate the timer without blocking.
 */
void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
{
bearer->watchdog_quit = true;
if (!bearer->watchdog_started)
return;
/*
 * try_to_del_timer_sync() returns -1 when the callback is running on
 * another CPU; leave watchdog_started set in that case and let the
 * callback clear it itself (it sees watchdog_quit and bails out).
 * del_timer_sync() cannot be used here: the callback takes qos_lock,
 * which the caller already holds, so waiting would deadlock.
 */
if (try_to_del_timer_sync(&bearer->watchdog) >= 0)
bearer->watchdog_started = false;
trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 0);
}
/**
* qmi_rmnet_bearer_clean - clean the removed bearer
* Needs to be called with rtn_lock but not qos_lock
*/
static void qmi_rmnet_bearer_clean(struct qos_info *qos)
{
if (qos->removed_bearer) {
qos->removed_bearer->watchdog_quit = true;
del_timer_sync(&qos->removed_bearer->watchdog);
kfree(qos->removed_bearer);
qos->removed_bearer = NULL;
}
}
static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
struct qos_info *qos_info, u8 bearer_id)
{
@ -254,6 +338,8 @@ static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
bearer->mq_idx = INVALID_MQ;
bearer->ack_mq_idx = INVALID_MQ;
bearer->qos = qos_info;
timer_setup(&bearer->watchdog, qmi_rmnet_watchdog_fn, 0);
list_add(&bearer->list, &qos_info->bearer_head);
}
@ -289,7 +375,7 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
/* Remove from bearer map */
list_del(&bearer->list);
kfree(bearer);
qos_info->removed_bearer = bearer;
}
}
@ -420,6 +506,9 @@ again:
done:
spin_unlock_bh(&qos_info->qos_lock);
qmi_rmnet_bearer_clean(qos_info);
return rc;
}
@ -463,6 +552,9 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
netif_tx_wake_all_queues(dev);
spin_unlock_bh(&qos_info->qos_lock);
qmi_rmnet_bearer_clean(qos_info);
return 0;
}
@ -744,6 +836,8 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
bearer->tcp_bidir = false;
bearer->rat_switch = false;
qmi_rmnet_watchdog_remove(bearer);
if (bearer->tx_off)
continue;
@ -906,7 +1000,8 @@ inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
}
EXPORT_SYMBOL(qmi_rmnet_grant_per);
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
void *qmi_rmnet_qos_init(struct net_device *real_dev,
struct net_device *vnd_dev, u8 mux_id)
{
struct qos_info *qos;
@ -916,6 +1011,7 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
qos->mux_id = mux_id;
qos->real_dev = real_dev;
qos->vnd_dev = vnd_dev;
qos->tran_num = 0;
INIT_LIST_HEAD(&qos->flow_head);
INIT_LIST_HEAD(&qos->bearer_head);
@ -927,10 +1023,18 @@ EXPORT_SYMBOL(qmi_rmnet_qos_init);
void qmi_rmnet_qos_exit_pre(void *qos)
{
struct qos_info *qosi = (struct qos_info *)qos;
struct rmnet_bearer_map *bearer;
if (!qos)
return;
list_add(&((struct qos_info *)qos)->list, &qos_cleanup_list);
list_for_each_entry(bearer, &qosi->bearer_head, list) {
bearer->watchdog_quit = true;
del_timer_sync(&bearer->watchdog);
}
list_add(&qosi->list, &qos_cleanup_list);
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);
@ -952,6 +1056,7 @@ EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
static struct workqueue_struct *rmnet_ps_wq;
static struct rmnet_powersave_work *rmnet_work;
static bool rmnet_work_quit;
static bool rmnet_work_inited;
static LIST_HEAD(ps_list);
struct rmnet_powersave_work {
@ -1182,6 +1287,7 @@ void qmi_rmnet_work_init(void *port)
rmnet_work_quit = false;
qmi_rmnet_work_set_active(rmnet_work->port, 1);
queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
rmnet_work_inited = true;
}
EXPORT_SYMBOL(qmi_rmnet_work_init);
@ -1190,7 +1296,7 @@ void qmi_rmnet_work_maybe_restart(void *port)
struct qmi_info *qmi;
qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
if (unlikely(!qmi))
if (unlikely(!qmi || !rmnet_work_inited))
return;
if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
@ -1206,6 +1312,7 @@ void qmi_rmnet_work_exit(void *port)
rmnet_work_quit = true;
synchronize_rcu();
rmnet_work_inited = false;
alarm_cancel(&rmnet_work->atimer);
cancel_delayed_work_sync(&rmnet_work->work);
destroy_workqueue(rmnet_ps_wq);

View File

@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#define MAX_MQ_NUM 16
#define MAX_CLIENT_NUM 2
@ -33,6 +34,8 @@
extern int dfc_mode;
extern int dfc_qmap;
struct qos_info;
struct rmnet_bearer_map {
struct list_head list;
u8 bearer_id;
@ -51,6 +54,11 @@ struct rmnet_bearer_map {
u32 ack_txid;
u32 mq_idx;
u32 ack_mq_idx;
struct qos_info *qos;
struct timer_list watchdog;
bool watchdog_started;
bool watchdog_quit;
u32 watchdog_expire_cnt;
};
struct rmnet_flow_map {
@ -76,11 +84,13 @@ struct qos_info {
struct list_head list;
u8 mux_id;
struct net_device *real_dev;
struct net_device *vnd_dev;
struct list_head flow_head;
struct list_head bearer_head;
struct mq_map mq[MAX_MQ_NUM];
u32 tran_num;
spinlock_t qos_lock;
struct rmnet_bearer_map *removed_bearer;
};
struct qmi_info {
@ -151,6 +161,11 @@ void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
u8 bearer_id);
void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer);
void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@ -194,6 +209,10 @@ dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
static inline void dfc_qmap_client_exit(void *dfc_data)
{
}
/*
 * Watchdog stubs for builds without CONFIG_QCOM_QMI_DFC: the bearer
 * watchdog only exists in the DFC build, so both operations are no-ops.
 * The DFC branch declares the add/remove pair together; provide both
 * stubs here so non-DFC callers of either function still compile.
 */
static inline void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer)
{
}

static inline void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
{
}
#endif
#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -52,7 +52,8 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
#endif
#ifdef CONFIG_QCOM_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
void *qmi_rmnet_qos_init(struct net_device *real_dev,
struct net_device *vnd_dev, u8 mux_id);
void qmi_rmnet_qos_exit_pre(void *qos);
void qmi_rmnet_qos_exit_post(void);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
@ -60,7 +61,8 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
#else
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
qmi_rmnet_qos_init(struct net_device *real_dev,
struct net_device *vnd_dev, u8 mux_id)
{
return NULL;
}

View File

@ -291,6 +291,28 @@ TRACE_EVENT(dfc_adjust_grant,
__entry->rx_bytes, __entry->inflight, __entry->a_grant)
);
/*
 * dfc_watchdog - traces bearer watchdog state transitions.
 * event codes as passed by the callers: 0 = removed from watch,
 * 1 = added to watch, 2 = watchdog timer expired.
 */
TRACE_EVENT(dfc_watchdog,
TP_PROTO(u8 mux_id, u8 bearer_id, u8 event),
TP_ARGS(mux_id, bearer_id, event),
TP_STRUCT__entry(
__field(u8, mux_id)
__field(u8, bearer_id)
__field(u8, event)
),
TP_fast_assign(
__entry->mux_id = mux_id;
__entry->bearer_id = bearer_id;
__entry->event = event;
),
TP_printk("mid=%u bid=%u event=%u",
__entry->mux_id, __entry->bearer_id, __entry->event)
);
#endif /* _TRACE_DFC_H */
/* This part must be outside protection */