Merge "soc: qcom: dfc: Support multiq"

qctecmdr Service authored 2018-09-09 08:39:39 -07:00, committed by Gerrit - the friendly Code Review server
commit d8f81fe4d6
7 changed files with 152 additions and 202 deletions

View File: drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c

@@ -496,7 +496,8 @@ EXPORT_SYMBOL(rmnet_get_qmi_pt);
void *rmnet_get_qos_pt(struct net_device *dev)
{
if (dev)
return ((struct rmnet_priv *)netdev_priv(dev))->qos_info;
return rcu_dereference(
((struct rmnet_priv *)netdev_priv(dev))->qos_info);
return NULL;
}
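Since qos_info is now RCU-protected, this accessor must be called inside a read-side critical section, as dfc_do_burst_flow_control() does later in this patch. A minimal reader sketch (variable names hypothetical):

	rcu_read_lock();
	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (qos) {
		/* use qos; do not cache the pointer past the unlock */
	}
	rcu_read_unlock();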
@@ -520,14 +521,9 @@ struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
struct rmnet_endpoint *ep;
if (port) {
struct net_device *dev;
ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
if (ep) {
dev = ep->egress_dev;
return dev;
}
if (ep)
return ep->egress_dev;
}
return NULL;

View File: drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h

@@ -107,7 +107,7 @@ struct rmnet_priv {
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
struct rmnet_priv_stats stats;
void *qos_info;
void __rcu *qos_info;
};
int rmnet_is_real_dev_registered(const struct net_device *real_dev);

View File: drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c

@@ -16,6 +16,7 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
@@ -61,12 +62,19 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct rmnet_priv *priv;
int ip_type;
u32 mark;
unsigned int len;
priv = netdev_priv(dev);
if (priv->real_dev) {
ip_type = (ip_hdr(skb)->version == 4) ?
AF_INET : AF_INET6;
mark = skb->mark;
len = skb->len;
trace_rmnet_xmit_skb(skb);
qmi_rmnet_burst_fc_check(dev, skb);
rmnet_egress_handler(skb);
qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
} else {
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
@@ -111,12 +119,15 @@ static int rmnet_vnd_init(struct net_device *dev)
static void rmnet_vnd_uninit(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
void *qos;
gro_cells_destroy(&priv->gro_cells);
free_percpu(priv->pcpu_stats);
qmi_rmnet_qos_exit(dev);
priv->qos_info = NULL;
qos = priv->qos_info;
RCU_INIT_POINTER(priv->qos_info, NULL);
synchronize_rcu();
qmi_rmnet_qos_exit(dev, qos);
}
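The uninit path above is the standard RCU retire sequence; restated with an editorial gloss (the comments are not part of the patch):

	qos = priv->qos_info;                   /* snapshot for the later free */
	RCU_INIT_POINTER(priv->qos_info, NULL); /* unpublish: new readers see NULL */
	synchronize_rcu();                      /* wait for existing readers to drain */
	qmi_rmnet_qos_exit(dev, qos);           /* only now is freeing qos safe */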
static void rmnet_get_stats64(struct net_device *dev,
@@ -150,6 +161,14 @@ static void rmnet_get_stats64(struct net_device *dev,
s->tx_dropped = total_stats.tx_drops;
}
static u16 rmnet_vnd_select_queue(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv,
select_queue_fallback_t fallback)
{
return 0;
}
static const struct net_device_ops rmnet_vnd_ops = {
.ndo_start_xmit = rmnet_vnd_start_xmit,
.ndo_change_mtu = rmnet_vnd_change_mtu,
@@ -159,6 +178,7 @@ static const struct net_device_ops rmnet_vnd_ops = {
.ndo_init = rmnet_vnd_init,
.ndo_uninit = rmnet_vnd_uninit,
.ndo_get_stats64 = rmnet_get_stats64,
.ndo_select_queue = rmnet_vnd_select_queue,
};
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {

View File: drivers/soc/qcom/dfc_qmi.c

@@ -16,7 +16,6 @@
#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
#include <linux/ip.h>
#include "qmi_rmnet_i.h"
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>
@@ -61,20 +60,6 @@ struct dfc_qmi_data {
int restart_state;
};
struct dfc_svc_ind {
struct work_struct work;
struct dfc_qmi_data *data;
void *dfc_info;
};
struct dfc_burst_ind {
struct work_struct work;
struct net_device *dev;
struct qos_info *qos;
struct rmnet_bearer_map *bearer;
struct dfc_qmi_data *data;
};
static void dfc_svc_init(struct work_struct *work);
static void dfc_do_burst_flow_control(struct work_struct *work);
@@ -257,6 +242,12 @@ struct dfc_flow_status_ind_msg_v01 {
u8 eod_ack_reqd;
};
struct dfc_svc_ind {
struct work_struct work;
struct dfc_qmi_data *data;
struct dfc_flow_status_ind_msg_v01 dfc_info;
};
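Embedding the indication in the work item removes the second GFP_ATOMIC allocation that dfc_clnt_ind_cb() used to make. The worker recovers the container with a plain cast, which is correct only while work remains the first member; container_of() expresses the same thing without that ordering constraint (sketch):

	struct dfc_svc_ind *svc_ind =
		container_of(work, struct dfc_svc_ind, work);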
static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
@@ -596,7 +587,7 @@ static int dfc_bearer_flow_ctl(struct net_device *dev,
itm = list_entry(p, struct rmnet_flow_map, list);
if (itm->bearer_id == bearer->bearer_id) {
qlen = tc_qdisc_flow_control(dev, itm->tcm_handle,
qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle,
enable);
trace_dfc_qmi_tc(itm->bearer_id, itm->flow_id,
bearer->grant_size, qlen,
@@ -618,10 +609,9 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
struct dfc_flow_status_info_type_v01 *fc_info)
{
struct list_head *p;
struct rmnet_flow_map *flow_itm;
struct rmnet_bearer_map *bearer_itm;
struct rmnet_bearer_map *bearer_itm = NULL;
int enable;
int rc = 0, len;
int rc = 0;
list_for_each(p, &qos->bearer_head) {
bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
@@ -635,15 +625,12 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
enable = fc_info->num_bytes > 0 ? 1 : 0;
list_for_each(p, &qos->flow_head) {
flow_itm = list_entry(p, struct rmnet_flow_map, list);
if (enable)
netif_tx_wake_all_queues(dev);
else
netif_tx_stop_all_queues(dev);
len = tc_qdisc_flow_control(dev, flow_itm->tcm_handle, enable);
trace_dfc_qmi_tc(flow_itm->bearer_id, flow_itm->flow_id,
fc_info->num_bytes, len,
flow_itm->tcm_handle, enable);
rc++;
}
trace_dfc_qmi_tc(0xFF, 0, fc_info->num_bytes, 0, 0, enable);
if (enable == 0 && ack_req)
dfc_send_ack(dev, fc_info->bearer_id,
@@ -686,31 +673,19 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
{
struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
struct dfc_flow_status_ind_msg_v01 *ind =
(struct dfc_flow_status_ind_msg_v01 *)svc_ind->dfc_info;
(struct dfc_flow_status_ind_msg_v01 *)&svc_ind->dfc_info;
struct net_device *dev;
struct qos_info *qos;
struct dfc_flow_status_info_type_v01 *flow_status;
u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
int i, rc;
int i;
if (!svc_ind->data->rmnet_port) {
kfree(ind);
if (unlikely(svc_ind->data->restart_state)) {
kfree(svc_ind);
return;
}
while (!rtnl_trylock()) {
if (!svc_ind->data->restart_state) {
cond_resched();
} else {
kfree(ind);
kfree(svc_ind);
return;
}
}
if (unlikely(svc_ind->data->restart_state))
goto clean_out;
rcu_read_lock();
for (i = 0; i < ind->flow_status_len; i++) {
flow_status = &ind->flow_status[i];
@@ -729,67 +704,20 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
if (!qos)
continue;
spin_lock_bh(&qos->qos_lock);
if (unlikely(flow_status->bearer_id == 0xFF))
rc = dfc_all_bearer_flow_ctl(
dfc_all_bearer_flow_ctl(
dev, qos, ack_req, flow_status);
else
rc = dfc_update_fc_map(dev, qos, ack_req, flow_status);
dfc_update_fc_map(dev, qos, ack_req, flow_status);
spin_unlock_bh(&qos->qos_lock);
}
clean_out:
kfree(ind);
rcu_read_unlock();
kfree(svc_ind);
rtnl_unlock();
}
static void dfc_bearer_limit_work(struct work_struct *work)
{
struct dfc_burst_ind *dfc_ind = (struct dfc_burst_ind *)work;
struct rmnet_flow_map *itm;
struct list_head *p;
int qlen, fc;
/* enable transmit on device so that the other
* flows which transmit proceed normally.
*/
netif_start_queue(dfc_ind->dev);
while (!rtnl_trylock()) {
if (!dfc_ind->data->restart_state) {
cond_resched();
} else {
kfree(dfc_ind);
return;
}
}
fc = dfc_ind->bearer->grant_size ? 1 : 0;
/* if grant size is non zero here, we must have already
* got an updated grant. do nothing in that case
*/
if (fc)
goto done;
list_for_each(p, &dfc_ind->qos->flow_head) {
itm = list_entry(p, struct rmnet_flow_map, list);
if (itm->bearer_id == dfc_ind->bearer->bearer_id) {
qlen = tc_qdisc_flow_control(dfc_ind->dev,
itm->tcm_handle, fc);
trace_dfc_qmi_tc_limit(itm->bearer_id, itm->flow_id,
dfc_ind->bearer->grant_size,
qlen, itm->tcm_handle, fc);
}
}
if (dfc_ind->bearer->ack_req)
dfc_send_ack(dfc_ind->dev, dfc_ind->bearer->bearer_id,
dfc_ind->bearer->seq, dfc_ind->qos->mux_id,
DFC_ACK_TYPE_DISABLE);
done:
kfree(dfc_ind);
rtnl_unlock();
}
static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -800,9 +728,6 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct dfc_flow_status_ind_msg_v01 *ind_msg;
struct dfc_svc_ind *svc_ind;
if (!dfc->rmnet_port)
return;
if (qmi != &dfc->handle)
return;
@@ -820,14 +745,10 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
INIT_WORK((struct work_struct *)svc_ind,
dfc_do_burst_flow_control);
svc_ind->dfc_info = kzalloc(sizeof(*ind_msg), GFP_ATOMIC);
if (!svc_ind->dfc_info) {
kfree(svc_ind);
return;
}
memcpy(svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
svc_ind->data = dfc;
queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind);
}
}
@@ -965,35 +886,32 @@ void dfc_qmi_client_exit(void *dfc_data)
}
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
struct sk_buff *skb, struct qmi_info *qmi)
int ip_type, u32 mark, unsigned int len)
{
struct rmnet_bearer_map *bearer;
struct dfc_burst_ind *dfc_ind;
struct rmnet_flow_map *itm;
struct dfc_qmi_data *data;
int ip_type;
u32 start_grant;
ip_type = (ip_hdr(skb)->version == IP_VER_6) ? AF_INET6 : AF_INET;
spin_lock(&qos->qos_lock);
itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
if (unlikely(!itm))
return;
goto out;
bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
if (unlikely(!bearer))
return;
goto out;
trace_dfc_flow_check(bearer->bearer_id, skb->len, bearer->grant_size);
trace_dfc_flow_check(bearer->bearer_id, len, bearer->grant_size);
if (!bearer->grant_size)
return;
goto out;
start_grant = bearer->grant_size;
if (skb->len >= bearer->grant_size)
if (len >= bearer->grant_size)
bearer->grant_size = 0;
else
bearer->grant_size -= skb->len;
bearer->grant_size -= len;
if (start_grant > bearer->grant_thresh &&
bearer->grant_size <= bearer->grant_thresh) {
@@ -1002,27 +920,9 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
DFC_ACK_TYPE_THRESHOLD);
}
if (bearer->grant_size)
return;
if (!bearer->grant_size)
dfc_bearer_flow_ctl(dev, bearer, qos);
data = (struct dfc_qmi_data *)qmi_rmnet_has_dfc_client(qmi);
if (!data)
return;
dfc_ind = kzalloc(sizeof(*dfc_ind), GFP_ATOMIC);
if (!dfc_ind)
return;
INIT_WORK((struct work_struct *)dfc_ind, dfc_bearer_limit_work);
dfc_ind->dev = dev;
dfc_ind->qos = qos;
dfc_ind->bearer = bearer;
dfc_ind->data = data;
/* stop the flow in hope that the worker thread is
* immediately scheduled beyond this point of time
*/
netif_stop_queue(dev);
queue_work(data->dfc_wq, (struct work_struct *)dfc_ind);
out:
spin_unlock(&qos->qos_lock);
}
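The threshold ack fires exactly when the remaining grant crosses grant_thresh from above; with hypothetical numbers:

	/* grant_size = 1000, grant_thresh = 400.
	 * A 700-byte packet: start_grant (1000) > 400 and the new
	 * grant_size (300) <= 400, so dfc_send_ack(..., DFC_ACK_TYPE_THRESHOLD)
	 * requests a fresh grant while data can still flow.
	 * A further 300-byte packet zeroes the grant, and
	 * dfc_bearer_flow_ctl() stops the flow's tx queues. */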

View File: drivers/soc/qcom/qmi_rmnet.c

@@ -26,8 +26,9 @@
#define NLMSG_CLIENT_SETUP 4
#define NLMSG_CLIENT_DELETE 5
#define FLAG_DFC_MASK 0x0001
#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define DFC_MODE_MULTIQ 2
#define PS_INTERVAL (0x0004 * HZ)
#define NO_DELAY (0x0000 * HZ)
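The DFC mode now occupies the low nibble of the flag word, with powersave in bit 4; for example (hypothetical value):

	/* flag = 0x12: (flag & FLAG_DFC_MASK) == 0x2 == DFC_MODE_MULTIQ,
	 * and (flag & FLAG_POWERSAVE_MASK) != 0, so multiq DFC and
	 * powersave are both enabled. */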
@@ -69,22 +70,11 @@ struct qmi_elem_info data_ep_id_type_v01_ei[] = {
};
EXPORT_SYMBOL(data_ep_id_type_v01_ei);
static struct qmi_info *qmi_rmnet_qmi_init(void)
{
struct qmi_info *qmi_info;
qmi_info = kzalloc(sizeof(*qmi_info), GFP_KERNEL);
if (!qmi_info)
return NULL;
return qmi_info;
}
void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
int i;
if (!qmi || !(qmi->flag & FLAG_DFC_MASK))
if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
return NULL;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -197,6 +187,25 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
itm->tcm_handle = new_map->tcm_handle;
}
int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
{
struct netdev_queue *q;
if (unlikely(tcm_handle >= dev->num_tx_queues))
return 0;
q = netdev_get_tx_queue(dev, tcm_handle);
if (unlikely(!q))
return 0;
if (enable)
netif_tx_wake_queue(q);
else
netif_tx_stop_queue(q);
return 0;
}
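A flow's tcm_handle now names a tx queue directly, so per-flow control is a queue wake/stop rather than a qdisc operation; usage as elsewhere in this patch:

	/* Grant available: let the flow's queue transmit. */
	qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
	/* Grant exhausted: park the queue until the next indication. */
	qmi_rmnet_flow_control(dev, itm->tcm_handle, 0);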
static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
struct qmi_info *qmi)
{
@@ -221,14 +230,18 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id,
new_map.ip_type, new_map.tcm_handle, 1);
spin_lock_bh(&qos_info->qos_lock);
itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
new_map.ip_type);
if (itm) {
qmi_rmnet_update_flow_map(itm, &new_map);
} else {
itm = kzalloc(sizeof(*itm), GFP_KERNEL);
if (!itm)
itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
if (!itm) {
spin_unlock_bh(&qos_info->qos_lock);
return -ENOMEM;
}
qmi_rmnet_update_flow_link(qmi, dev, itm, 1);
qmi_rmnet_update_flow_map(itm, &new_map);
@@ -238,9 +251,11 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
if (bearer) {
bearer->flow_ref++;
} else {
bearer = kzalloc(sizeof(*bearer), GFP_KERNEL);
if (!bearer)
bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
if (!bearer) {
spin_unlock_bh(&qos_info->qos_lock);
return -ENOMEM;
}
bearer->bearer_id = new_map.bearer_id;
bearer->flow_ref = 1;
@@ -251,10 +266,12 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
list_add(&bearer->list, &qos_info->bearer_head);
}
tc_qdisc_flow_control(dev, itm->tcm_handle,
qmi_rmnet_flow_control(dev, itm->tcm_handle,
bearer->grant_size > 0 ? 1 : 0);
}
spin_unlock_bh(&qos_info->qos_lock);
return 0;
}
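The kzalloc() calls above switch to GFP_ATOMIC because they now run with qos_lock held, where sleeping is forbidden; the shape is:

	spin_lock_bh(&qos_info->qos_lock);
	itm = kzalloc(sizeof(*itm), GFP_ATOMIC);  /* must not sleep here */
	if (!itm) {
		spin_unlock_bh(&qos_info->qos_lock);  /* unlock before bailing */
		return -ENOMEM;
	}
	/* ... populate itm, then ... */
	spin_unlock_bh(&qos_info->qos_lock);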
@@ -277,6 +294,8 @@
* tcm->tcm_ifindex - ip_type
*/
spin_lock_bh(&qos_info->qos_lock);
new_map.bearer_id = tcm->tcm__pad1;
new_map.flow_id = tcm->tcm_parent;
new_map.ip_type = tcm->tcm_ifindex;
@@ -300,6 +319,8 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
kfree(bearer);
}
spin_unlock_bh(&qos_info->qos_lock);
return 0;
}
@@ -309,7 +330,7 @@ static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi)
struct qos_info *qos;
struct rmnet_flow_map *m;
struct rmnet_bearer_map *bearer;
int qlen, need_enable = 0;
int qlen;
if (!qmi_rmnet_has_dfc_client(qmi) || (qmi->flow_cnt == 0))
return 0;
@@ -319,21 +340,25 @@ static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi)
for (i = 0; i < qmi->flow_cnt; i++) {
qos = (struct qos_info *)rmnet_get_qos_pt(qmi->flow[i].dev);
m = qmi->flow[i].itm;
spin_lock_bh(&qos->qos_lock);
bearer = qmi_rmnet_get_bearer_map(qos, m->bearer_id);
if (bearer) {
if (bearer->grant_size == 0)
need_enable = 1;
bearer->grant_size = DEFAULT_GRANT;
bearer->grant_thresh =
qmi_rmnet_grant_per(DEFAULT_GRANT);
if (need_enable) {
qlen = tc_qdisc_flow_control(qmi->flow[i].dev,
m->tcm_handle, 1);
trace_dfc_qmi_tc(m->bearer_id, m->flow_id,
bearer->grant_size, qlen,
m->tcm_handle, 1);
}
bearer->seq = 0;
bearer->ack_req = 0;
}
qlen = qmi_rmnet_flow_control(qmi->flow[i].dev,
m->tcm_handle, 1);
trace_dfc_qmi_tc(m->bearer_id, m->flow_id,
DEFAULT_GRANT, qlen,
m->tcm_handle, 1);
spin_unlock_bh(&qos->qos_lock);
}
return 0;
@@ -410,7 +435,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
idx = (tcm->tcm_handle == 0) ? 0 : 1;
if (!qmi) {
qmi = qmi_rmnet_qmi_init();
qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL);
if (!qmi)
return -ENOMEM;
@@ -422,7 +447,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
qmi->fc_info[idx].svc.ep_type = tcm->tcm_info;
qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent;
if ((tcm->tcm_ifindex & FLAG_DFC_MASK) &&
if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
(qmi->fc_info[idx].dfc_client == NULL)) {
rc = dfc_qmi_client_init(port, idx, qmi);
if (rc < 0)
@@ -484,20 +509,20 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
switch (tcm->tcm_family) {
case NLMSG_FLOW_ACTIVATE:
if (!qmi || !(qmi->flag & FLAG_DFC_MASK) ||
if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) ||
!qmi_rmnet_has_dfc_client(qmi))
return;
qmi_rmnet_add_flow(dev, tcm, qmi);
break;
case NLMSG_FLOW_DEACTIVATE:
if (!qmi || !(qmi->flag & FLAG_DFC_MASK))
if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
return;
qmi_rmnet_del_flow(dev, tcm, qmi);
break;
case NLMSG_CLIENT_SETUP:
if (!(tcm->tcm_ifindex & FLAG_DFC_MASK) &&
if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) &&
!(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
return;
@@ -553,16 +578,15 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
EXPORT_SYMBOL(qmi_rmnet_qmi_exit);
#ifdef CONFIG_QCOM_QMI_DFC
void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb)
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len)
{
void *port = rmnet_get_rmnet_port(dev);
struct qmi_info *qmi = rmnet_get_qmi_pt(port);
struct qos_info *qos = rmnet_get_qos_pt(dev);
if (!qmi || !qos)
if (!qos)
return;
dfc_qmi_burst_check(dev, qos, skb, qmi);
dfc_qmi_burst_check(dev, qos, ip_type, mark, len);
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
@@ -586,21 +610,22 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
qos->tran_num = 0;
INIT_LIST_HEAD(&qos->flow_head);
INIT_LIST_HEAD(&qos->bearer_head);
spin_lock_init(&qos->qos_lock);
return qos;
}
EXPORT_SYMBOL(qmi_rmnet_qos_init);
void qmi_rmnet_qos_exit(struct net_device *dev)
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
{
void *port = rmnet_get_rmnet_port(dev);
struct qmi_info *qmi = rmnet_get_qmi_pt(port);
struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev);
struct qos_info *qos_info = (struct qos_info *)qos;
if (!qmi || !qos)
return;
qmi_rmnet_clean_flow_list(qmi, dev, qos);
qmi_rmnet_clean_flow_list(qmi, dev, qos_info);
kfree(qos);
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit);
@@ -651,6 +676,7 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
struct rmnet_powersave_work *real_work;
u64 rxd, txd;
u64 rx, tx;
unsigned long lock_delay;
real_work = container_of(to_delayed_work(work),
struct rmnet_powersave_work, work);
@@ -658,8 +684,11 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
if (unlikely(!real_work || !real_work->port))
return;
lock_delay = qmi_rmnet_work_get_active(real_work->port) ?
PS_INTERVAL : (HZ / 50);
if (!rtnl_trylock()) {
queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
queue_delayed_work(rmnet_ps_wq, &real_work->work, lock_delay);
return;
}
if (!qmi_rmnet_work_get_active(real_work->port)) {

View File: drivers/soc/qcom/qmi_rmnet_i.h

@@ -60,6 +60,7 @@ struct qos_info {
struct list_head bearer_head;
u32 default_grant;
u32 tran_num;
spinlock_t qos_lock;
};
struct flow_info {
@@ -110,9 +111,11 @@ int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
void dfc_qmi_client_exit(void *dfc_data);
void dfc_qmi_burst_check(struct net_device *dev,
struct qos_info *qos, struct sk_buff *skb,
struct qmi_info *qmi);
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -139,7 +142,7 @@ static inline void dfc_qmi_client_exit(void *dfc_data)
static inline void
dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
struct sk_buff *skb, struct qmi_info *qmi)
int ip_type, u32 mark, unsigned int len)
{
}
#endif

View File: include/soc/qcom/qmi_rmnet.h

@@ -33,8 +33,9 @@ qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
#ifdef CONFIG_QCOM_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
void qmi_rmnet_qos_exit(struct net_device *dev);
void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb);
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len);
#else
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
@@ -42,12 +43,13 @@ qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
return NULL;
}
static inline void qmi_rmnet_qos_exit(struct net_device *dev)
static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
{
}
static inline void
qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb)
qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len)
{
}
#endif