ufs: snapshot of UFS controller & PHY driver

This is a snapshot of the UFS driver (host controller and PHY) taken as of
msm-4.9 commit <2e98d6c8> ("Merge "u_ether: Handle memory allocation
failure case on tx path"").

The snapshot covers the following files:
	drivers/scsi/ufs/*
	drivers/phy/phy-qcom-ufs*
	Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt

Fixed some minor compilation errors after taking the snapshot.

Change-Id: Ie96535f0d781a19dbec5d00e410bab5f19281696
Signed-off-by: Can Guo <cang@codeaurora.org>
Authored by Can Guo on 2018-03-07 05:47:08 -08:00; committed by Gerrit - the friendly Code Review server
parent 41839bb7b2
commit 8c69b9d4a6
13 changed files with 744 additions and 128 deletions


@ -144,6 +144,20 @@ Note: The instantaneous bandwidth (IB) value in the vectors-KBps field should
- qcom,bus-vector-names: specifies string IDs for the corresponding
bus vectors in the same order as qcom,msm-bus,vectors-KBps property.
* The following parameters are optional, but required in order for PM QoS to be
enabled and functional in the driver:
- qcom,pm-qos-cpu-groups: array of unsigned integers representing the cpu groups.
The number of values in the array defines the number of cpu-groups.
Each value is a bit-mask defining the cpus that take part in that cpu group,
i.e. if bit N is set, then cpuN is part of that cpu group. In practice,
a cpu group corresponds to a cpu cluster.
A PM QoS request object is maintained for each cpu-group.
- qcom,pm-qos-cpu-group-latency-us: array of values used for PM QoS voting, one for each cpu-group defined.
The number of values must match the number of groups defined in the
qcom,pm-qos-cpu-groups property.
- qcom,pm-qos-default-cpu: PM QoS voting is based on the cpu associated with each IO request by the block layer.
This defines the default cpu used for PM QoS voting in case a specific cpu value is not available.
- qcom,vddp-ref-clk-supply : reference clock to ufs device. Controlled by the host driver.
- qcom,vddp-ref-clk-max-microamp : specifies max. load that can be drawn for
ref-clk supply.
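
For illustration only, a minimal device tree fragment exercising the PM QoS properties described above; the node name, unit address and the mask/latency values are placeholders and are not taken from this patch:

	ufshc@0 {
		/* two cpu groups: cpus 0-3 (0x0f) and cpus 4-7 (0xf0) */
		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
		/* one latency value, in microseconds, per cpu group */
		qcom,pm-qos-cpu-group-latency-us = <200 200>;
		/* cpu used when a request carries no cpu information */
		qcom,pm-qos-default-cpu = <0>;
	};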


@ -77,6 +77,7 @@ struct ufs_qcom_phy_vreg {
int min_uV;
int max_uV;
bool enabled;
bool is_always_on;
};
struct ufs_qcom_phy {


@ -81,7 +81,35 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
return 0;
struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
struct ufs_qcom_phy *phy_common = &phy->common_cfg;
int err;
err = ufs_qcom_phy_init_clks(phy_common);
if (err) {
dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
__func__, err);
goto out;
}
err = ufs_qcom_phy_init_vregulators(phy_common);
if (err) {
dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
__func__, err);
goto out;
}
ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
if (phy_common->quirks & UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING) {
phy_common->vco_tune1_mode1 = readl_relaxed(phy_common->mmio +
QSERDES_COM_VCO_TUNE1_MODE1);
dev_info(phy_common->dev, "%s: vco_tune1_mode1 0x%x\n",
__func__, phy_common->vco_tune1_mode1);
}
out:
return err;
}
static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
@ -239,27 +267,12 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
&ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
if (!generic_phy) {
dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
__func__);
err = -EIO;
goto out;
}
err = ufs_qcom_phy_init_clks(phy_common);
if (err)
goto out;
err = ufs_qcom_phy_init_vregulators(phy_common);
if (err)
goto out;
ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
if (phy_common->quirks & UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING) {
phy_common->vco_tune1_mode1 = readl_relaxed(phy_common->mmio +
QSERDES_COM_VCO_TUNE1_MODE1);
dev_info(phy_common->dev, "%s: vco_tune1_mode1 0x%x\n",
__func__, phy_common->vco_tune1_mode1);
}
phy_set_drvdata(generic_phy, phy);
strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));


@ -221,6 +221,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0A),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),


@ -223,7 +223,15 @@ skip_txrx_clk:
err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
&phy_common->ref_clk);
if (err)
goto out;
/*
* "ref_aux_clk" is optional and only supported by certain
* phy versions, don't abort init if it's not found.
*/
__ufs_qcom_phy_clk_get(phy_common->dev, "ref_aux_clk",
&phy_common->ref_aux_clk, false);
out:
return err;
}
@ -269,6 +277,9 @@ static int ufs_qcom_phy_init_vreg(struct device *dev,
}
err = 0;
}
snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
vreg->is_always_on = of_property_read_bool(dev->of_node,
prop_name);
}
if (!strcmp(name, "vdda-pll")) {
@ -317,6 +328,8 @@ static int ufs_qcom_phy_cfg_vreg(struct device *dev,
int min_uV;
int uA_load;
WARN_ON(!vreg);
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
@ -442,7 +455,7 @@ static int ufs_qcom_phy_disable_vreg(struct device *dev,
{
int ret = 0;
if (!vreg || !vreg->enabled)
if (!vreg || !vreg->enabled || vreg->is_always_on)
goto out;
ret = regulator_disable(vreg->reg);
@ -462,6 +475,13 @@ out:
static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
if (phy->is_ref_clk_enabled) {
/*
* "ref_aux_clk" is optional clock and only supported by
* certain phy versions, hence make sure that clk reference
* is available before trying to disable the clock.
*/
if (phy->ref_aux_clk)
clk_disable_unprepare(phy->ref_aux_clk);
clk_disable_unprepare(phy->ref_clk);
/*
* "ref_clk_parent" is optional clock hence make sure that clk


@ -242,6 +242,39 @@ static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = {
.read = seq_read,
};
static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data)
{
struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
unsigned long flags;
int i;
spin_lock_irqsave(host->hba->host->host_lock, flags);
seq_printf(file, "enabled: %d\n", host->pm_qos.is_enabled);
for (i = 0; i < host->pm_qos.num_groups && host->pm_qos.groups; i++)
seq_printf(file,
"CPU Group #%d(mask=0x%lx): active_reqs=%d, state=%d, latency=%d\n",
i, host->pm_qos.groups[i].mask.bits[0],
host->pm_qos.groups[i].active_reqs,
host->pm_qos.groups[i].state,
host->pm_qos.groups[i].latency_us);
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
return 0;
}
static int ufs_qcom_dbg_pm_qos_open(struct inode *inode,
struct file *file)
{
return single_open(file, ufs_qcom_dbg_pm_qos_show, inode->i_private);
}
static const struct file_operations ufs_qcom_dbg_pm_qos_desc = {
.open = ufs_qcom_dbg_pm_qos_open,
.read = seq_read,
};
void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
{
struct ufs_qcom_host *host;
@ -330,6 +363,17 @@ void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
goto err;
}
host->debugfs_files.pm_qos =
debugfs_create_file("pm_qos", 0400,
host->debugfs_files.debugfs_root, host,
&ufs_qcom_dbg_pm_qos_desc);
if (!host->debugfs_files.pm_qos) {
dev_err(host->hba->dev,
"%s: failed create pm_qos debugfs entry\n",
__func__);
goto err;
}
return;
err:


@ -27,6 +27,8 @@
#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN 0
static struct workqueue_struct *ice_workqueue;
static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset,
int len, char *prefix)
{
@ -170,17 +172,15 @@ out:
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
unsigned long flags;
struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
struct request *req_pending = NULL;
if (!qcom_host->ice.vops->config_start)
return;
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
req_pending = qcom_host->req_pending;
if (!req_pending) {
if (!qcom_host->req_pending) {
qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
return;
}
@ -189,24 +189,14 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
/*
* config_start is called again as previous attempt returned -EAGAIN,
* this call shall now take care of the necessary key setup.
* 'ice_set' will not actually be used, instead the next call to
* config_start() for this request, in the normal call flow, will
* succeed as the key has now been setup.
*/
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
qcom_host->req_pending, &ice_set, false);
qcom_host->req_pending, NULL, false);
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
qcom_host->req_pending = NULL;
qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
/*
* Resume with requests processing. We assume config_start has been
* successful, but even if it wasn't we still must resume in order to
* allow for the request to be retried.
*/
ufshcd_scsi_unblock_requests(qcom_host->hba);
}
/**
@ -235,6 +225,13 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
}
qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
ice_workqueue = alloc_workqueue("ice-set-key",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!ice_workqueue) {
dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
__func__);
goto out;
}
INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
out:
@ -285,21 +282,17 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
* propagate so it will be re-queued and until the
* configuration is is completed we block further
* request processing.
* propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
qcom_host->hba);
if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!schedule_work(
if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
@ -307,10 +300,9 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
&qcom_host->ice_work_lock,
flags);
ufshcd_scsi_unblock_requests(
qcom_host->hba);
return err;
}
qcom_host->work_pending = true;
}
} else {
@ -409,9 +401,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
* propagate so it will be re-queued and until the
* configuration is is completed we block further
* request processing.
* propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
@ -419,11 +409,10 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
"%s: scheduling task for ice setup\n",
__func__);
if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
qcom_host->hba);
if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!schedule_work(
if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
@ -431,10 +420,9 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
&qcom_host->ice_work_lock,
flags);
ufshcd_scsi_unblock_requests(
qcom_host->hba);
return err;
}
qcom_host->work_pending = true;
}
} else {


@ -35,6 +35,8 @@
#define MAX_PROP_SIZE 32
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@ -61,6 +63,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
char *prefix)
@ -812,6 +815,8 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
}
}
/* Unvote PM QoS */
ufs_qcom_pm_qos_suspend(host);
out:
return ret;
@ -1258,7 +1263,7 @@ static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
host->bus_vote.max_bus_bw.attr.mode = 0644;
err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
return err;
@ -1586,6 +1591,395 @@ out:
return err;
}
#ifdef CONFIG_SMP /* CONFIG_SMP */
static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
{
int i;
if (cpu >= 0 && cpu < num_possible_cpus())
for (i = 0; i < host->pm_qos.num_groups; i++)
if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
return i;
return host->pm_qos.default_cpu;
}
static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
{
unsigned long flags;
struct ufs_qcom_host *host;
struct ufs_qcom_pm_qos_cpu_group *group;
if (!hba || !req)
return;
host = ufshcd_get_variant(hba);
if (!host->pm_qos.groups)
return;
group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
spin_lock_irqsave(hba->host->host_lock, flags);
if (!host->pm_qos.is_enabled)
goto out;
group->active_reqs++;
if (group->state != PM_QOS_REQ_VOTE &&
group->state != PM_QOS_VOTED) {
group->state = PM_QOS_REQ_VOTE;
queue_work(host->pm_qos.workq, &group->vote_work);
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
/* hba->host->host_lock is assumed to be held by caller */
static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
{
struct ufs_qcom_pm_qos_cpu_group *group;
if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
return;
group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
if (--group->active_reqs)
return;
group->state = PM_QOS_REQ_UNVOTE;
queue_work(host->pm_qos.workq, &group->unvote_work);
}
static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
bool should_lock)
{
unsigned long flags = 0;
if (!hba || !req)
return;
if (should_lock)
spin_lock_irqsave(hba->host->host_lock, flags);
__ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
if (should_lock)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
{
struct ufs_qcom_pm_qos_cpu_group *group =
container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
struct ufs_qcom_host *host = group->host;
unsigned long flags;
spin_lock_irqsave(host->hba->host->host_lock, flags);
if (!host->pm_qos.is_enabled || !group->active_reqs) {
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
return;
}
group->state = PM_QOS_VOTED;
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
pm_qos_update_request(&group->req, group->latency_us);
}
static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
{
struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
struct ufs_qcom_pm_qos_cpu_group, unvote_work);
struct ufs_qcom_host *host = group->host;
unsigned long flags;
/*
* Check if new requests were submitted in the meantime and do not
* unvote if so.
*/
spin_lock_irqsave(host->hba->host->host_lock, flags);
if (!host->pm_qos.is_enabled || group->active_reqs) {
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
return;
}
group->state = PM_QOS_UNVOTED;
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
pm_qos_update_request_timeout(&group->req,
group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
}
static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
}
static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
unsigned long value;
unsigned long flags;
bool enable;
int i;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
enable = !!value;
/*
* Must take the spinlock and save irqs before changing the enabled
* flag in order to keep correctness of PM QoS release.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
if (enable == host->pm_qos.is_enabled) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
host->pm_qos.is_enabled = enable;
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!enable)
for (i = 0; i < host->pm_qos.num_groups; i++) {
cancel_work_sync(&host->pm_qos.groups[i].vote_work);
cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
spin_lock_irqsave(hba->host->host_lock, flags);
host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
host->pm_qos.groups[i].active_reqs = 0;
spin_unlock_irqrestore(hba->host->host_lock, flags);
pm_qos_update_request(&host->pm_qos.groups[i].req,
PM_QOS_DEFAULT_VALUE);
}
return count;
}
static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int ret;
int i;
int offset = 0;
for (i = 0; i < host->pm_qos.num_groups; i++) {
ret = snprintf(&buf[offset], PAGE_SIZE,
"cpu group #%d(mask=0x%lx): %d\n", i,
host->pm_qos.groups[i].mask.bits[0],
host->pm_qos.groups[i].latency_us);
if (ret > 0)
offset += ret;
else
break;
}
return offset;
}
static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
unsigned long value;
unsigned long flags;
char *strbuf;
char *strbuf_copy;
char *token;
int i;
int ret;
/* reserve one byte for null termination */
strbuf = kmalloc(count + 1, GFP_KERNEL);
if (!strbuf)
return -ENOMEM;
strbuf_copy = strbuf;
strlcpy(strbuf, buf, count + 1);
for (i = 0; i < host->pm_qos.num_groups; i++) {
token = strsep(&strbuf, ",");
if (!token)
break;
ret = kstrtoul(token, 0, &value);
if (ret)
break;
spin_lock_irqsave(hba->host->host_lock, flags);
host->pm_qos.groups[i].latency_us = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
kfree(strbuf_copy);
return count;
}
static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
{
struct device_node *node = host->hba->dev->of_node;
struct device_attribute *attr;
int ret = 0;
int num_groups;
int num_values;
char wq_name[sizeof("ufs_pm_qos_00")];
int i;
num_groups = of_property_count_u32_elems(node,
"qcom,pm-qos-cpu-groups");
if (num_groups <= 0)
goto no_pm_qos;
num_values = of_property_count_u32_elems(node,
"qcom,pm-qos-cpu-group-latency-us");
if (num_values <= 0)
goto no_pm_qos;
if (num_values != num_groups || num_groups > num_possible_cpus()) {
dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
__func__, num_groups, num_values, num_possible_cpus());
goto no_pm_qos;
}
host->pm_qos.num_groups = num_groups;
host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
if (!host->pm_qos.groups)
return -ENOMEM;
for (i = 0; i < host->pm_qos.num_groups; i++) {
u32 mask;
ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
i, &mask);
if (ret)
goto free_groups;
host->pm_qos.groups[i].mask.bits[0] = mask;
if (!cpumask_subset(&host->pm_qos.groups[i].mask,
cpu_possible_mask)) {
dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
__func__, mask);
goto free_groups;
}
ret = of_property_read_u32_index(node,
"qcom,pm-qos-cpu-group-latency-us", i,
&host->pm_qos.groups[i].latency_us);
if (ret)
goto free_groups;
host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
host->pm_qos.groups[i].req.cpus_affine =
host->pm_qos.groups[i].mask;
host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
host->pm_qos.groups[i].active_reqs = 0;
host->pm_qos.groups[i].host = host;
INIT_WORK(&host->pm_qos.groups[i].vote_work,
ufs_qcom_pm_qos_vote_work);
INIT_WORK(&host->pm_qos.groups[i].unvote_work,
ufs_qcom_pm_qos_unvote_work);
}
ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
&host->pm_qos.default_cpu);
if (ret || host->pm_qos.default_cpu > num_possible_cpus())
host->pm_qos.default_cpu = 0;
/*
* Use a single-threaded workqueue to assure work submitted to the queue
* is performed in order. Consider the following 2 possible cases:
*
* 1. A new request arrives and voting work is scheduled for it. Before
* the voting work is performed the request is finished and unvote
* work is also scheduled.
* 2. A request is finished and unvote work is scheduled. Before the
* work is performed a new request arrives and voting work is also
* scheduled.
*
* In both cases a vote work and unvote work wait to be performed.
* If ordering is not guaranteed, then the end state might be the
* opposite of the desired state.
*/
snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
host->hba->host->host_no);
host->pm_qos.workq = create_singlethread_workqueue(wq_name);
if (!host->pm_qos.workq) {
dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
__func__);
ret = -ENOMEM;
goto free_groups;
}
/* Initialization was ok, add all PM QoS requests */
for (i = 0; i < host->pm_qos.num_groups; i++)
pm_qos_add_request(&host->pm_qos.groups[i].req,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
/* PM QoS latency sys-fs attribute */
attr = &host->pm_qos.latency_attr;
attr->show = ufs_qcom_pm_qos_latency_show;
attr->store = ufs_qcom_pm_qos_latency_store;
sysfs_attr_init(&attr->attr);
attr->attr.name = "pm_qos_latency_us";
attr->attr.mode = 0644;
if (device_create_file(host->hba->var->dev, attr))
dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
/* PM QoS enable sys-fs attribute */
attr = &host->pm_qos.enable_attr;
attr->show = ufs_qcom_pm_qos_enable_show;
attr->store = ufs_qcom_pm_qos_enable_store;
sysfs_attr_init(&attr->attr);
attr->attr.name = "pm_qos_enable";
attr->attr.mode = 0644;
if (device_create_file(host->hba->var->dev, attr))
dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
host->pm_qos.is_enabled = true;
return 0;
free_groups:
kfree(host->pm_qos.groups);
no_pm_qos:
host->pm_qos.groups = NULL;
return ret ? ret : -ENOTSUPP;
}
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
{
int i;
if (!host->pm_qos.groups)
return;
for (i = 0; i < host->pm_qos.num_groups; i++)
flush_work(&host->pm_qos.groups[i].unvote_work);
}
static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
{
int i;
if (!host->pm_qos.groups)
return;
for (i = 0; i < host->pm_qos.num_groups; i++)
pm_qos_remove_request(&host->pm_qos.groups[i].req);
destroy_workqueue(host->pm_qos.workq);
kfree(host->pm_qos.groups);
host->pm_qos.groups = NULL;
}
#endif /* CONFIG_SMP */
#define ANDROID_BOOT_DEV_MAX 30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
@ -1734,11 +2128,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out_variant_clear;
}
/*
* voting/devoting device ref_clk source is time consuming hence
* skip devoting it during aggressive clock gating. This clock
* will still be gated off during runtime suspend.
*/
host->generic_phy = devm_phy_get(dev, "ufsphy");
if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
@ -1756,6 +2145,10 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out_variant_clear;
}
err = ufs_qcom_pm_qos_init(host);
if (err)
dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
/* restore the secure configuration */
ufs_qcom_update_sec_cfg(hba, true);
@ -1842,7 +2235,9 @@ out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
msm_bus_scale_unregister_client(host->bus_vote.client_handle);
out_variant_clear:
devm_kfree(dev, host);
ufshcd_set_variant(hba, NULL);
out:
return err;
@ -1852,9 +2247,11 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
msm_bus_scale_unregister_client(host->bus_vote.client_handle);
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
phy_exit(host->generic_phy);
ufs_qcom_pm_qos_remove(host);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
@ -2201,6 +2598,7 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
@ -2215,8 +2613,9 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
usleep_range(1000, 1100);
ufs_qcom_print_unipro_testbus(hba);
usleep_range(1000, 1100);
ufs_qcom_ice_print_regs(host);
ufs_qcom_phy_dbg_register_dump(phy);
usleep_range(1000, 1100);
ufs_qcom_ice_print_regs(host);
}
/**
@ -2255,10 +2654,16 @@ static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
};
static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
.req_start = ufs_qcom_pm_qos_req_start,
.req_end = ufs_qcom_pm_qos_req_end,
};
static struct ufs_hba_variant ufs_hba_qcom_variant = {
.name = "qcom",
.vops = &ufs_hba_qcom_vops,
.crypto_vops = &ufs_hba_crypto_variant_ops,
.pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
};
/**
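
Usage sketch for the two sysfs attributes created by ufs_qcom_pm_qos_init() above; the attributes hang off the UFS variant device (hba->var->dev), so the parent directory shown is a placeholder that depends on the platform:

	# read or toggle the PM QoS voting logic
	cat <variant-device-dir>/pm_qos_enable
	echo 0 > <variant-device-dir>/pm_qos_enable
	# update the vote latency per cpu group: comma separated, one value per group
	echo 100,200 > <variant-device-dir>/pm_qos_latency_us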


@ -14,6 +14,8 @@
#ifndef UFS_QCOM_H_
#define UFS_QCOM_H_
#include <linux/phy/phy.h>
#include <linux/pm_qos.h>
#include "ufshcd.h"
#define MAX_UFS_QCOM_HOSTS 2
@ -258,9 +260,62 @@ struct qcom_debugfs_files {
struct dentry *testbus_cfg;
struct dentry *testbus_bus;
struct dentry *dbg_regs;
struct dentry *pm_qos;
};
#endif
/* PM QoS voting state */
enum ufs_qcom_pm_qos_state {
PM_QOS_UNVOTED,
PM_QOS_VOTED,
PM_QOS_REQ_VOTE,
PM_QOS_REQ_UNVOTE,
};
/**
* struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting
* logic
* @req: request object for PM QoS
* @vote_work: work object for voting procedure
* @unvote_work: work object for un-voting procedure
* @host: back pointer to the main structure
* @state: voting state machine current state
* @latency_us: requested latency value used for cluster voting, in
* microseconds
* @mask: cpu mask defined for this cluster
* @active_reqs: number of active requests on this cluster
*/
struct ufs_qcom_pm_qos_cpu_group {
struct pm_qos_request req;
struct work_struct vote_work;
struct work_struct unvote_work;
struct ufs_qcom_host *host;
enum ufs_qcom_pm_qos_state state;
s32 latency_us;
cpumask_t mask;
int active_reqs;
};
/**
* struct ufs_qcom_pm_qos - data related to PM QoS voting logic
* @groups: PM QoS cpu group state array
* @enable_attr: sysfs attribute to enable/disable PM QoS voting logic
* @latency_attr: sysfs attribute to set latency value
* @workq: single threaded workqueue to run PM QoS voting/unvoting
* @num_clusters: number of clusters defined
* @default_cpu: cpu to use for voting for request not specifying a cpu
* @is_enabled: flag specifying whether voting logic is enabled
*/
struct ufs_qcom_pm_qos {
struct ufs_qcom_pm_qos_cpu_group *groups;
struct device_attribute enable_attr;
struct device_attribute latency_attr;
struct workqueue_struct *workq;
int num_groups;
int default_cpu;
bool is_enabled;
};
struct ufs_qcom_host {
/*
* Set this capability if host controller supports the QUniPro mode
@ -296,6 +351,10 @@ struct ufs_qcom_host {
struct clk *tx_l0_sync_clk;
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
/* PM Quality-of-Service (QoS) data */
struct ufs_qcom_pm_qos pm_qos;
bool disable_lpm;
bool is_lane_clks_enabled;
bool sec_cfg_updated;
@ -318,6 +377,7 @@ struct ufs_qcom_host {
struct work_struct ice_cfg_work;
struct request *req_pending;
struct ufs_vreg *vddp_ref_clk;
bool work_pending;
};
static inline u32


@ -350,7 +350,7 @@ static void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba)
hba->dev_ref_clk_freq = REF_CLK_FREQ_26_MHZ;
}
#ifdef CONFIG_PM
#ifdef CONFIG_SMP
/**
* ufshcd_pltfrm_suspend - suspend power management function
* @dev: pointer to device handle


@ -689,7 +689,7 @@ static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
entry.str = str;
entry.lba = lba;
entry->cmd_id = cmd_id;
entry.cmd_id = cmd_id;
entry.transfer_len = transfer_len;
entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
entry.tag = tag;
@ -940,7 +940,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
hba->pm_op_in_progress, hba->is_sys_suspended);
dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
hba->auto_bkops_enabled, hba->host->host_self_blocked);
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
hba->clk_gating.state, hba->hibern8_on_idle.state);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
@ -1377,23 +1378,16 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
ktime_t start = ktime_get();
bool clk_state_changed = false;
if (list_empty(head))
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
if (ret)
return ret;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@ -1412,7 +1406,6 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
if (clki->curr_freq == clki->min_freq)
continue;
clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->min_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@ -1431,13 +1424,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->name, clk_get_rate(clki->clk));
}
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
out:
if (clk_state_changed)
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
}
@ -1579,11 +1566,12 @@ out:
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
#define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
WARN_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
sizeof(struct ufs_pa_layer_attr));
@ -1828,7 +1816,6 @@ static int ufshcd_devfreq_target(struct device *dev,
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@ -3345,9 +3332,10 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
cpu_to_be32(lrbp->cmd->sdb.length);
cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
if (cdb_len < MAX_CDB_SIZE)
memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
(MAX_CDB_SIZE - cdb_len));
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@ -3663,6 +3651,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (ufshcd_is_hibern8_on_idle_allowed(hba))
WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
/* Vote PM QoS for the request */
ufshcd_vops_pm_qos_req_start(hba, cmd->request);
/* IO svc time latency histogram */
if (hba != NULL && cmd->request != NULL) {
if (hba->latency_hist_enabled) {
@ -3699,6 +3690,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
@ -3706,6 +3698,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
@ -3720,6 +3714,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
@ -3738,6 +3733,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
dev_err(hba->dev, "%s: failed sending command, %d\n",
__func__, err);
@ -4208,7 +4204,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retires\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
__func__, idn, ret, retries);
return ret;
}
@ -5154,9 +5150,12 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
} else {
ret = -EAGAIN;
}
} else
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
ktime_to_us(ktime_get()));
}
return ret;
}
@ -5202,6 +5201,8 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
ktime_to_us(ktime_get()));
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
hba->ufs_stats.hibern8_exit_cnt++;
}
@ -6221,6 +6222,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
__ufshcd_release(hba, false);
__ufshcd_hibern8_release(hba, false);
if (cmd->request) {
/*
* As we are accessing the "request" structure,
* this must be called before calling
* ->scsi_done() callback.
*/
ufshcd_vops_pm_qos_req_end(hba, cmd->request,
false);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
}
@ -6298,6 +6306,13 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
lrbp->cmd = NULL;
ufshcd_release_all(hba);
if (cmd->request) {
/*
* As we are accessing the "request" structure,
* this must be called before calling
* ->scsi_done() callback.
*/
ufshcd_vops_pm_qos_req_end(hba, cmd->request,
true);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
}
@ -6687,8 +6702,14 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
(hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
/*
* we have to do error recovery but at least silence the error
* logs.
*/
hba->silence_err_logs = true;
goto out;
}
if ((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
@ -6706,8 +6727,13 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
*/
if ((hba->saved_err & INT_FATAL_ERRORS) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
(hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
if (((hba->saved_err & INT_FATAL_ERRORS) ==
DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
hba->silence_err_logs = true;
goto out;
}
/*
* As DL NAC is the only error received so far, send out NOP
@ -6716,12 +6742,17 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
* - If we get response then clear the DL NAC error bit.
*/
/* silence the error logs from NOP command */
hba->silence_err_logs = true;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_verify_dev_init(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
if (err)
if (err) {
hba->silence_err_logs = true;
goto out;
}
/* Link seems to be alive hence ignore the DL NAC errors */
if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
@ -6732,6 +6763,11 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
err_handling = false;
goto out;
}
/*
* there seem to be some errors other than NAC, so do error
* recovery
*/
hba->silence_err_logs = true;
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@ -6806,6 +6842,21 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba);
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
if (hba->dev_info.quirks &
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!ret)
goto skip_err_handling;
}
/*
* Dump controller state before resetting. Transfer requests state
* will be dump as part of the request completion.
@ -6827,40 +6878,14 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->auto_h8_err = false;
}
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
if (hba->dev_info.quirks &
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!ret)
goto skip_err_handling;
}
if ((hba->saved_err & INT_FATAL_ERRORS)
|| hba->saved_ce_err || hba->force_host_reset ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
needs_reset = true;
if (hba->saved_err & INT_FATAL_ERRORS)
ufshcd_update_error_stats(hba,
UFS_ERR_INT_FATAL_ERRORS);
if (hba->saved_ce_err)
ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
if (hba->saved_err & UIC_ERROR)
ufshcd_update_error_stats(hba,
UFS_ERR_INT_UIC_ERROR);
}
/*
* if host reset is required then skip clearing the pending
* transfers forcefully because they will automatically get
@ -6901,6 +6926,20 @@ skip_pending_xfer_clear:
if (needs_reset) {
unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
if (hba->saved_err & INT_FATAL_ERRORS)
ufshcd_update_error_stats(hba,
UFS_ERR_INT_FATAL_ERRORS);
if (hba->saved_ce_err)
ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
if (hba->saved_err & UIC_ERROR)
ufshcd_update_error_stats(hba,
UFS_ERR_INT_UIC_ERROR);
if (err_xfer || err_tm)
ufshcd_update_error_stats(hba,
UFS_ERR_CLEAR_PEND_XFER_TM);
/*
* ufshcd_reset_and_restore() does the link reinitialization
* which will need at least one empty doorbell slot to send the
@ -7025,7 +7064,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
__func__, reg);
ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
/*
@ -8085,6 +8125,9 @@ static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
int ret = 0;
u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
return 0;
ret = ufshcd_dme_peer_get(hba,
UIC_ARG_MIB_SEL(
RX_MIN_ACTIVATETIME_CAPABILITY,
@ -8418,10 +8461,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
/* set the default level for urgent bkops */
hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
hba->is_urgent_bkops_lvl_checked = false;
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
/* set the default level for urgent bkops */
@ -8892,8 +8931,8 @@ static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
default:
goto out_einval;
}
err = ufshcd_query_flag(hba, ioctl_data->opcode, ioctl_data->idn,
&flag);
err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
ioctl_data->idn, &flag);
break;
default:
goto out_einval;
@ -9004,11 +9043,11 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
.eh_timed_out = ufshcd_eh_timed_out,
.ioctl = ufshcd_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ufshcd_ioctl,
#endif
.eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@ -9330,6 +9369,9 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
out:
if (ret) {
if (on)
/* Can't do much if this fails */
(void) ufshcd_vops_set_bus_vote(hba, false);
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
@ -9521,7 +9563,6 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@ -9661,8 +9702,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
* To avoid this situation, add 2ms delay before putting these UFS
* rails in LPM mode.
*/
if (!ufshcd_is_link_active(hba) &&
hba->dev_info.quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
if (!ufshcd_is_link_active(hba))
usleep_range(2000, 2100);
/*
@ -9879,9 +9919,9 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
hba->hibern8_on_idle.is_suspended = false;
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->hibern8_on_idle.is_suspended = false;
hba->clk_gating.is_suspended = false;
ufshcd_release_all(hba);
out:
@ -10442,9 +10482,14 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_exit_clk_gating(hba);
ufshcd_exit_hibern8_on_idle(hba);
ufshcd_exit_latency_hist(hba);
if (ufshcd_is_clkscaling_supported(hba))
if (ufshcd_is_clkscaling_supported(hba)) {
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
if (hba->devfreq)
devfreq_remove_device(hba->devfreq);
}
ufshcd_exit_latency_hist(hba);
ufshcd_hba_exit(hba);
ufsdbg_remove_debugfs(hba);
}
@ -10550,6 +10595,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
/* print error message if ufs_version is not valid */
if ((hba->ufs_version != UFSHCI_VERSION_10) &&
(hba->ufs_version != UFSHCI_VERSION_11) &&
(hba->ufs_version != UFSHCI_VERSION_20) &&


@ -78,6 +78,7 @@
#define UFSHCD_DRIVER_VERSION "0.3"
#define UFS_BIT(x) BIT(x)
#define UFS_MASK(x, y) (x << ((y) % BITS_PER_LONG))
struct ufs_hba;
@ -387,14 +388,24 @@ struct ufs_hba_crypto_variant_ops {
int (*crypto_engine_get_status)(struct ufs_hba *, u32 *);
};
/**
* struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks
*/
struct ufs_hba_pm_qos_variant_ops {
void (*req_start)(struct ufs_hba *, struct request *);
void (*req_end)(struct ufs_hba *, struct request *, bool);
};
/**
* struct ufs_hba_variant - variant specific parameters
* @name: variant name
*/
struct ufs_hba_variant {
struct device *dev;
const char *name;
struct ufs_hba_variant_ops *vops;
struct ufs_hba_crypto_variant_ops *crypto_vops;
struct ufs_hba_pm_qos_variant_ops *pm_qos_vops;
};
/* clock gating state */
@ -1514,4 +1525,19 @@ static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba,
return 0;
}
static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba,
struct request *req)
{
if (hba->var && hba->var->pm_qos_vops &&
hba->var->pm_qos_vops->req_start)
hba->var->pm_qos_vops->req_start(hba, req);
}
static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
struct request *req, bool lock)
{
if (hba->var && hba->var->pm_qos_vops && hba->var->pm_qos_vops->req_end)
hba->var->pm_qos_vops->req_end(hba, req, lock);
}
#endif /* End of Header */


@ -93,8 +93,6 @@ enum {
MASK_CRYPTO_SUPPORT = 0x10000000,
};
#define UFS_MASK(mask, offset) ((mask) << (offset))
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)