Merge "msm: kgsl: Do not access GMU_HFI_ON flag in power control"

qctecmdr Service 2018-07-31 22:16:45 -07:00 committed by Gerrit - the friendly Code Review server
commit 9a7f4ba893
9 changed files with 103 additions and 227 deletions

View File

@@ -2135,7 +2135,7 @@ static int adreno_stop(struct kgsl_device *device)
gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);
if (gmu_core_regulator_isenabled(device)) {
/* GPU is on. Try recovery */
gmu_core_setbit(device, GMU_FAULT);
set_bit(GMU_FAULT, &device->gmu_core.flags);
gmu_core_snapshot(device);
error = -EINVAL;
} else {
@@ -2177,7 +2177,7 @@ static int adreno_stop(struct kgsl_device *device)
if (!error && GMU_DEV_OP_VALID(gmu_dev_ops, wait_for_lowest_idle) &&
gmu_dev_ops->wait_for_lowest_idle(adreno_dev)) {
gmu_core_setbit(device, GMU_FAULT);
set_bit(GMU_FAULT, &device->gmu_core.flags);
gmu_core_snapshot(device);
/*
* Assume GMU hang after 10ms without responding.

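Throughout this change, the `gmu_core_setbit()`/`gmu_core_testbit()` wrappers give way to the kernel's atomic bitops acting directly on the shared `device->gmu_core.flags` word. A minimal sketch of that idiom, using a hypothetical `demo_flags` word and hypothetical bit names:

#include <linux/bitops.h>	/* set_bit, clear_bit, test_bit */

/* Hypothetical stand-ins for enum gmu_core_flags bits */
#define DEMO_FAULT	0
#define DEMO_CLK_ON	1

static unsigned long demo_flags;	/* one word holds every status bit */

static void demo_fault_path(void)
{
	/* Atomic RMW on the shared word; safe against concurrent updaters */
	set_bit(DEMO_FAULT, &demo_flags);

	/* Readers test the same word directly, no wrapper indirection */
	if (test_bit(DEMO_CLK_ON, &demo_flags))
		clear_bit(DEMO_CLK_ON, &demo_flags);
}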
View File

@@ -373,7 +373,7 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
int val;
/* Only trigger wakeup sequence if sleep sequence was done earlier */
if (!test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
if (!test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags))
return 0;
gmu_core_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
@@ -406,7 +406,7 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
/* Clear sleep sequence flag as wakeup sequence is successful */
clear_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
clear_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags);
/* Enable the power counter because it was disabled before slumber */
gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
@@ -423,7 +423,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
if (test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
if (test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags))
return 0;
/* RSC sleep sequence is different on v1 */
@@ -468,7 +468,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
set_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
set_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags);
return 0;
}
@@ -955,7 +955,8 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
/* Turn on TCM retention */
gmu_core_regwrite(device, A6XX_GMU_GENERAL_7, 1);
if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
if (!test_and_set_bit(GMU_BOOT_INIT_DONE,
&device->gmu_core.flags))
ret = _load_gmu_rpmh_ucode(device);
else
ret = a6xx_rpmh_power_on_gpu(device);
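The `a6xx_gmu_fw_start()` hunk above also relies on the standard one-time-init idiom: `test_and_set_bit()` atomically sets `GMU_BOOT_INIT_DONE` and returns its previous value, so only the first boot loads the RPMh ucode while every later start takes the warm-resume path. A sketch of the idiom with hypothetical helpers:

#include <linux/bitops.h>

#define DEMO_INIT_DONE	0

static unsigned long demo_flags;

static int demo_cold_boot(void)   { return 0; }	/* hypothetical */
static int demo_warm_resume(void) { return 0; }	/* hypothetical */

static int demo_start(void)
{
	/*
	 * Exactly one caller observes the bit as 0 and performs the
	 * cold boot; everyone afterwards resumes the powered-on path.
	 */
	if (!test_and_set_bit(DEMO_INIT_DONE, &demo_flags))
		return demo_cold_boot();

	return demo_warm_resume();
}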

View File

@@ -656,7 +656,7 @@ static inline int kgsl_state_is_awake(struct kgsl_device *device)
device->state == KGSL_STATE_AWARE)
return true;
else if (gmu_core_isenabled(device) &&
gmu_core_testbit(device, GMU_CLK_ON))
test_bit(GMU_CLK_ON, &device->gmu_core.flags))
return true;
else
return false;

View File

@@ -24,9 +24,7 @@
#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"
#include "kgsl_trace.h"
#define GMU_CONTEXT_USER 0
#define GMU_CONTEXT_KERNEL 1
@@ -611,6 +609,18 @@ static int gmu_dcvs_set(struct kgsl_device *device,
.bw = INVALID_DCVS_IDX,
};
/* Do not set to XO and lower GPU clock vote from GMU */
if ((gpu_pwrlevel != INVALID_DCVS_IDX) &&
(gpu_pwrlevel >= gmu->num_gpupwrlevels - 1))
return -EINVAL;
/* If GMU has not been started, save it */
if (!test_bit(GMU_HFI_ON, &device->gmu_core.flags)) {
/* store clock change request */
set_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags);
return 0;
}
if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
req.freq = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;
@@ -619,13 +629,15 @@ static int gmu_dcvs_set(struct kgsl_device *device,
/* GMU will vote for slumber levels through the sleep sequence */
if ((req.freq == INVALID_DCVS_IDX) &&
(req.bw == INVALID_DCVS_IDX))
(req.bw == INVALID_DCVS_IDX)) {
clear_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags);
return 0;
}
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
ret = gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev,
GMU_DCVS_NOHFI, req.freq, req.bw);
else if (test_bit(GMU_HFI_ON, &gmu->flags))
else if (test_bit(GMU_HFI_ON, &device->gmu_core.flags))
ret = hfi_send_req(gmu, H2F_MSG_GX_BW_PERF_VOTE, &req);
if (ret) {
@@ -637,6 +649,8 @@ static int gmu_dcvs_set(struct kgsl_device *device,
adreno_dispatcher_schedule(device);
}
/* indicate actual clock change */
clear_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags);
return ret;
}
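The new `gmu_dcvs_set()` centralizes the save-and-replay behavior that used to be open-coded in `kgsl_clk_set_rate()` (deleted in the last file below): if HFI is not up yet, the request is recorded via `GMU_DCVS_REPLAY` and retried once the GMU is running, and a vote that actually goes out drops the marker. A condensed sketch with hypothetical names:

#include <linux/bitops.h>

#define DEMO_HFI_ON	0
#define DEMO_REPLAY	1

static unsigned long demo_flags;

static int demo_send_vote(int level) { return 0; }	/* hypothetical */

static int demo_dcvs_set(int level)
{
	int ret;

	/* Messaging layer not up: remember that a vote is pending */
	if (!test_bit(DEMO_HFI_ON, &demo_flags)) {
		set_bit(DEMO_REPLAY, &demo_flags);
		return 0;
	}

	ret = demo_send_vote(level);

	/* The vote went out, so the pending marker is dropped */
	clear_bit(DEMO_REPLAY, &demo_flags);
	return ret;
}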
@@ -1076,8 +1090,9 @@ static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
return 0;
}
static int gmu_reg_probe(struct gmu_device *gmu)
static int gmu_reg_probe(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
struct resource *res;
res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
@@ -1097,10 +1112,9 @@ static int gmu_reg_probe(struct gmu_device *gmu)
gmu->reg_phys = res->start;
gmu->reg_len = resource_size(res);
gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
resource_size(res));
if (gmu->reg_virt == NULL) {
device->gmu_core.reg_virt = devm_ioremap(&gmu->pdev->dev,
res->start, resource_size(res));
if (device->gmu_core.reg_virt == NULL) {
dev_err(&gmu->pdev->dev, "kgsl_gmu_reg ioremap failed\n");
return -ENODEV;
}
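`gmu_reg_probe()` now stores the mapping in `device->gmu_core.reg_virt`, and because `devm_ioremap()` ties the mapping's lifetime to the GMU platform device, the explicit `devm_iounmap()` that `gmu_remove()` used to perform (deleted further down) becomes unnecessary. A sketch of the probe pattern; the resource name is inferred from the error string and the helper is hypothetical:

#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *demo_map_csr(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *virt;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"kgsl_gmu_reg");
	if (res == NULL)
		return NULL;

	/* devm_*: released automatically when the device is removed */
	virt = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (virt == NULL)
		dev_err(&pdev->dev, "kgsl_gmu_reg ioremap failed\n");

	return virt;
}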
@@ -1264,8 +1278,7 @@ static int gmu_irq_probe(struct kgsl_device *device, struct gmu_device *gmu)
}
/* Do not access any GMU registers in GMU probe function */
static int gmu_probe(struct kgsl_device *device,
struct device_node *node, unsigned long flags)
static int gmu_probe(struct kgsl_device *device, struct device_node *node)
{
struct gmu_device *gmu;
struct gmu_memdesc *mem_addr = NULL;
@@ -1279,11 +1292,10 @@ static int gmu_probe(struct kgsl_device *device,
if (gmu == NULL)
return -ENOMEM;
device->gmu_core.ptr = (void *)gmu;
hfi = &gmu->hfi;
gmu->load_mode = TCM_BOOT;
gmu->ver = ~0U;
gmu->flags = flags;
gmu->pdev = of_find_device_by_node(node);
of_dma_configure(&gmu->pdev->dev, node);
@@ -1305,7 +1317,7 @@ static int gmu_probe(struct kgsl_device *device,
mem_addr = gmu->hfi_mem;
/* Map and reserve GMU CSRs registers */
ret = gmu_reg_probe(gmu);
ret = gmu_reg_probe(device);
if (ret)
goto error;
@@ -1372,9 +1384,8 @@ static int gmu_probe(struct kgsl_device *device,
/* disable LM during boot time */
clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
set_bit(GMU_ENABLED, &gmu->flags);
set_bit(GMU_ENABLED, &device->gmu_core.flags);
device->gmu_core.ptr = (void *)gmu;
device->gmu_core.dev_ops = &adreno_a6xx_gmudev;
return 0;
@@ -1384,8 +1395,9 @@ error:
return ret;
}
static int gmu_enable_clks(struct gmu_device *gmu)
static int gmu_enable_clks(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
int ret, j = 0;
if (IS_ERR_OR_NULL(gmu->clks[0]))
@@ -1409,12 +1421,13 @@ static int gmu_enable_clks(struct gmu_device *gmu)
j++;
}
set_bit(GMU_CLK_ON, &gmu->flags);
set_bit(GMU_CLK_ON, &device->gmu_core.flags);
return 0;
}
static int gmu_disable_clks(struct gmu_device *gmu)
static int gmu_disable_clks(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
int j = 0;
if (IS_ERR_OR_NULL(gmu->clks[0]))
@@ -1425,7 +1438,7 @@ static int gmu_disable_clks(struct gmu_device *gmu)
j++;
}
clear_bit(GMU_CLK_ON, &gmu->flags);
clear_bit(GMU_CLK_ON, &device->gmu_core.flags);
return 0;
}
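`gmu_enable_clks()`/`gmu_disable_clks()` bracket the clock state with the shared `GMU_CLK_ON` bit, which is what lets `gmu_suspend()` and `gmu_stop()` below return early when the clocks are already off. A sketch of that guard, assuming hypothetical `clk_prepare_enable()`-style handles:

#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_CLK_ON	0
#define DEMO_MAX_CLKS	6

static unsigned long demo_flags;
static struct clk *demo_clks[DEMO_MAX_CLKS];	/* hypothetical handles */

static int demo_enable_clks(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_CLKS && demo_clks[i]; i++)
		if (clk_prepare_enable(demo_clks[i]))
			goto err;

	/* Publish the state so later paths can bail out cheaply */
	set_bit(DEMO_CLK_ON, &demo_flags);
	return 0;

err:
	while (--i >= 0)
		clk_disable_unprepare(demo_clks[i]);
	return -EINVAL;
}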
@@ -1488,7 +1501,7 @@ static int gmu_suspend(struct kgsl_device *device)
struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
if (!test_bit(GMU_CLK_ON, &gmu->flags))
if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags))
return 0;
/* Pending messages in all queues are abandoned */
@@ -1498,7 +1511,7 @@ static int gmu_suspend(struct kgsl_device *device)
if (gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
return -EINVAL;
gmu_disable_clks(gmu);
gmu_disable_clks(device);
gmu_disable_gdsc(gmu);
dev_err(&gmu->pdev->dev, "Suspended GMU\n");
return 0;
@@ -1547,9 +1560,9 @@ static int gmu_start(struct kgsl_device *device)
switch (device->state) {
case KGSL_STATE_INIT:
case KGSL_STATE_SUSPEND:
WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
gmu_enable_gdsc(gmu);
gmu_enable_clks(gmu);
gmu_enable_clks(device);
gmu_dev_ops->irq_enable(device);
/* Vote for 300MHz DDR for GMU to init */
@@ -1574,9 +1587,9 @@ static int gmu_start(struct kgsl_device *device)
break;
case KGSL_STATE_SLUMBER:
WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
gmu_enable_gdsc(gmu);
gmu_enable_clks(gmu);
gmu_enable_clks(device);
gmu_dev_ops->irq_enable(device);
ret = gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
@@ -1593,10 +1606,10 @@ static int gmu_start(struct kgsl_device *device)
case KGSL_STATE_RESET:
if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv) ||
test_bit(GMU_FAULT, &gmu->flags)) {
test_bit(GMU_FAULT, &device->gmu_core.flags)) {
gmu_suspend(device);
gmu_enable_gdsc(gmu);
gmu_enable_clks(gmu);
gmu_enable_clks(device);
gmu_dev_ops->irq_enable(device);
ret = gmu_dev_ops->rpmh_gpu_pwrctrl(
@@ -1646,7 +1659,7 @@ static void gmu_stop(struct kgsl_device *device)
struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
int ret = 0;
if (!test_bit(GMU_CLK_ON, &gmu->flags))
if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags))
return;
/* Wait for the lowest idle level we requested */
@@ -1668,7 +1681,7 @@ static void gmu_stop(struct kgsl_device *device)
hfi_stop(gmu);
gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
gmu_disable_clks(gmu);
gmu_disable_clks(device);
gmu_disable_gdsc(gmu);
msm_bus_scale_client_update_request(gmu->pcl, 0);
@@ -1680,7 +1693,7 @@ error:
* Set GMU_FAULT flag to indicate to power controller
* that hang recovery is needed to power on GPU
*/
set_bit(GMU_FAULT, &gmu->flags);
set_bit(GMU_FAULT, &device->gmu_core.flags);
dev_err(&gmu->pdev->dev, "Failed to stop GMU\n");
gmu_core_snapshot(device);
}
@@ -1727,11 +1740,6 @@ static void gmu_remove(struct kgsl_device *device)
gmu->pcl = 0;
}
if (gmu->reg_virt) {
devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
gmu->reg_virt = NULL;
}
gmu_memory_close(gmu);
for (i = 0; i < MAX_GMU_CLKS; i++) {
@@ -1751,86 +1759,12 @@ static void gmu_remove(struct kgsl_device *device)
gmu->cx_gdsc = NULL;
}
gmu->flags = 0;
device->gmu_core.flags = 0;
device->gmu_core.ptr = NULL;
gmu->pdev = NULL;
kfree(gmu);
}
static void gmu_regwrite(struct kgsl_device *device,
unsigned int offsetwords, unsigned int value)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
void __iomem *reg;
trace_kgsl_regwrite(device, offsetwords, value);
offsetwords -= device->gmu_core.gmu2gpu_offset;
reg = gmu->reg_virt + (offsetwords << 2);
/*
* ensure previous writes post before this one,
* i.e. act like normal writel()
*/
wmb();
__raw_writel(value, reg);
}
static void gmu_regread(struct kgsl_device *device,
unsigned int offsetwords, unsigned int *value)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
void __iomem *reg;
offsetwords -= device->gmu_core.gmu2gpu_offset;
reg = gmu->reg_virt + (offsetwords << 2);
*value = __raw_readl(reg);
/*
* ensure this read finishes before the next one.
* i.e. act like normal readl()
*/
rmb();
}
/* Check if GPMU is in charge of power features */
static bool gmu_gpmu_isenabled(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
return test_bit(GMU_GPMU, &gmu->flags);
}
/* Check if GMU is enabled. Only set once GMU is fully initialized */
static bool gmu_isenabled(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
return test_bit(GMU_ENABLED, &gmu->flags);
}
static void gmu_set_bit(struct kgsl_device *device, enum gmu_core_flags flag)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
set_bit(flag, &gmu->flags);
}
static void gmu_clear_bit(struct kgsl_device *device, enum gmu_core_flags flag)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
clear_bit(flag, &gmu->flags);
}
static int gmu_test_bit(struct kgsl_device *device, enum gmu_core_flags flag)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
return test_bit(flag, &gmu->flags);
}
static bool gmu_regulator_isenabled(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
@@ -1838,19 +1772,11 @@ static bool gmu_regulator_isenabled(struct kgsl_device *device)
return (gmu->gx_gdsc && regulator_is_enabled(gmu->gx_gdsc));
}
struct gmu_core_ops gmu_ops = {
.probe = gmu_probe,
.remove = gmu_remove,
.regread = gmu_regread,
.regwrite = gmu_regwrite,
.isenabled = gmu_isenabled,
.gpmu_isenabled = gmu_gpmu_isenabled,
.start = gmu_start,
.stop = gmu_stop,
.set_bit = gmu_set_bit,
.clear_bit = gmu_clear_bit,
.test_bit = gmu_test_bit,
.dcvs_set = gmu_dcvs_set,
.snapshot = gmu_snapshot,
.regulator_isenabled = gmu_regulator_isenabled,

View File

@@ -103,7 +103,6 @@ enum gmu_load_mode {
* struct gmu_device - GMU device structure
* @ver: GMU FW version, read from GMU
* @reg_phys: GMU CSR physical address
* @reg_virt: GMU CSR virtual address
* @reg_len: GMU CSR range
* @gmu_interrupt_num: GMU interrupt number
* @fw_image: descriptor of GMU memory that has GMU image in it
@@ -127,7 +126,6 @@ enum gmu_load_mode {
* @gx_gdsc: GX headswitch that controls power of GPU subsystem
* @clks: GPU subsystem clocks required for GMU functionality
* @load_mode: GMU FW load/boot mode
* @flags: GMU flags
* @wakeup_pwrlevel: GPU wake up power/DCVS level in case different
* than default power level
* @pcl: GPU BW scaling client
@@ -139,7 +137,6 @@ struct gmu_device {
unsigned int ver;
struct platform_device *pdev;
unsigned long reg_phys;
void __iomem *reg_virt;
unsigned int reg_len;
unsigned int gmu_interrupt_num;
struct gmu_memdesc cached_fw_image;
@@ -165,7 +162,6 @@ struct gmu_device {
struct regulator *gx_gdsc;
struct clk *clks[MAX_GMU_CLKS];
enum gmu_load_mode load_mode;
unsigned long flags;
unsigned int wakeup_pwrlevel;
unsigned int pcl;
unsigned int ccl;

View File

@@ -15,7 +15,7 @@
#include "kgsl_device.h"
#include "kgsl_gmu_core.h"
#include "a6xx_reg.h"
#include "kgsl_trace.h"
#include "adreno.h"
#undef MODULE_PARAM_PREFIX
@@ -36,11 +36,10 @@ int gmu_core_probe(struct kgsl_device *device)
{
struct device_node *node;
struct gmu_core_ops *gmu_core_ops;
unsigned long flags;
int i = 0, ret = -ENXIO;
flags = ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_GPMU) ?
BIT(GMU_GPMU) : 0;
device->gmu_core.flags = ADRENO_FEATURE(ADRENO_DEVICE(device),
ADRENO_GPMU) ? BIT(GMU_GPMU) : 0;
for (i = 0; i < ARRAY_SIZE(gmu_subtypes); i++) {
node = of_find_compatible_node(device->pdev->dev.of_node,
@@ -53,14 +52,14 @@ int gmu_core_probe(struct kgsl_device *device)
/* No GMU in dt, no worries...hopefully */
if (node == NULL) {
/* If we are trying to use GPMU and no GMU, that's bad */
if (flags & BIT(GMU_GPMU))
if (device->gmu_core.flags & BIT(GMU_GPMU))
return ret;
/* Otherwise it's ok and nothing to do */
return 0;
}
if (gmu_core_ops && gmu_core_ops->probe) {
ret = gmu_core_ops->probe(device, node, flags);
ret = gmu_core_ops->probe(device, node);
if (ret == 0)
device->gmu_core.core_ops = gmu_core_ops;
}
@@ -78,22 +77,12 @@ void gmu_core_remove(struct kgsl_device *device)
bool gmu_core_isenabled(struct kgsl_device *device)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
if (gmu_core_ops && gmu_core_ops->isenabled)
return !nogmu && gmu_core_ops->isenabled(device);
return false;
return test_bit(GMU_ENABLED, &device->gmu_core.flags);
}
bool gmu_core_gpmu_isenabled(struct kgsl_device *device)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
if (gmu_core_ops && gmu_core_ops->gpmu_isenabled)
return gmu_core_ops->gpmu_isenabled(device);
return false;
return test_bit(GMU_GPMU, &device->gmu_core.flags);
}
int gmu_core_start(struct kgsl_device *device)
@@ -143,32 +132,6 @@ int gmu_core_dcvs_set(struct kgsl_device *device, unsigned int gpu_pwrlevel,
return -EINVAL;
}
void gmu_core_setbit(struct kgsl_device *device, enum gmu_core_flags flag)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
if (gmu_core_ops && gmu_core_ops->set_bit)
return gmu_core_ops->set_bit(device, flag);
}
void gmu_core_clearbit(struct kgsl_device *device, enum gmu_core_flags flag)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
if (gmu_core_ops && gmu_core_ops->clear_bit)
return gmu_core_ops->clear_bit(device, flag);
}
int gmu_core_testbit(struct kgsl_device *device, enum gmu_core_flags flag)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
if (gmu_core_ops && gmu_core_ops->test_bit)
return gmu_core_ops->test_bit(device, flag);
return -EINVAL;
}
bool gmu_core_regulator_isenabled(struct kgsl_device *device)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
@@ -191,31 +154,47 @@ bool gmu_core_is_register_offset(struct kgsl_device *device,
void gmu_core_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
void __iomem *reg;
if (!gmu_core_is_register_offset(device, offsetwords)) {
WARN(1, "Out of bounds register read: 0x%x\n", offsetwords);
return;
}
if (gmu_core_ops && gmu_core_ops->regread)
gmu_core_ops->regread(device, offsetwords, value);
else
*value = 0;
offsetwords -= device->gmu_core.gmu2gpu_offset;
reg = device->gmu_core.reg_virt + (offsetwords << 2);
*value = __raw_readl(reg);
/*
* ensure this read finishes before the next one.
* i.e. act like normal readl()
*/
rmb();
}
void gmu_core_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value)
{
struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device);
void __iomem *reg;
if (!gmu_core_is_register_offset(device, offsetwords)) {
WARN(1, "Out of bounds register write: 0x%x\n", offsetwords);
return;
}
if (gmu_core_ops && gmu_core_ops->regwrite)
gmu_core_ops->regwrite(device, offsetwords, value);
trace_kgsl_regwrite(device, offsetwords, value);
offsetwords -= device->gmu_core.gmu2gpu_offset;
reg = device->gmu_core.reg_virt + (offsetwords << 2);
/*
* ensure previous writes post before this one,
* i.e. act like normal writel()
*/
wmb();
__raw_writel(value, reg);
}
void gmu_core_regrmw(struct kgsl_device *device,
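With `reg_virt` and `gmu2gpu_offset` in `gmu_core_device`, the accessors can live here instead of behind the ops table. They keep the same discipline as the deleted `gmu_regread()`/`gmu_regwrite()`: word offsets are shifted by 2 into byte offsets, and explicit `rmb()`/`wmb()` barriers around the relaxed `__raw_readl()`/`__raw_writel()` make them behave like `readl()`/`writel()`. A standalone sketch of those helpers, with hypothetical names:

#include <linux/io.h>

static unsigned int demo_regread(void __iomem *base, unsigned int offsetwords)
{
	/* offsetwords counts 32-bit registers; << 2 converts to bytes */
	unsigned int val = __raw_readl(base + (offsetwords << 2));

	rmb();	/* finish this read before later ones, like readl() */
	return val;
}

static void demo_regwrite(void __iomem *base, unsigned int offsetwords,
		unsigned int value)
{
	wmb();	/* post earlier writes before this one, like writel() */
	__raw_writel(value, base + (offsetwords << 2));
}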

View File

@@ -118,20 +118,10 @@ struct adreno_device;
struct kgsl_snapshot;
struct gmu_core_ops {
int (*probe)(struct kgsl_device *device, struct device_node *node,
unsigned long flags);
int (*probe)(struct kgsl_device *device, struct device_node *node);
void (*remove)(struct kgsl_device *device);
void (*regread)(struct kgsl_device *device,
unsigned int offsetwords, unsigned int *value);
void (*regwrite)(struct kgsl_device *device,
unsigned int offsetwords, unsigned int value);
bool (*isenabled)(struct kgsl_device *device);
bool (*gpmu_isenabled)(struct kgsl_device *device);
int (*dcvs_set)(struct kgsl_device *device,
unsigned int gpu_pwrlevel, unsigned int bus_level);
void (*set_bit)(struct kgsl_device *device, enum gmu_core_flags flag);
void (*clear_bit)(struct kgsl_device *device, enum gmu_core_flags flag);
int (*test_bit)(struct kgsl_device *device, enum gmu_core_flags flag);
int (*start)(struct kgsl_device *device);
void (*stop)(struct kgsl_device *device);
void (*snapshot)(struct kgsl_device *device);
@@ -169,15 +159,19 @@ struct gmu_dev_ops {
* and GPU register set, the offset will be used when accessing
* gmu registers using offset defined in GPU register space.
* @reg_len: GMU registers length
* @reg_virt: GMU CSR virtual address
* @core_ops: Pointer to gmu core operations
* @dev_ops: Pointer to gmu device operations
* @flags: GMU flags
*/
struct gmu_core_device {
void *ptr;
unsigned int gmu2gpu_offset;
unsigned int reg_len;
void __iomem *reg_virt;
struct gmu_core_ops *core_ops;
struct gmu_dev_ops *dev_ops;
unsigned long flags;
};
/* GMU core functions */
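Moving `flags` and `reg_virt` into `gmu_core_device` is the structural heart of the commit: state that generic code needs lives in the shared struct, while only backend behavior stays behind the vtable. A minimal illustration of that split, with hypothetical types:

#include <linux/io.h>

struct demo_core_ops {
	int (*start)(void);	/* backend-specific behavior */
	void (*stop)(void);
};

struct demo_core_device {
	void *ptr;			/* backend-private object */
	void __iomem *reg_virt;		/* shared: generic MMIO accessors */
	unsigned long flags;		/* shared: atomic bitops, no ops hop */
	struct demo_core_ops *core_ops;
};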
@@ -193,9 +187,6 @@ bool gmu_core_gpmu_isenabled(struct kgsl_device *device);
bool gmu_core_isenabled(struct kgsl_device *device);
int gmu_core_dcvs_set(struct kgsl_device *device, unsigned int gpu_pwrlevel,
unsigned int bus_level);
void gmu_core_setbit(struct kgsl_device *device, enum gmu_core_flags flag);
void gmu_core_clearbit(struct kgsl_device *device, enum gmu_core_flags flag);
int gmu_core_testbit(struct kgsl_device *device, enum gmu_core_flags flag);
bool gmu_core_regulator_isenabled(struct kgsl_device *device);
bool gmu_core_is_register_offset(struct kgsl_device *device,
unsigned int offsetwords);

View File

@@ -684,7 +684,7 @@ int hfi_start(struct kgsl_device *device,
struct hfi_queue_header *hdr;
int result, i;
if (test_bit(GMU_HFI_ON, &gmu->flags))
if (test_bit(GMU_HFI_ON, &device->gmu_core.flags))
return 0;
/* Force read_index to the write_index no matter what */
@@ -742,7 +742,7 @@ int hfi_start(struct kgsl_device *device,
return result;
}
}
set_bit(GMU_HFI_ON, &gmu->flags);
set_bit(GMU_HFI_ON, &device->gmu_core.flags);
return 0;
}
@@ -751,10 +751,12 @@ void hfi_stop(struct gmu_device *gmu)
struct gmu_memdesc *mem_addr = gmu->hfi_mem;
struct hfi_queue_table *tbl = mem_addr->hostptr;
struct hfi_queue_header *hdr;
struct kgsl_hfi *hfi = &gmu->hfi;
struct kgsl_device *device = hfi->kgsldev;
unsigned int i;
if (!test_bit(GMU_HFI_ON, &gmu->flags))
if (!test_bit(GMU_HFI_ON, &device->gmu_core.flags))
return;
/* Flush HFI queues */
@@ -769,7 +771,7 @@ void hfi_stop(struct gmu_device *gmu)
i, hdr->read_index, hdr->write_index);
}
clear_bit(GMU_HFI_ON, &gmu->flags);
clear_bit(GMU_HFI_ON, &device->gmu_core.flags);
}
/* Entry point for external HFI requests */
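`hfi_stop()` only receives the `gmu_device`, so it reaches the shared flags word through the `kgsldev` back-pointer kept in its HFI struct. The hop looks roughly like this, with hypothetical types:

#include <linux/bitops.h>

#define DEMO_HFI_ON	0

struct demo_device { unsigned long flags; };
struct demo_hfi    { struct demo_device *kgsldev; };
struct demo_gmu    { struct demo_hfi hfi; };

static void demo_hfi_stop(struct demo_gmu *gmu)
{
	/* Follow the back-pointer to the device that owns the flags */
	struct demo_device *device = gmu->hfi.kgsldev;

	if (!test_bit(DEMO_HFI_ON, &device->flags))
		return;

	/* ... flush queues ... */

	clear_bit(DEMO_HFI_ON, &device->flags);
}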

View File

@@ -251,28 +251,9 @@ int kgsl_clk_set_rate(struct kgsl_device *device,
int ret = 0;
/* GMU scales GPU freq */
if (gmu_core_gpmu_isenabled(device)) {
int num_gpupwrlevels = pwr->num_pwrlevels;
/* If GMU has not been started, save it */
if (!gmu_core_testbit(device, GMU_HFI_ON)) {
/* store clock change request */
gmu_core_setbit(device, GMU_DCVS_REPLAY);
return 0;
}
if (num_gpupwrlevels < 0)
return -EINVAL;
/* If the GMU is on we cannot vote for the lowest level */
if (pwrlevel == (num_gpupwrlevels - 1)) {
WARN(1, "Cannot set 0 GPU frequency with GMU\n");
return -EINVAL;
}
if (gmu_core_gpmu_isenabled(device))
ret = gmu_core_dcvs_set(device, pwrlevel, INVALID_DCVS_IDX);
/* indicate actual clock change */
gmu_core_clearbit(device, GMU_DCVS_REPLAY);
} else
else
/* Linux clock driver scales GPU freq */
ret = kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
pl->gpu_freq, clocks[0]);
@@ -444,7 +425,7 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
kgsl_pwrctrl_set_thermal_cycle(device, new_level);
if (new_level == old_level &&
!gmu_core_testbit(device, GMU_DCVS_REPLAY))
!test_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags))
return;
kgsl_pwrscale_update_stats(device);
@@ -2793,7 +2774,7 @@ _aware(struct kgsl_device *device)
case KGSL_STATE_SLUMBER:
/* if GMU already in FAULT */
if (gmu_core_isenabled(device) &&
gmu_core_testbit(device, GMU_FAULT)) {
test_bit(GMU_FAULT, &device->gmu_core.flags)) {
status = -EINVAL;
break;
}
@@ -2808,7 +2789,7 @@ _aware(struct kgsl_device *device)
if (gmu_core_isenabled(device)) {
/* GMU hang recovery */
kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET);
gmu_core_setbit(device, GMU_FAULT);
set_bit(GMU_FAULT, &device->gmu_core.flags);
status = kgsl_pwrctrl_enable(device);
if (status) {
/*
@@ -2844,7 +2825,7 @@ _aware(struct kgsl_device *device)
KGSL_STATE_AWARE);
}
gmu_core_clearbit(device, GMU_FAULT);
clear_bit(GMU_FAULT, &device->gmu_core.flags);
return status;
}
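In `_aware()`, `GMU_FAULT` does double duty: a fault already latched blocks a transition that cannot succeed, a failed power-up latches it for the hang-recovery path, and a completed recovery clears it. A condensed sketch of that flow, with hypothetical helpers:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_FAULT	0

static unsigned long demo_flags;

static int demo_power_up(void) { return 0; }	/* hypothetical */

static int demo_to_aware(void)
{
	/* A pre-existing fault means this transition cannot succeed */
	if (test_bit(DEMO_FAULT, &demo_flags))
		return -EINVAL;

	if (demo_power_up()) {
		/* Latch the fault so hang recovery can power the GPU on */
		set_bit(DEMO_FAULT, &demo_flags);
		return -EINVAL;
	}

	/* Healthy again: clear the fault and resume normal transitions */
	clear_bit(DEMO_FAULT, &demo_flags);
	return 0;
}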