Merge "msm: kgsl: Do GX GBIF halt only if GX is on"

commit 8209738692
Author: qctecmdr
Committed: 2020-09-18 23:33:18 -07:00 by Gerrit - the friendly Code Review server
3 changed files with 84 additions and 33 deletions


@@ -1144,6 +1144,39 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
 }
 
 #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
 
+static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
+	u32 mask, const char *client)
+{
+	u32 ack;
+	unsigned long t;
+
+	kgsl_regwrite(device, reg, mask);
+
+	t = jiffies + msecs_to_jiffies(100);
+	do {
+		kgsl_regread(device, ack_reg, &ack);
+		if ((ack & mask) == mask)
+			return;
+
+		/*
+		 * If we are attempting recovery in case of stall-on-fault
+		 * then the halt sequence will not complete as long as SMMU
+		 * is stalled.
+		 */
+		kgsl_mmu_pagefault_resume(&device->mmu);
+		usleep_range(10, 100);
+	} while (!time_after(jiffies, t));
+
+	/* Check one last time */
+	kgsl_mmu_pagefault_resume(&device->mmu);
+
+	kgsl_regread(device, ack_reg, &ack);
+	if ((ack & mask) == mask)
+		return;
+
+	dev_err(device->dev, "%s GBIF halt timed out\n", client);
+}
+
 static void a6xx_llm_glm_handshake(struct kgsl_device *device)
 {
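A note on the idiom: do_gbif_halt() is a bounded poll. It requests the halt once, then re-reads the ack register until every requested bit latches or a ~100 ms deadline passes, resuming the SMMU on each lap so a stalled pagefault cannot wedge the loop, and it checks once more after the deadline so a late ack is not misreported. A minimal user-space analogue of that poll-until-ack-or-deadline shape, for illustration only (reg_write()/reg_read() and the instantly-acking fake register are stand-ins invented for this sketch, not KGSL API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_ack_reg;           /* stand-in for the hardware ack register */

static void reg_write(uint32_t mask) { fake_ack_reg |= mask; /* fake hw acks instantly */ }
static uint32_t reg_read(void) { return fake_ack_reg; }

static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000L +
	       (now.tv_nsec - start->tv_nsec) / 1000000L;
}

/* Request a halt, then poll the ack until all bits latch or we time out. */
static bool halt_poll(uint32_t mask, long timeout_ms)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	reg_write(mask);

	do {
		if ((reg_read() & mask) == mask)
			return true;
		/* the real driver resumes the SMMU and sleeps 10-100 us here */
	} while (elapsed_ms(&start) < timeout_ms);

	/* check one last time, exactly like do_gbif_halt() */
	return (reg_read() & mask) == mask;
}

int main(void)
{
	printf("GX halt %s\n", halt_poll(0xf, 100) ? "acked" : "timed out");
	return 0;
}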
@@ -1209,6 +1242,27 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
 	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
 
+	if (adreno_has_gbif(adreno_dev)) {
+		struct adreno_gpudev *gpudev =
+				ADRENO_GPU_DEVICE(adreno_dev);
+
+		/* Halt GX traffic */
+		if (a6xx_gmu_gx_is_on(adreno_dev))
+			do_gbif_halt(device, A6XX_RBBM_GBIF_HALT,
+				A6XX_RBBM_GBIF_HALT_ACK,
+				gpudev->gbif_gx_halt_mask,
+				"GX");
+
+		/* Halt CX traffic */
+		do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK,
+			gpudev->gbif_arb_halt_mask, "CX");
+	}
+
+	if (a6xx_gmu_gx_is_on(adreno_dev))
+		kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1);
+
+	/* Allow the software reset to complete */
+	udelay(100);
 
 	/*
 	 * This is based on the assumption that GMU is the only one controlling
 	 * the GX HS. This code path is the only client voting for GX through

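This hunk is the change the commit title names: the GX-side GBIF halt (A6XX_RBBM_GBIF_HALT) is now attempted only when a6xx_gmu_gx_is_on() reports the GX rail up, presumably because a halt request issued into a collapsed GX domain can never be acked, so do_gbif_halt() would spin its full 100 ms and log a spurious timeout; the CX-side arbiter halt stays unconditional. A toy model of that guarded-register-access pattern (gx_rail_on, gx_reg_write() and the "register" are invented for this sketch):

#include <stdbool.h>
#include <stdio.h>

static bool gx_rail_on;
static unsigned int gx_halt_reg;

/* Gate every GX-domain access on rail state; skip, don't hang, when down. */
static bool gx_reg_write(unsigned int val)
{
	if (!gx_rail_on) {
		fprintf(stderr, "skipping GX write: rail is down\n");
		return false;           /* caller proceeds with CX-only halt */
	}
	gx_halt_reg = val;
	return true;
}

int main(void)
{
	gx_rail_on = false;
	gx_reg_write(0xf);              /* skipped, mirrors the new guard */
	gx_rail_on = true;
	gx_reg_write(0xf);              /* performed, GX is up */
	return 0;
}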

@@ -854,8 +854,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 	if (pt->name == KGSL_MMU_SECURE_PT)
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
 
-	ctx->fault = 1;
-
 	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
 				&adreno_dev->ft_pf_policy) &&
 			(flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
@@ -948,6 +946,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
 			sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
 			KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
 
+			/* This is used by reset/recovery path */
+			ctx->stalled_on_fault = true;
+
 			adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
 			/* Go ahead with recovery*/
 			adreno_dispatcher_schedule(device);
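The new ctx->stalled_on_fault flag threads through three functions in this file: the fault handler sets it only on the stall-on-fault path, kgsl_iommu_pagefault_resume() and kgsl_iommu_clear_fsr() act only when it is set, and clear_fsr() drops it once recovery finishes, replacing the broader ctx->fault int. A condensed, compilable model of that handshake, with the three functions standing in for kgsl_iommu_fault_handler(), kgsl_iommu_pagefault_resume() and kgsl_iommu_clear_fsr() and register traffic replaced by printfs:

#include <stdbool.h>
#include <stdio.h>

struct ctx { bool stalled_on_fault; };

static void fault_handler(struct ctx *c)
{
	/* stall-on-fault policy: leave the SMMU stalled, flag it for recovery */
	c->stalled_on_fault = true;
	printf("fault: stalled, recovery scheduled\n");
}

static void pagefault_resume(struct ctx *c)
{
	if (!c->stalled_on_fault)
		return;                 /* nothing stalled, nothing to do */
	printf("resume: clear FSR, write RESUME.TnR\n");
}

static void clear_fsr(struct ctx *c)
{
	if (!c->stalled_on_fault)
		return;                 /* guard added by this patch */
	printf("clear_fsr: final FSR clear\n");
	c->stalled_on_fault = false;    /* recovery done, re-arm */
}

int main(void)
{
	struct ctx c = { 0 };

	fault_handler(&c);    /* GPU fault while stall-on-fault is enabled */
	pagefault_resume(&c); /* called from do_gbif_halt() during reset */
	clear_fsr(&c);        /* end of recovery */
	return 0;
}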
@@ -2076,7 +2077,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
 	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 	unsigned int sctlr_val;
 
-	if (ctx->default_pt != NULL) {
+	if (ctx->default_pt != NULL && ctx->stalled_on_fault) {
 		kgsl_iommu_enable_clk(mmu);
 		KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
 		/*
@@ -2093,6 +2094,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
 		 */
 		wmb();
 		kgsl_iommu_disable_clk(mmu);
+		ctx->stalled_on_fault = false;
 	}
 }
@@ -2100,36 +2102,31 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
 {
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
 	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
-	unsigned int fsr_val;
 
-	if (ctx->default_pt != NULL && ctx->fault) {
-		while (1) {
-			KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
-			/*
-			 * Make sure the above register write
-			 * is not reordered across the barrier
-			 * as we use writel_relaxed to write it.
-			 */
-			wmb();
+	if (ctx->default_pt != NULL && ctx->stalled_on_fault) {
+		/*
+		 * This will only clear fault bits in FSR. FSR.SS will still
+		 * be set. Writing to RESUME (below) is the only way to clear
+		 * FSR.SS bit.
+		 */
+		KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
+		/*
+		 * Make sure the above register write is not reordered across
+		 * the barrier as we use writel_relaxed to write it.
+		 */
+		wmb();
 
-			/*
-			 * Write 1 to RESUME.TnR to terminate the
-			 * stalled transaction.
-			 */
-			KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
-			/*
-			 * Make sure the above register writes
-			 * are not reordered across the barrier
-			 * as we use writel_relaxed to write them
-			 */
-			wmb();
+		/*
+		 * Write 1 to RESUME.TnR to terminate the stalled transaction.
+		 * This will also allow the SMMU to process new transactions.
+		 */
+		KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
+		/*
+		 * Make sure the above register writes are not reordered across
+		 * the barrier as we use writel_relaxed to write them.
+		 */
+		wmb();
-			udelay(5);
-			fsr_val = KGSL_IOMMU_GET_CTX_REG(ctx, FSR);
-			if (!(fsr_val & (1 << KGSL_IOMMU_FSR_SS_SHIFT)))
-				break;
-		}
-		ctx->fault = 0;
 	}
 }
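The rewrite drops the retry loop: per the new comments, writing FSR clears only the fault status bits while FSR.SS stays set, and writing RESUME.TnR is what clears SS, so one FSR write plus one RESUME write suffices and re-reading FSR in a loop was redundant. A toy, compilable model of those register semantics (the bit layout and helper names here are invented for the demo, not the real SMMU map):

#include <stdio.h>

#define FSR_SS    (1u << 30)    /* stall status (hypothetical position) */
#define FSR_FAULT 0x3fffffffu   /* everything else */

static unsigned int fsr = FSR_SS | 0x2; /* stalled, with a fault recorded */

static void write_fsr(unsigned int v)  { fsr &= ~(v & FSR_FAULT); } /* W1C, except SS */
static void write_resume_tnr(void)     { fsr &= ~FSR_SS; }          /* only way to clear SS */

int main(void)
{
	write_fsr(0xffffffff);
	printf("after FSR clear:  SS=%d\n", !!(fsr & FSR_SS)); /* still 1 */
	write_resume_tnr();
	printf("after RESUME.TnR: SS=%d\n", !!(fsr & FSR_SS)); /* now 0 */
	return 0;
}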


@@ -100,8 +100,8 @@ enum kgsl_iommu_context_id {
  * @cb_num: The hardware context bank number, used for calculating register
  *		offsets.
  * @kgsldev: The kgsl device that uses this context.
- * @fault: Flag when set indicates that this iommu device has caused a page
- *		fault
+ * @stalled_on_fault: Flag when set indicates that this iommu device is stalled
+ *		on a page fault
  * @gpu_offset: Offset of this context bank in the GPU register space
  * @default_pt: The default pagetable for this context,
  *		it may be changed by self programming.
@@ -112,7 +112,7 @@ struct kgsl_iommu_context {
 	enum kgsl_iommu_context_id id;
 	unsigned int cb_num;
 	struct kgsl_device *kgsldev;
-	int fault;
+	bool stalled_on_fault;
 	void __iomem *regbase;
 	unsigned int gpu_offset;
 	struct kgsl_pagetable *default_pt;