Merge tag 'LA.UM.9.1.r1-16300-SMxxx0.QSSI14.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.14

"LA.UM.9.1.r1-16300-SMxxx0.QSSI14.0"

* tag 'LA.UM.9.1.r1-16300-SMxxx0.QSSI14.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.14:
  msm: adsprpc: use-after-free (UAF) in global maps
  msm: npu v2: Fix OOB issue in IPC between driver and firmware
  msm: npu v1: Fix OOB issue in IPC between driver and firmware
  rpmsg: bgcom: out of bound read from process_cmd
  defconfig: Disable SLUB_DEBUG SCHED_DEBUG and DEBUG_PREEMPT in perf build
  msm: vidc: Release cvp buffer lock in invalid buffer case
  msm: vidc: Fix possible UAF during buffer unregister call
  msm: camera: sensor: Handling race condition in util api
  msm: kgsl: Update the protect register list
  msm: kgsl: sensor: Proper handling of race condition in util api

Change-Id: I9b335937bd7c56f7bf1512bb81a3da0243a10987
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
This commit is contained in:
Richard Raya 2024-07-23 18:06:42 -03:00
commit 80d6c11591
21 changed files with 311 additions and 859 deletions

View File

@ -11,6 +11,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@ -336,6 +337,8 @@ CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_PANIC_ON_RECURSIVE_FAULT=y
CONFIG_PANIC_TIMEOUT=5
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -605,64 +606,43 @@ static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
} while (free);
}
/*
 * fastrpc_mmap_add_global() - publish a mapping on the driver-wide map list.
 * @map: mapping to insert.
 *
 * Global (heap) mappings live on the gfa.maps list rather than on any
 * per-file list; that list is protected by gfa.hlock taken with IRQs
 * disabled so it is safe from interrupt context as well.
 */
static void fastrpc_mmap_add_global(struct fastrpc_mmap *map)
{
	struct fastrpc_apps *apps = &gfa;
	unsigned long flags = 0;

	spin_lock_irqsave(&apps->hlock, flags);
	hlist_add_head(&map->hn, &apps->maps);
	spin_unlock_irqrestore(&apps->hlock, flags);
}
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
struct fastrpc_apps *me = &gfa;
struct fastrpc_file *fl = map->fl;
spin_lock(&me->hlock);
hlist_add_head(&map->hn, &me->maps);
spin_unlock(&me->hlock);
} else {
struct fastrpc_file *fl = map->fl;
hlist_add_head(&map->hn, &fl->maps);
}
hlist_add_head(&map->hn, &fl->maps);
}
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
uintptr_t va, size_t len, int mflags, int refs,
struct fastrpc_mmap **ppmap)
{
struct fastrpc_apps *me = &gfa;
struct fastrpc_mmap *match = NULL, *map = NULL;
struct hlist_node *n;
if ((va + len) < va)
return -EOVERFLOW;
if (mflags == ADSP_MMAP_HEAP_ADDR ||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock(&me->hlock);
hlist_for_each_entry_safe(map, n, &me->maps, hn) {
if (va >= map->va &&
va + len <= map->va + map->len &&
map->fd == fd) {
if (refs) {
if (map->refs + 1 == INT_MAX) {
spin_unlock(&me->hlock);
return -ETOOMANYREFS;
}
map->refs++;
}
match = map;
break;
}
}
spin_unlock(&me->hlock);
} else {
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
if (va >= map->va &&
va + len <= map->va + map->len &&
map->fd == fd) {
if (refs) {
if (map->refs + 1 == INT_MAX)
return -ETOOMANYREFS;
map->refs++;
}
match = map;
break;
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
if (va >= map->va &&
va + len <= map->va + map->len &&
map->fd == fd) {
if (refs) {
if (map->refs + 1 == INT_MAX)
return -ETOOMANYREFS;
map->refs++;
}
match = map;
break;
}
}
if (match) {
@ -1011,8 +991,9 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
map->va = va;
}
map->len = len;
fastrpc_mmap_add(map);
if ((mflags != ADSP_MMAP_HEAP_ADDR) &&
(mflags != ADSP_MMAP_REMOTE_HEAP_ADDR))
fastrpc_mmap_add(map);
*ppmap = map;
bail:
@ -2352,6 +2333,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
fastrpc_mmap_add_global(mem);
phys = mem->phys;
size = mem->size;
if (me->channel[fl->cid].rhvm.vmid) {
@ -2800,8 +2782,11 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
}
} while (match);
bail:
if (err && match)
fastrpc_mmap_add(match);
if (err && match) {
mutex_lock(&fl->map_mutex);
fastrpc_mmap_add_global(match);
mutex_unlock(&fl->map_mutex);
}
return err;
}
@ -2921,7 +2906,11 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
bail:
if (err && map) {
mutex_lock(&fl->map_mutex);
fastrpc_mmap_add(map);
if ((map->flags == ADSP_MMAP_HEAP_ADDR) ||
(map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR))
fastrpc_mmap_add_global(map);
else
fastrpc_mmap_add(map);
mutex_unlock(&fl->map_mutex);
}
mutex_unlock(&fl->internal_map_mutex);
@ -3028,6 +3017,9 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
if (err)
goto bail;
map->raddr = raddr;
if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
fastrpc_mmap_add_global(map);
}
ud->vaddrout = raddr;
bail:

View File

@ -1,5 +1,5 @@
/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -2063,17 +2063,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
}
}
if (gmu_core_gpmu_isenabled(device) &&
adreno_dev->perfctr_ifpc_lo == 0) {
ret = adreno_perfcounter_get(adreno_dev,
KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 4,
&adreno_dev->perfctr_ifpc_lo, NULL,
PERFCOUNTER_FLAG_KERNEL);
if (ret) {
WARN_ONCE(1, "Unable to get perf counter for IFPC\n");
adreno_dev->perfctr_ifpc_lo = 0;
}
}
/* Clear the busy_data stats - we're starting over from scratch */
adreno_dev->busy_data.gpu_busy = 0;

View File

@ -1,4 +1,5 @@
/* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -355,42 +356,6 @@ static const struct {
{adreno_is_a610, a612_hwcg_regs, ARRAY_SIZE(a612_hwcg_regs)},
};
/*
 * Legacy protect-register table: each entry describes one register span that
 * is packed into a CP_PROTECT_REG slot as
 * base | (count << 18) | (read_protect << 31).
 */
static struct a6xx_protected_regs {
unsigned int base; /* dword offset of the first register in the span */
unsigned int count; /* span length field, shifted into bits 18+ */
int read_protect; /* written to bit 31; name suggests reads are blocked too */
} a6xx_protected_regs_group[] = {
{ 0x600, 0x51, 0 },
{ 0xAE50, 0x2, 1 },
{ 0x9624, 0x13, 1 },
{ 0x8630, 0x8, 1 },
{ 0x9E70, 0x1, 1 },
{ 0x9E78, 0x187, 1 },
{ 0xF000, 0x810, 1 },
{ 0xFC00, 0x3, 0 },
{ 0x50E, 0x0, 1 },
{ 0x50F, 0x0, 0 },
{ 0x510, 0x0, 1 },
{ 0x0, 0x4F9, 0 },
{ 0x501, 0xA, 0 },
{ 0x511, 0x44, 0 },
{ 0xE00, 0x1, 1 },
{ 0xE03, 0xB, 1 },
{ 0x8E00, 0x0, 1 },
{ 0x8E50, 0xF, 1 },
{ 0xBE02, 0x0, 1 },
{ 0xBE20, 0x11F3, 1 },
{ 0x800, 0x82, 1 },
{ 0x8A0, 0x8, 1 },
{ 0x8AB, 0x19, 1 },
{ 0x900, 0x4D, 1 },
{ 0x98D, 0x76, 1 },
{ 0x8D0, 0x23, 0 },
{ 0x980, 0x4, 0 },
{ 0xA630, 0x0, 1 },
{ 0x1b400, 0x1fff, 1 },
};
/* IFPC & Preemption static powerup restore list */
static struct reg_list_pair {
uint32_t offset;
@ -472,6 +437,60 @@ static struct reg_list_pair a615_pwrup_reglist[] = {
{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
};
/**
* struct a6xx_protected_regs - container for a protect register span
*/
static const struct a6xx_protected_regs {
/** @reg: Physical protected mode register to write to */
u32 reg;
/** @start: Dword offset of the starting register in the range */
u32 start;
/**
* @end: Dword offset of the ending register in the range
* (inclusive)
*/
u32 end;
/**
* @noaccess: 1 if the register should not be accessible from
* userspace, 0 if it can be read (but not written)
*/
u32 noaccess;
} a630_protected_regs[] = {
{ A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
{ A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
{ A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
{ A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
{ A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
{ A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
{ A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
{ A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
{ A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
{ A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
{ A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
{ A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
{ A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e01, 1 },
{ A6XX_CP_PROTECT_REG + 13, 0x00e03, 0x00e0f, 1 },
{ A6XX_CP_PROTECT_REG + 14, 0x03c00, 0x03cc3, 1 },
{ A6XX_CP_PROTECT_REG + 15, 0x03cc4, 0x05cc3, 0 },
{ A6XX_CP_PROTECT_REG + 16, 0x08630, 0x087ff, 1 },
{ A6XX_CP_PROTECT_REG + 17, 0x08e00, 0x08e00, 1 },
{ A6XX_CP_PROTECT_REG + 18, 0x08e08, 0x08e08, 1 },
{ A6XX_CP_PROTECT_REG + 19, 0x08e50, 0x08e6f, 1 },
{ A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
{ A6XX_CP_PROTECT_REG + 21, 0x09e70, 0x09e71, 1 },
{ A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
{ A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
{ A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
{ A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
{ A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
{ A6XX_CP_PROTECT_REG + 27, 0x0be02, 0x0be03, 1 },
{ A6XX_CP_PROTECT_REG + 28, 0x0be20, 0x0d5ff, 1 },
{ A6XX_CP_PROTECT_REG + 29, 0x0f000, 0x0fbff, 1 },
{ A6XX_CP_PROTECT_REG + 30, 0x0fc00, 0x11bff, 0 },
{ A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x11c00, 1 },
/* sentinel: a6xx_protect_init() stops when .reg == 0 */
{ 0 },
};
/* Perfcounter control register in the IFPC save/restore list (see the
 * powerup restore lists above)
 */
static struct reg_list_pair a6xx_ifpc_perfctr_reglist[] = {
{ A6XX_RBBM_PERFCTR_CNTL, 0x0 },
};
@ -517,55 +536,36 @@ static void a6xx_init(struct adreno_device *adreno_dev)
/**
* a6xx_protect_init() - Initializes register protection on a6xx
* @device: Pointer to the device structure
* Performs register writes to enable protected access to sensitive
* registers
*/
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_protected_registers *mmu_prot =
kgsl_mmu_get_prot_regs(&device->mmu);
int i, num_sets;
int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
int max_sets = adreno_dev->gpucore->num_protected_regs;
unsigned int mmu_base = 0, mmu_range = 0, cur_range;
const struct a6xx_protected_regs *regs = a630_protected_regs;
int i;
/* enable access protection to privileged registers */
kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
/*
* Enable access protection to privileged registers, fault on an access
* protect violation and select the last span to protect from the start
* address all the way to the end of the register address space
*/
if (mmu_prot) {
mmu_base = mmu_prot->base;
mmu_range = mmu_prot->range;
req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
}
kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
(1 << 0) | (1 << 1) | (1 << 3));
if (req_sets > max_sets)
WARN(1, "Size exceeds the num of protection regs available\n");
/* Program each register defined by the core definition */
for (i = 0; regs[i].reg; i++) {
u32 count;
/* Protect GPU registers */
num_sets = min_t(unsigned int,
ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
for (i = 0; i < num_sets; i++) {
struct a6xx_protected_regs *regs =
&a6xx_protected_regs_group[i];
/*
* This is the offset of the end register as counted from the
* start, i.e. # of registers in the range - 1
*/
count = regs[i].end - regs[i].start;
kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
regs->base | (regs->count << 18) |
(regs->read_protect << 31));
}
/* Protect MMU registers */
if (mmu_prot) {
while ((i < max_sets) && (mmu_range > 0)) {
cur_range = min_t(unsigned int, mmu_range,
0x2000);
kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
mmu_base | ((cur_range - 1) << 18) | (1 << 31));
mmu_base += cur_range;
mmu_range -= cur_range;
i++;
}
kgsl_regwrite(device, regs[i].reg,
(regs[i].start & 0x3ffff) |
((count & 0x1fff) << 18) |
(regs[i].noaccess << 31));
}
}
@ -864,6 +864,10 @@ static void a6xx_start(struct adreno_device *adreno_dev)
/* Turn on performance counters */
kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);
/* Turn on the IFPC counter (countable 4 on XOCLK4) */
if (gmu_core_isenabled(device))
gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
0xff, 0x4);
/* Turn on GX_MEM retention */
if (gmu_core_gpmu_isenabled(device) && adreno_is_a612(adreno_dev)) {
@ -948,6 +952,20 @@ static void a6xx_start(struct adreno_device *adreno_dev)
if (adreno_is_preemption_enabled(adreno_dev))
kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
0x1);
/*
* Enable GMU power counter 0 to count GPU busy. This is applicable to
* all a6xx targets
*/
kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
/*
* A610 GPU has only one power counter fixed to count GPU busy
* cycles with no select register.
*/
if (!adreno_is_a610(adreno_dev))
kgsl_regrmw(device,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
0xff, 0x20);
kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
a6xx_protect_init(adreno_dev);
@ -2770,50 +2788,11 @@ static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
};
/*
 * Fixed PWR group: entry 0 is unusable (marked BROKEN); entry 1 reads the
 * GMU XOCLK_0 power counter, which is programmed to count GPU busy cycles
 * in a6xx_start().
 */
static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
};
/* Fixed group exposing the CP always-on counter pair */
static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
};
/*
 * GPMU power-counter group, backed by the GMU XOCLK counters 0-5.
 * Per-target availability is fixed up in a6xx_perfcounter_init().
 */
static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
/*
* A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
* busy count (see the PWR group above). Mark it as broken
* so it's not re-used.
*/
{ KGSL_PERFCOUNTER_BROKEN, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
};
/*
* ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
* because most of the perfcounter groups need to be restored
@ -2852,11 +2831,8 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
};
static struct adreno_perfcounters a6xx_perfcounters = {
@ -2864,39 +2840,6 @@ static struct adreno_perfcounters a6xx_perfcounters = {
ARRAY_SIZE(a6xx_perfcounter_groups),
};
/*
 * a6xx_enable_pwr_counters() - program the GMU power counter that counts
 * GPU busy cycles.
 * @adreno_dev: adreno device pointer
 * @counter: power counter index requested by the caller
 *
 * Return: 0 on success, -EINVAL when counter 0 (total GPU cycles) is
 * requested, -ENODEV when neither the GMU nor the busy-mask register is
 * reachable.
 */
static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
unsigned int counter)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
/*
* We have a limited number of power counters. Since we're not using
* total GPU cycle count, return error if requested.
*/
if (counter == 0)
return -EINVAL;
/* We can use GPU without GMU and allow it to count GPU busy cycles */
if (!gmu_core_isenabled(device) &&
!kgsl_is_register_offset(device,
A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK))
return -ENODEV;
/* Count only busy cycles (mask off the upper byte selects) */
kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
/*
* A610 GPU has only one power counter fixed to count GPU busy
* cycles with no select register.
*/
if (!adreno_is_a610(adreno_dev))
kgsl_regrmw(device,
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
return 0;
}
static void a6xx_efuse_gaming_bin(struct adreno_device *adreno_dev)
{
unsigned int val;
@ -2992,6 +2935,14 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
set_bit(ADRENO_SPTP_PC_CTRL, &adreno_dev->pwrctrl_flag);
/* Set the counter for IFPC */
if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
adreno_dev->perfctr_ifpc_lo =
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L;
/* Set the GPU busy counter for frequency scaling */
adreno_dev->perfctr_pwr_lo = A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L;
/* Check efuse bits for various capabilities */
a6xx_check_features(adreno_dev);
}
@ -3158,28 +3109,6 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = {
.offset_0 = ADRENO_REG_REGISTER_MAX,
};
/*
 * a6xx_perfcounter_init() - mark GPMU power counters that do not exist on
 * the current target as broken so the perfcounter core never hands them out.
 * @adreno_dev: adreno device pointer
 */
static void a6xx_perfcounter_init(struct adreno_device *adreno_dev)
{
/*
* A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A612.
* Mark them as broken so that they can't be used.
*/
if (adreno_is_a612(adreno_dev)) {
a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN;
a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN;
} else if (adreno_is_a610(adreno_dev)) {
/*
* A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1 to 5 are not
* present on A610. Mark them as broken so that they
* can't be used.
*/
a6xx_pwrcounters_gpmu[1].countable = KGSL_PERFCOUNTER_BROKEN;
a6xx_pwrcounters_gpmu[2].countable = KGSL_PERFCOUNTER_BROKEN;
a6xx_pwrcounters_gpmu[3].countable = KGSL_PERFCOUNTER_BROKEN;
a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN;
a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN;
}
}
static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
struct adreno_perfcount_register *reg, bool update_reg)
@ -3363,7 +3292,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.regulator_enable = a6xx_sptprac_enable,
.regulator_disable = a6xx_sptprac_disable,
.perfcounters = &a6xx_perfcounters,
.enable_pwr_counters = a6xx_enable_pwr_counters,
.read_throttling_counters = a6xx_read_throttling_counters,
.count_throttles = a6xx_count_throttles,
.microcode_read = a6xx_microcode_read,
@ -3386,7 +3314,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.preemption_context_destroy = a6xx_preemption_context_destroy,
.sptprac_is_on = a6xx_sptprac_is_on,
.ccu_invalidate = a6xx_ccu_invalidate,
.perfcounter_init = a6xx_perfcounter_init,
.perfcounter_update = a6xx_perfcounter_update,
.clk_set_options = a6xx_clk_set_options,
.zap_shader_unload = a6xx_zap_shader_unload,

View File

@ -1,5 +1,5 @@
/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -30,6 +30,7 @@
#include "adreno_trace.h"
#include "a3xx_reg.h"
#include "a6xx_reg.h"
#include "adreno_a5xx.h"
#define RB_HOSTPTR(_rb, _pos) \
@ -927,6 +928,8 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
*p++ = adreno_getreg(adreno_dev,
ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO);
else if (adreno_is_a6xx(adreno_dev))
*p++ = A6XX_CP_ALWAYS_ON_COUNTER_LO | (1 << 30) | (2 << 18);
else
*p++ = adreno_getreg(adreno_dev,
ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO) |

View File

@ -157,10 +157,11 @@ int32_t cam_sensor_handle_random_write(
struct list_head **list)
{
struct i2c_settings_list *i2c_list;
int32_t rc = 0, cnt;
int32_t rc = 0, cnt, payload_count;
payload_count = cam_cmd_i2c_random_wr->header.count;
i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
cam_cmd_i2c_random_wr->header.count);
payload_count);
if (i2c_list == NULL ||
i2c_list->i2c_settings.reg_setting == NULL) {
CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
@ -169,15 +170,14 @@ int32_t cam_sensor_handle_random_write(
*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
sizeof(struct i2c_random_wr_payload) *
(cam_cmd_i2c_random_wr->header.count));
payload_count);
i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM;
i2c_list->i2c_settings.addr_type =
cam_cmd_i2c_random_wr->header.addr_type;
i2c_list->i2c_settings.data_type =
cam_cmd_i2c_random_wr->header.data_type;
for (cnt = 0; cnt < (cam_cmd_i2c_random_wr->header.count);
cnt++) {
for (cnt = 0; cnt < payload_count; cnt++) {
i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_addr;
i2c_list->i2c_settings.reg_setting[cnt].reg_data =
@ -197,10 +197,11 @@ static int32_t cam_sensor_handle_continuous_write(
struct list_head **list)
{
struct i2c_settings_list *i2c_list;
int32_t rc = 0, cnt;
int32_t rc = 0, cnt, payload_count;
payload_count = cam_cmd_i2c_continuous_wr->header.count;
i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
cam_cmd_i2c_continuous_wr->header.count);
payload_count);
if (i2c_list == NULL ||
i2c_list->i2c_settings.reg_setting == NULL) {
CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
@ -210,7 +211,7 @@ static int32_t cam_sensor_handle_continuous_write(
*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
sizeof(struct cam_cmd_read) *
(cam_cmd_i2c_continuous_wr->header.count));
(payload_count));
if (cam_cmd_i2c_continuous_wr->header.op_code ==
CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
@ -227,8 +228,7 @@ static int32_t cam_sensor_handle_continuous_write(
i2c_list->i2c_settings.size =
cam_cmd_i2c_continuous_wr->header.count;
for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
cnt++) {
for (cnt = 0; cnt < payload_count; cnt++) {
i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
cam_cmd_i2c_continuous_wr->reg_addr;
i2c_list->i2c_settings.reg_setting[cnt].reg_data =
@ -931,23 +931,29 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
int32_t i = 0, pwr_up = 0, pwr_down = 0;
struct cam_sensor_power_setting *pwr_settings;
void *ptr = cmd_buf, *scr;
struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
struct cam_cmd_power *pwr_cmd =
kzalloc(sizeof(struct cam_cmd_power), GFP_KERNEL);
if (!pwr_cmd)
return -ENOMEM;
memcpy(pwr_cmd, cmd_buf, sizeof(struct cam_cmd_power));
if (!pwr_cmd || !cmd_length || cmd_buf_len < (size_t)cmd_length ||
cam_sensor_validate(cmd_buf, cmd_buf_len)) {
CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
pwr_cmd, cmd_length);
return -EINVAL;
rc = -EINVAL;
goto free_power_command;
}
power_info->power_setting_size = 0;
power_info->power_setting =
(struct cam_sensor_power_setting *)
kzalloc(sizeof(struct cam_sensor_power_setting) *
MAX_POWER_CONFIG, GFP_KERNEL);
if (!power_info->power_setting)
return -ENOMEM;
if (!power_info->power_setting) {
rc = -ENOMEM;
goto free_power_command;
}
power_info->power_down_setting_size = 0;
power_info->power_down_setting =
@ -958,7 +964,8 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
kfree(power_info->power_setting);
power_info->power_setting = NULL;
power_info->power_setting_size = 0;
return -ENOMEM;
rc = -ENOMEM;
goto free_power_command;
}
while (tot_size < cmd_length) {
@ -1142,7 +1149,7 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
}
}
return rc;
goto free_power_command;
free_power_settings:
kfree(power_info->power_down_setting);
kfree(power_info->power_setting);
@ -1150,6 +1157,9 @@ free_power_settings:
power_info->power_setting = NULL;
power_info->power_down_setting_size = 0;
power_info->power_setting_size = 0;
free_power_command:
kfree(pwr_cmd);
pwr_cmd = NULL;
return rc;
}

View File

@ -157,10 +157,11 @@ int32_t cam_sensor_handle_random_write(
struct list_head **list)
{
struct i2c_settings_list *i2c_list;
int32_t rc = 0, cnt;
int32_t rc = 0, cnt, payload_count;
payload_count = cam_cmd_i2c_random_wr->header.count;
i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
cam_cmd_i2c_random_wr->header.count);
payload_count);
if (i2c_list == NULL ||
i2c_list->i2c_settings.reg_setting == NULL) {
CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
@ -169,15 +170,14 @@ int32_t cam_sensor_handle_random_write(
*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
sizeof(struct i2c_random_wr_payload) *
(cam_cmd_i2c_random_wr->header.count));
payload_count);
i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM;
i2c_list->i2c_settings.addr_type =
cam_cmd_i2c_random_wr->header.addr_type;
i2c_list->i2c_settings.data_type =
cam_cmd_i2c_random_wr->header.data_type;
for (cnt = 0; cnt < (cam_cmd_i2c_random_wr->header.count);
cnt++) {
for (cnt = 0; cnt < payload_count; cnt++) {
i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_addr;
i2c_list->i2c_settings.reg_setting[cnt].reg_data =
@ -197,10 +197,11 @@ static int32_t cam_sensor_handle_continuous_write(
struct list_head **list)
{
struct i2c_settings_list *i2c_list;
int32_t rc = 0, cnt;
int32_t rc = 0, cnt, payload_count;
payload_count = cam_cmd_i2c_continuous_wr->header.count;
i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
cam_cmd_i2c_continuous_wr->header.count);
payload_count);
if (i2c_list == NULL ||
i2c_list->i2c_settings.reg_setting == NULL) {
CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
@ -210,7 +211,7 @@ static int32_t cam_sensor_handle_continuous_write(
*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
sizeof(struct cam_cmd_read) *
(cam_cmd_i2c_continuous_wr->header.count));
(payload_count));
if (cam_cmd_i2c_continuous_wr->header.op_code ==
CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
@ -227,8 +228,7 @@ static int32_t cam_sensor_handle_continuous_write(
i2c_list->i2c_settings.size =
cam_cmd_i2c_continuous_wr->header.count;
for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
cnt++) {
for (cnt = 0; cnt < payload_count; cnt++) {
i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
cam_cmd_i2c_continuous_wr->reg_addr;
i2c_list->i2c_settings.reg_setting[cnt].reg_data =
@ -865,14 +865,19 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
int32_t i = 0, pwr_up = 0, pwr_down = 0;
struct cam_sensor_power_setting *pwr_settings;
void *ptr = cmd_buf, *scr;
struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
struct cam_cmd_power *pwr_cmd =
kzalloc(sizeof(struct cam_cmd_power), GFP_KERNEL);
if (!pwr_cmd)
return -ENOMEM;
memcpy(pwr_cmd, cmd_buf, sizeof(struct cam_cmd_power));
if (!pwr_cmd || !cmd_length || cmd_buf_len < (size_t)cmd_length ||
cam_sensor_validate(cmd_buf, cmd_buf_len)) {
CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
pwr_cmd, cmd_length);
return -EINVAL;
rc = -EINVAL;
goto free_power_command;
}
power_info->power_setting_size = 0;
@ -880,8 +885,10 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
(struct cam_sensor_power_setting *)
kzalloc(sizeof(struct cam_sensor_power_setting) *
MAX_POWER_CONFIG, GFP_KERNEL);
if (!power_info->power_setting)
return -ENOMEM;
if (!power_info->power_setting) {
rc = -ENOMEM;
goto free_power_command;
}
power_info->power_down_setting_size = 0;
power_info->power_down_setting =
@ -892,7 +899,8 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
kfree(power_info->power_setting);
power_info->power_setting = NULL;
power_info->power_setting_size = 0;
return -ENOMEM;
rc = -ENOMEM;
goto free_power_command;
}
while (tot_size < cmd_length) {
@ -1076,7 +1084,7 @@ int32_t cam_sensor_update_power_settings(void *cmd_buf,
}
}
return rc;
goto free_power_command;
free_power_settings:
kfree(power_info->power_down_setting);
kfree(power_info->power_setting);
@ -1084,6 +1092,9 @@ free_power_settings:
power_info->power_setting = NULL;
power_info->power_down_setting_size = 0;
power_info->power_setting_size = 0;
free_power_command:
kfree(pwr_cmd);
pwr_cmd = NULL;
return rc;
}

View File

@ -102,12 +102,6 @@ struct npu_debugfs_ctx {
struct dentry *root;
uint32_t reg_off;
uint32_t reg_cnt;
uint8_t *log_buf;
struct mutex log_lock;
uint32_t log_num_bytes_buffered;
uint32_t log_read_index;
uint32_t log_write_index;
uint32_t log_buf_size;
};
struct npu_debugfs_reg_ctx {

View File

@ -34,16 +34,6 @@
*/
static int npu_debug_open(struct inode *inode, struct file *file);
static int npu_debug_release(struct inode *inode, struct file *file);
static int npu_debug_reg_open(struct inode *inode, struct file *file);
static int npu_debug_reg_release(struct inode *inode, struct file *file);
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_off_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_off_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_log_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_ctrl_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos);
@ -51,27 +41,7 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
* Variables
* -------------------------------------------------------------------------
*/
struct npu_device *g_npu_dev;
static const struct file_operations npu_reg_fops = {
.open = npu_debug_reg_open,
.release = npu_debug_reg_release,
.read = npu_debug_reg_read,
};
static const struct file_operations npu_off_fops = {
.open = npu_debug_open,
.release = npu_debug_release,
.read = npu_debug_off_read,
.write = npu_debug_off_write,
};
static const struct file_operations npu_log_fops = {
.open = npu_debug_open,
.release = npu_debug_release,
.read = npu_debug_log_read,
.write = NULL,
};
static struct npu_device *g_npu_dev;
static const struct file_operations npu_ctrl_fops = {
.open = npu_debug_open,
@ -97,207 +67,6 @@ static int npu_debug_release(struct inode *inode, struct file *file)
return 0;
}
/* Allocate a per-open register-dump context and mark the file non-seekable. */
static int npu_debug_reg_open(struct inode *inode, struct file *file)
{
	struct npu_debugfs_reg_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->npu_dev = inode->i_private;
	file->private_data = ctx;
	/* non-seekable: forbid lseek/pread/pwrite on this node */
	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
	return 0;
}
/* Free the per-open context allocated by npu_debug_reg_open(). */
static int npu_debug_reg_release(struct inode *inode, struct file *file)
{
	struct npu_debugfs_reg_ctx *ctx = file->private_data;

	file->private_data = NULL;
	kfree(ctx->buf);	/* cached dump text; may be NULL */
	kfree(ctx);
	return 0;
}
/* -------------------------------------------------------------------------
* Function Implementations - Reg Read/Write
* -------------------------------------------------------------------------
*/
/*
 * npu_debug_reg_read() - debugfs read handler for the register dump node.
 *
 * On the first read of an open file the configured register window
 * (debugfs->reg_off / reg_cnt) is hex-dumped into a cached text buffer
 * (reg_ctx->buf); subsequent reads serve from that cache until release.
 * Core power is enabled only around the actual register reads.
 */
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
struct npu_debugfs_reg_ctx *reg_ctx = file->private_data;
struct npu_device *npu_dev = reg_ctx->npu_dev;
struct npu_debugfs_ctx *debugfs;
size_t len;
debugfs = &npu_dev->debugfs_ctx;
/* nothing configured to dump */
if (debugfs->reg_cnt == 0)
return 0;
/* lazily build the dump text on first read */
if (!reg_ctx->buf) {
char dump_buf[64];
char *ptr;
int cnt, tot, off;
/* one 64-byte text line per ROW_BYTES of register data */
reg_ctx->buf_len = sizeof(dump_buf) *
DIV_ROUND_UP(debugfs->reg_cnt, ROW_BYTES);
reg_ctx->buf = kzalloc(reg_ctx->buf_len, GFP_KERNEL);
if (!reg_ctx->buf)
return -ENOMEM;
ptr = npu_dev->core_io.base + debugfs->reg_off;
tot = 0;
/* NOTE(review): 'off' is computed but never used below */
off = (int)debugfs->reg_off;
/*
 * NOTE(review): on failure here reg_ctx->buf stays allocated
 * (zero-filled) with buf_len at full size, so a retry would
 * serve zeros instead of rebuilding -- confirm intended.
 */
if (npu_enable_core_power(npu_dev))
return -EPERM;
/* reg_cnt is in registers; * 4 converts to bytes */
for (cnt = debugfs->reg_cnt * 4; cnt > 0; cnt -= ROW_BYTES) {
hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
ROW_BYTES, GROUP_BYTES, dump_buf,
sizeof(dump_buf), false);
/* prefix each row with its offset from the core base */
len = scnprintf(reg_ctx->buf + tot,
reg_ctx->buf_len - tot, "0x%08x: %s\n",
((int) (unsigned long) ptr) -
((int) (unsigned long) npu_dev->core_io.base),
dump_buf);
ptr += ROW_BYTES;
tot += len;
if (tot >= reg_ctx->buf_len)
break;
}
npu_disable_core_power(npu_dev);
/* shrink buf_len to the text actually produced */
reg_ctx->buf_len = tot;
}
if (*ppos >= reg_ctx->buf_len)
return 0; /* done reading */
len = min(count, reg_ctx->buf_len - (size_t) *ppos);
pr_debug("read %zi %zi\n", count, reg_ctx->buf_len - (size_t) *ppos);
if (copy_to_user(user_buf, reg_ctx->buf + *ppos, len)) {
pr_err("failed to copy to user\n");
return -EFAULT;
}
*ppos += len; /* increase offset */
return len;
}
/* -------------------------------------------------------------------------
* Function Implementations - Offset Read/Write
* -------------------------------------------------------------------------
*/
static ssize_t npu_debug_off_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
size_t off = 0;
uint32_t cnt, reg_cnt;
char buf[24];
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
cnt = sscanf(buf, "%zx %x", &off, &reg_cnt);
if (cnt == 1)
reg_cnt = DEFAULT_REG_DUMP_NUM;
pr_debug("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
if (cnt >= 1) {
debugfs->reg_off = off;
debugfs->reg_cnt = reg_cnt;
}
return count;
}
static ssize_t npu_debug_off_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
size_t len;
char buf[64];
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
if (*ppos)
return 0; /* the end */
len = scnprintf(buf, sizeof(buf), "offset=0x%08x cnt=%d\n",
debugfs->reg_off, debugfs->reg_cnt);
len = min(len, count);
if (copy_to_user(user_buf, buf, len)) {
pr_err("failed to copy to user\n");
return -EFAULT;
}
*ppos += len; /* increase offset */
return len;
}
/* -------------------------------------------------------------------------
* Function Implementations - DebugFS Log
* -------------------------------------------------------------------------
*/
static ssize_t npu_debug_log_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
size_t len = 0;
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
/* mutex log lock */
mutex_lock(&debugfs->log_lock);
if (debugfs->log_num_bytes_buffered != 0) {
len = min(debugfs->log_num_bytes_buffered,
debugfs->log_buf_size - debugfs->log_read_index);
len = min(count, len);
if (copy_to_user(user_buf, (debugfs->log_buf +
debugfs->log_read_index), len)) {
pr_err("%s failed to copy to user\n", __func__);
mutex_unlock(&debugfs->log_lock);
return -EFAULT;
}
debugfs->log_read_index += len;
if (debugfs->log_read_index == debugfs->log_buf_size)
debugfs->log_read_index = 0;
debugfs->log_num_bytes_buffered -= len;
*ppos += len;
}
/* mutex log unlock */
mutex_unlock(&debugfs->log_lock);
return len;
}
/* -------------------------------------------------------------------------
* Function Implementations - DebugFS Control
@ -381,24 +150,6 @@ int npu_debugfs_init(struct npu_device *npu_dev)
return -ENODEV;
}
if (!debugfs_create_file("reg", 0644, debugfs->root,
npu_dev, &npu_reg_fops)) {
pr_err("debugfs_create_file reg fail\n");
goto err;
}
if (!debugfs_create_file("off", 0644, debugfs->root,
npu_dev, &npu_off_fops)) {
pr_err("debugfs_create_file off fail\n");
goto err;
}
if (!debugfs_create_file("log", 0644, debugfs->root,
npu_dev, &npu_log_fops)) {
pr_err("debugfs_create_file log fail\n");
goto err;
}
if (!debugfs_create_file("ctrl", 0644, debugfs->root,
npu_dev, &npu_ctrl_fops)) {
pr_err("debugfs_create_file ctrl fail\n");
@ -435,14 +186,6 @@ int npu_debugfs_init(struct npu_device *npu_dev)
goto err;
}
debugfs->log_num_bytes_buffered = 0;
debugfs->log_read_index = 0;
debugfs->log_write_index = 0;
debugfs->log_buf_size = NPU_LOG_BUF_SIZE;
debugfs->log_buf = kzalloc(debugfs->log_buf_size, GFP_KERNEL);
if (!debugfs->log_buf)
goto err;
mutex_init(&debugfs->log_lock);
return 0;
@ -455,12 +198,6 @@ void npu_debugfs_deinit(struct npu_device *npu_dev)
{
struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx;
debugfs->log_num_bytes_buffered = 0;
debugfs->log_read_index = 0;
debugfs->log_write_index = 0;
debugfs->log_buf_size = 0;
kfree(debugfs->log_buf);
if (!IS_ERR_OR_NULL(debugfs->root)) {
debugfs_remove_recursive(debugfs->root);
debugfs->root = NULL;

View File

@ -40,15 +40,16 @@
struct npu_queue_tuple {
uint32_t size;
uint32_t hdr;
uint32_t start_offset;
};
static const struct npu_queue_tuple npu_q_setup[6] = {
{ 1024, IPC_QUEUE_CMD_HIGH_PRIORITY | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
{ 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE },
static struct npu_queue_tuple npu_q_setup[6] = {
{ 1024, IPC_QUEUE_CMD_HIGH_PRIORITY | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE, 0},
};
/* -------------------------------------------------------------------------
@ -75,7 +76,7 @@ static int npu_host_ipc_init_hfi(struct npu_device *npu_dev)
struct hfi_queue_tbl_header *q_tbl_hdr = NULL;
struct hfi_queue_header *q_hdr_arr = NULL;
struct hfi_queue_header *q_hdr = NULL;
void *q_tbl_addr = 0;
void *q_tbl_addr = NULL;
uint32_t reg_val = 0;
uint32_t q_idx = 0;
uint32_t q_tbl_size = sizeof(struct hfi_queue_tbl_header) +
@ -118,6 +119,7 @@ static int npu_host_ipc_init_hfi(struct npu_device *npu_dev)
/* queue is active */
q_hdr->qhdr_status = 0x01;
q_hdr->qhdr_start_offset = cur_start_offset;
npu_q_setup[q_idx].start_offset = cur_start_offset;
q_size = npu_q_setup[q_idx].size;
q_hdr->qhdr_type = npu_q_setup[q_idx].hdr;
/* in bytes */
@ -219,6 +221,18 @@ static int ipc_queue_read(struct npu_device *npu_dev,
/* Read the queue */
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
HFI_QUEUE_HEADER_SIZE);
if (queue.qhdr_type != npu_q_setup[target_que].hdr ||
queue.qhdr_q_size != npu_q_setup[target_que].size ||
queue.qhdr_read_idx >= queue.qhdr_q_size ||
queue.qhdr_write_idx >= queue.qhdr_q_size ||
queue.qhdr_start_offset !=
npu_q_setup[target_que].start_offset) {
pr_err("Invalid Queue header\n");
status = -EIO;
goto exit;
}
/* check if queue is empty */
if (queue.qhdr_read_idx == queue.qhdr_write_idx) {
/*
@ -242,8 +256,10 @@ static int ipc_queue_read(struct npu_device *npu_dev,
target_que,
packet_size);
if (packet_size == 0) {
status = -EPERM;
if ((packet_size == 0) ||
(packet_size > NPU_IPC_BUF_LENGTH)) {
pr_err("Invalid packet size %d\n", packet_size);
status = -EINVAL;
goto exit;
}
new_read_idx = queue.qhdr_read_idx + packet_size;
@ -313,6 +329,18 @@ static int ipc_queue_write(struct npu_device *npu_dev,
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
HFI_QUEUE_HEADER_SIZE);
if (queue.qhdr_type != npu_q_setup[target_que].hdr ||
queue.qhdr_q_size != npu_q_setup[target_que].size ||
queue.qhdr_read_idx >= queue.qhdr_q_size ||
queue.qhdr_write_idx >= queue.qhdr_q_size ||
queue.qhdr_start_offset !=
npu_q_setup[target_que].start_offset) {
pr_err("Invalid Queue header\n");
status = -EIO;
goto exit;
}
packet_size = (*(uint32_t *)packet);
if (packet_size == 0) {
/* assign failed status and return */

View File

@ -110,7 +110,7 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
{
size_t dst_off = (size_t)dst;
uint32_t *src_ptr32 = (uint32_t *)src;
uint8_t *src_ptr8 = 0;
uint8_t *src_ptr8 = NULL;
uint32_t i = 0;
uint32_t num = 0;
@ -145,7 +145,7 @@ int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
{
size_t src_off = (size_t)src;
uint32_t *out32 = (uint32_t *)dst;
uint8_t *out8 = 0;
uint8_t *out8 = NULL;
uint32_t i = 0;
uint32_t num = 0;
@ -374,7 +374,7 @@ void npu_mem_invalidate(struct npu_client *client, int buf_hdl)
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr)
{
struct npu_ion_buf *ion_buf = 0;
struct npu_ion_buf *ion_buf = NULL;
struct list_head *pos = NULL;
bool valid = false;
@ -394,7 +394,7 @@ bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr)
void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr)
{
struct npu_device *npu_dev = client->npu_dev;
struct npu_ion_buf *ion_buf = 0;
struct npu_ion_buf *ion_buf = NULL;
/* clear entry and retrieve the corresponding buffer */
ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
@ -449,49 +449,3 @@ void subsystem_put_local(void *sub_system_handle)
return subsystem_put(sub_system_handle);
}
/* -------------------------------------------------------------------------
* Functions - Log
* -------------------------------------------------------------------------
*/
void npu_process_log_message(struct npu_device *npu_dev, uint32_t *message,
uint32_t size)
{
struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx;
/* mutex log lock */
mutex_lock(&debugfs->log_lock);
if ((debugfs->log_num_bytes_buffered + size) >
debugfs->log_buf_size) {
/* No more space, invalidate it all and start over */
debugfs->log_read_index = 0;
debugfs->log_write_index = size;
debugfs->log_num_bytes_buffered = size;
memcpy(debugfs->log_buf, message, size);
} else {
if ((debugfs->log_write_index + size) >
debugfs->log_buf_size) {
/* Wrap around case */
uint8_t *src_addr = (uint8_t *)message;
uint8_t *dst_addr = 0;
uint32_t remaining_to_end = debugfs->log_buf_size -
debugfs->log_write_index + 1;
dst_addr = debugfs->log_buf + debugfs->log_write_index;
memcpy(dst_addr, src_addr, remaining_to_end);
src_addr = &(src_addr[remaining_to_end]);
dst_addr = debugfs->log_buf;
memcpy(dst_addr, src_addr, size-remaining_to_end);
debugfs->log_write_index = size-remaining_to_end;
} else {
memcpy((debugfs->log_buf + debugfs->log_write_index),
message, size);
debugfs->log_write_index += size;
if (debugfs->log_write_index == debugfs->log_buf_size)
debugfs->log_write_index = 0;
}
debugfs->log_num_bytes_buffered += size;
}
/* mutex log unlock */
mutex_unlock(&debugfs->log_lock);
}

View File

@ -90,7 +90,4 @@ void npu_disable_sys_cache(struct npu_device *npu_dev);
void *subsystem_get_local(char *sub_system);
void subsystem_put_local(void *sub_system_handle);
void npu_process_log_message(struct npu_device *npu_dev, uint32_t *msg,
uint32_t size);
#endif /* _NPU_HW_ACCESS_H*/

View File

@ -27,11 +27,6 @@
* Defines
* -------------------------------------------------------------------------
*/
#define LOG_MSG_HEADER_SIZE 20
#define LOG_MSG_START_MSG_INDEX 5
#define LOG_MSG_TOTAL_SIZE_INDEX 0
#define LOG_MSG_MSG_ID_INDEX 1
#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS 10
#define NPU_FW_TIMEOUT_MS 1000
@ -54,9 +49,7 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
static int network_get(struct npu_network *network);
static int network_put(struct npu_network *network);
static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg);
static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg);
static void host_session_msg_hdlr(struct npu_device *npu_dev);
static void host_session_log_hdlr(struct npu_device *npu_dev);
static int host_error_hdlr(struct npu_device *npu_dev, bool force);
static int npu_send_network_cmd(struct npu_device *npu_dev,
struct npu_network *network, void *cmd_ptr);
@ -345,7 +338,6 @@ irqreturn_t npu_intr_thrd_hdler(int irq, void *ptr)
struct npu_device *npu_dev = (struct npu_device *)ptr;
if (!host_error_hdlr(npu_dev, false)) {
host_session_log_hdlr(npu_dev);
host_session_msg_hdlr(npu_dev);
}
return IRQ_HANDLED;
@ -762,6 +754,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
}
pr_debug("network id : %llu\n", network->id);
if (exe_rsp_pkt->header.size < sizeof(*exe_rsp_pkt)) {
pr_err("invalid packet header size, header.size: %d",
exe_rsp_pkt->header.size);
network_put(network);
break;
}
stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
pr_debug("stats_size %d:%d\n", exe_rsp_pkt->header.size,
stats_size);
@ -976,52 +974,6 @@ skip_read_msg:
kfree(msg);
}
static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
{
uint32_t msg_id;
uint32_t *log_msg;
uint32_t size;
msg_id = msg[LOG_MSG_MSG_ID_INDEX];
size = msg[LOG_MSG_TOTAL_SIZE_INDEX] - LOG_MSG_HEADER_SIZE;
switch (msg_id) {
case NPU_IPC_MSG_EVENT_NOTIFY:
/* Process the message */
log_msg = &(msg[LOG_MSG_START_MSG_INDEX]);
npu_process_log_message(npu_dev, log_msg, size);
break;
default:
pr_err("unsupported log response received %d\n", msg_id);
break;
}
}
static void host_session_log_hdlr(struct npu_device *npu_dev)
{
uint32_t *msg;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
msg = kzalloc(sizeof(uint32_t) * NPU_IPC_BUF_LENGTH, GFP_KERNEL);
if (!msg)
return;
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_state == FW_DISABLED) {
pr_warn("handle npu session msg when FW is disabled\n");
goto skip_read_msg;
}
while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG, msg) == 0) {
pr_debug("received from log queue\n");
log_msg_proc(npu_dev, msg);
}
skip_read_msg:
mutex_unlock(&host_ctx->lock);
kfree(msg);
}
/* -------------------------------------------------------------------------
* Function Definitions - Functionality
@ -2101,7 +2053,7 @@ int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
ret = set_perf_mode(npu_dev);
if (ret)
pr_err("set_perf_mode failed");
pr_err("set_perf_mode failed\n");
if (network)
network_put(network);

View File

@ -116,12 +116,6 @@ struct npu_debugfs_ctx {
struct dentry *root;
uint32_t reg_off;
uint32_t reg_cnt;
uint8_t *log_buf;
struct mutex log_lock;
uint32_t log_num_bytes_buffered;
uint32_t log_read_index;
uint32_t log_write_index;
uint32_t log_buf_size;
};
struct npu_debugfs_reg_ctx {

View File

@ -20,12 +20,6 @@
#include "npu_hw_access.h"
#include "npu_common.h"
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define NPU_LOG_BUF_SIZE 4096
/* -------------------------------------------------------------------------
* Function Prototypes
* -------------------------------------------------------------------------
@ -40,8 +34,6 @@ static ssize_t npu_debug_off_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_off_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_log_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_ctrl_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos);
@ -64,13 +56,6 @@ static const struct file_operations npu_off_fops = {
.write = npu_debug_off_write,
};
static const struct file_operations npu_log_fops = {
.open = npu_debug_open,
.release = npu_debug_release,
.read = npu_debug_log_read,
.write = NULL,
};
static const struct file_operations npu_ctrl_fops = {
.open = npu_debug_open,
.release = npu_debug_release,
@ -255,48 +240,6 @@ static ssize_t npu_debug_off_read(struct file *file,
return len;
}
/* -------------------------------------------------------------------------
* Function Implementations - DebugFS Log
* -------------------------------------------------------------------------
*/
static ssize_t npu_debug_log_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
size_t len = 0;
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
/* mutex log lock */
mutex_lock(&debugfs->log_lock);
if (debugfs->log_num_bytes_buffered != 0) {
len = min(debugfs->log_num_bytes_buffered,
debugfs->log_buf_size - debugfs->log_read_index);
len = min(count, len);
if (copy_to_user(user_buf, (debugfs->log_buf +
debugfs->log_read_index), len)) {
NPU_ERR("failed to copy to user\n");
mutex_unlock(&debugfs->log_lock);
return -EFAULT;
}
debugfs->log_read_index += len;
if (debugfs->log_read_index == debugfs->log_buf_size)
debugfs->log_read_index = 0;
debugfs->log_num_bytes_buffered -= len;
*ppos += len;
}
/* mutex log unlock */
mutex_unlock(&debugfs->log_lock);
return len;
}
/* -------------------------------------------------------------------------
* Function Implementations - DebugFS Control
* -------------------------------------------------------------------------
@ -380,12 +323,6 @@ int npu_debugfs_init(struct npu_device *npu_dev)
goto err;
}
if (!debugfs_create_file("log", 0644, debugfs->root,
npu_dev, &npu_log_fops)) {
NPU_ERR("debugfs_create_file log fail\n");
goto err;
}
if (!debugfs_create_file("ctrl", 0644, debugfs->root,
npu_dev, &npu_ctrl_fops)) {
NPU_ERR("debugfs_create_file ctrl fail\n");
@ -428,15 +365,6 @@ int npu_debugfs_init(struct npu_device *npu_dev)
goto err;
}
debugfs->log_num_bytes_buffered = 0;
debugfs->log_read_index = 0;
debugfs->log_write_index = 0;
debugfs->log_buf_size = NPU_LOG_BUF_SIZE;
debugfs->log_buf = kzalloc(debugfs->log_buf_size, GFP_KERNEL);
if (!debugfs->log_buf)
goto err;
mutex_init(&debugfs->log_lock);
return 0;
err:
@ -448,12 +376,6 @@ void npu_debugfs_deinit(struct npu_device *npu_dev)
{
struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx;
debugfs->log_num_bytes_buffered = 0;
debugfs->log_read_index = 0;
debugfs->log_write_index = 0;
debugfs->log_buf_size = 0;
kfree(debugfs->log_buf);
if (!IS_ERR_OR_NULL(debugfs->root)) {
debugfs_remove_recursive(debugfs->root);
debugfs->root = NULL;

View File

@ -38,15 +38,16 @@
struct npu_queue_tuple {
uint32_t size;
uint32_t hdr;
uint32_t start_offset;
};
static const struct npu_queue_tuple npu_q_setup[6] = {
{ 1024, IPC_QUEUE_CMD_HIGH_PRIORITY | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
{ 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE },
static struct npu_queue_tuple npu_q_setup[6] = {
{ 1024, IPC_QUEUE_CMD_HIGH_PRIORITY | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE, 0},
};
/* -------------------------------------------------------------------------
@ -116,6 +117,7 @@ static int npu_host_ipc_init_hfi(struct npu_device *npu_dev)
/* queue is active */
q_hdr->qhdr_status = 0x01;
q_hdr->qhdr_start_offset = cur_start_offset;
npu_q_setup[q_idx].start_offset = cur_start_offset;
q_size = npu_q_setup[q_idx].size;
q_hdr->qhdr_type = npu_q_setup[q_idx].hdr;
/* in bytes */
@ -216,6 +218,18 @@ static int ipc_queue_read(struct npu_device *npu_dev,
/* Read the queue */
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
HFI_QUEUE_HEADER_SIZE);
if (queue.qhdr_type != npu_q_setup[target_que].hdr ||
queue.qhdr_q_size != npu_q_setup[target_que].size ||
queue.qhdr_read_idx >= queue.qhdr_q_size ||
queue.qhdr_write_idx >= queue.qhdr_q_size ||
queue.qhdr_start_offset !=
npu_q_setup[target_que].start_offset) {
NPU_ERR("Invalid Queue header\n");
status = -EIO;
goto exit;
}
/* check if queue is empty */
if (queue.qhdr_read_idx == queue.qhdr_write_idx) {
/*
@ -313,6 +327,18 @@ static int ipc_queue_write(struct npu_device *npu_dev,
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
HFI_QUEUE_HEADER_SIZE);
if (queue.qhdr_type != npu_q_setup[target_que].hdr ||
queue.qhdr_q_size != npu_q_setup[target_que].size ||
queue.qhdr_read_idx >= queue.qhdr_q_size ||
queue.qhdr_write_idx >= queue.qhdr_q_size ||
queue.qhdr_start_offset !=
npu_q_setup[target_que].start_offset) {
NPU_ERR("Invalid Queue header\n");
status = -EIO;
goto exit;
}
packet_size = (*(uint32_t *)packet);
if (packet_size == 0) {
/* assign failed status and return */

View File

@ -437,50 +437,3 @@ void subsystem_put_local(void *sub_system_handle)
{
return subsystem_put(sub_system_handle);
}
/* -------------------------------------------------------------------------
* Functions - Log
* -------------------------------------------------------------------------
*/
void npu_process_log_message(struct npu_device *npu_dev, uint32_t *message,
uint32_t size)
{
struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx;
/* mutex log lock */
mutex_lock(&debugfs->log_lock);
if ((debugfs->log_num_bytes_buffered + size) >
debugfs->log_buf_size) {
/* No more space, invalidate it all and start over */
debugfs->log_read_index = 0;
debugfs->log_write_index = size;
debugfs->log_num_bytes_buffered = size;
memcpy(debugfs->log_buf, message, size);
} else {
if ((debugfs->log_write_index + size) >
debugfs->log_buf_size) {
/* Wrap around case */
uint8_t *src_addr = (uint8_t *)message;
uint8_t *dst_addr = NULL;
uint32_t remaining_to_end = debugfs->log_buf_size -
debugfs->log_write_index + 1;
dst_addr = debugfs->log_buf + debugfs->log_write_index;
memcpy(dst_addr, src_addr, remaining_to_end);
src_addr = &(src_addr[remaining_to_end]);
dst_addr = debugfs->log_buf;
memcpy(dst_addr, src_addr, size-remaining_to_end);
debugfs->log_write_index = size-remaining_to_end;
} else {
memcpy((debugfs->log_buf + debugfs->log_write_index),
message, size);
debugfs->log_write_index += size;
if (debugfs->log_write_index == debugfs->log_buf_size)
debugfs->log_write_index = 0;
}
debugfs->log_num_bytes_buffered += size;
}
/* mutex log unlock */
mutex_unlock(&debugfs->log_lock);
}

View File

@ -98,7 +98,4 @@ void npu_disable_sys_cache(struct npu_device *npu_dev);
void *subsystem_get_local(char *sub_system);
void subsystem_put_local(void *sub_system_handle);
void npu_process_log_message(struct npu_device *npu_dev, uint32_t *msg,
uint32_t size);
#endif /* _NPU_HW_ACCESS_H*/

View File

@ -24,15 +24,6 @@
#include <soc/qcom/subsystem_restart.h>
#include <linux/reboot.h>
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define LOG_MSG_HEADER_SIZE 20
#define LOG_MSG_START_MSG_INDEX 5
#define LOG_MSG_TOTAL_SIZE_INDEX 0
#define LOG_MSG_MSG_ID_INDEX 1
/* -------------------------------------------------------------------------
* File Scope Function Prototypes
* -------------------------------------------------------------------------
@ -58,9 +49,7 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
static int network_get(struct npu_network *network);
static int network_put(struct npu_network *network);
static int app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg);
static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg);
static void host_session_msg_hdlr(struct npu_device *npu_dev);
static void host_session_log_hdlr(struct npu_device *npu_dev);
static int host_error_hdlr(struct npu_device *npu_dev, bool force);
static int npu_send_network_cmd(struct npu_device *npu_dev,
struct npu_network *network, void *cmd_ptr,
@ -1156,7 +1145,6 @@ static void npu_ipc_irq_work(struct work_struct *work)
host_ctx = container_of(work, struct npu_host_ctx, ipc_irq_work);
npu_dev = container_of(host_ctx, struct npu_device, host_ctx);
host_session_log_hdlr(npu_dev);
host_session_msg_hdlr(npu_dev);
}
@ -1688,6 +1676,12 @@ static int app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
}
NPU_DBG("network id : %lld\n", network->id);
if (exe_rsp_pkt->header.size < sizeof(*exe_rsp_pkt)) {
NPU_ERR("invalid packet header size, header.size: %d",
exe_rsp_pkt->header.size);
network_put(network);
break;
}
stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
stats_size);
@ -1929,47 +1923,6 @@ skip_read_msg:
mutex_unlock(&host_ctx->lock);
}
static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
{
uint32_t msg_id;
uint32_t *log_msg;
uint32_t size;
msg_id = msg[LOG_MSG_MSG_ID_INDEX];
size = msg[LOG_MSG_TOTAL_SIZE_INDEX] - LOG_MSG_HEADER_SIZE;
switch (msg_id) {
case NPU_IPC_MSG_EVENT_NOTIFY:
/* Process the message */
log_msg = &(msg[LOG_MSG_START_MSG_INDEX]);
npu_process_log_message(npu_dev, log_msg, size);
break;
default:
NPU_ERR("unsupported log response received %d\n", msg_id);
break;
}
}
static void host_session_log_hdlr(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_state != FW_ENABLED) {
NPU_WARN("handle npu session msg when FW is disabled\n");
goto skip_read_msg;
}
while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG,
host_ctx->ipc_msg_buf) == 0) {
NPU_DBG("received from log queue\n");
log_msg_proc(npu_dev, host_ctx->ipc_msg_buf);
}
skip_read_msg:
mutex_unlock(&host_ctx->lock);
}
/* -------------------------------------------------------------------------
* Function Definitions - Functionality
* -------------------------------------------------------------------------
@ -2367,7 +2320,10 @@ int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
NPU_ERR("prop_id: %x\n", prop_from_fw->prop_id);
NPU_ERR("network_hdl: %x\n", prop_from_fw->network_hdl);
NPU_ERR("param_num: %x\n", prop_from_fw->num_of_params);
for (i = 0; i < prop_from_fw->num_of_params; i++)
num_of_params = min_t(uint32_t,
prop_from_fw->num_of_params,
(uint32_t)PROP_PARAM_MAX_SIZE);
for (i = 0; i < num_of_params; i++)
NPU_ERR("%x\n", prop_from_fw->prop_param[i]);
}

View File

@ -148,7 +148,6 @@ void handle_session_unregister_buffer_done(enum hal_command_response cmd,
break;
}
}
mutex_unlock(&inst->cvpbufs.lock);
if (!found) {
dprintk(VIDC_ERR, "%s: client_data %x not found\n",
__func__, response->data.unregbuf.client_data);
@ -170,12 +169,11 @@ void handle_session_unregister_buffer_done(enum hal_command_response cmd,
data[3] = cbuf->buf.offset;
v4l2_event_queue_fh(&inst->event_handler, &event);
mutex_lock(&inst->cvpbufs.lock);
list_del(&cbuf->list);
mutex_unlock(&inst->cvpbufs.lock);
kfree(cbuf);
cbuf = NULL;
exit:
mutex_unlock(&inst->cvpbufs.lock);
put_inst(inst);
}
@ -440,9 +438,9 @@ static int msm_cvp_unregister_buffer(struct msm_vidc_inst *inst,
break;
}
}
mutex_unlock(&inst->cvpbufs.lock);
if (!found) {
print_client_buffer(VIDC_ERR, "invalid", inst, buf);
mutex_unlock(&inst->cvpbufs.lock);
return -EINVAL;
}
@ -458,6 +456,7 @@ static int msm_cvp_unregister_buffer(struct msm_vidc_inst *inst,
if (rc)
print_cvp_buffer(VIDC_ERR, "unregister failed", inst, cbuf);
mutex_unlock(&inst->cvpbufs.lock);
return rc;
}

View File

@ -1864,7 +1864,7 @@ static void glink_bgcom_handle_rx_done(struct glink_bgcom *glink,
mutex_unlock(&channel->intent_lock);
}
static void glink_bgcom_process_cmd(struct glink_bgcom *glink, void *rx_data,
static int glink_bgcom_process_cmd(struct glink_bgcom *glink, void *rx_data,
u32 rx_size)
{
struct glink_bgcom_msg *msg;
@ -1873,12 +1873,19 @@ static void glink_bgcom_process_cmd(struct glink_bgcom *glink, void *rx_data,
unsigned int param3;
unsigned int param4;
unsigned int cmd;
int offset = 0;
int ret;
u32 offset = 0;
int ret = 0;
u16 name_len;
char *name;
while (offset < rx_size) {
if (rx_size - offset < sizeof(struct glink_bgcom_msg)) {
ret = -EBADMSG;
GLINK_ERR(glink, "%s: Error %d process cmd\n",
__func__, ret);
return ret;
}
msg = (struct glink_bgcom_msg *)(rx_data + offset);
offset += sizeof(*msg);
@ -1961,6 +1968,7 @@ static void glink_bgcom_process_cmd(struct glink_bgcom *glink, void *rx_data,
break;
}
}
return ret;
}
/**