msm: kgsl: remove unused l2pc qos

Change-Id: I5d10e07bab90bbd916b65022c66e1b3a7b20a42f
Signed-off-by: Yaroslav Furman <yaro330@gmail.com>
Signed-off-by: azrim <mirzaspc@gmail.com>
This commit is contained in:
Demon000 2020-04-23 11:09:16 +02:00 committed by azrim
parent 5d75b91d25
commit 1b9846f27f
No known key found for this signature in database
GPG Key ID: 497F8FB059B45D1C
6 changed files with 0 additions and 105 deletions

View File

@@ -124,9 +124,6 @@ Optional Properties:
- qcom,pm-qos-wakeup-latency:
Similar to the above. Driver votes against deep low
power modes right before GPU wakes up from sleep.
- qcom,l2pc-cpu-mask-latency:
The CPU mask latency in microseconds to avoid L2PC
on masked CPUs.
- qcom,gpu-cx-ipeak:
CX Ipeak is a mitigation scheme which throttles cDSP frequency
@@ -190,17 +187,6 @@ Optional Properties:
Based on the ubwc mode, program the appropriate bit into
certain protected registers and also pass to the user as
a property.
- qcom,macrotiling-channels:
Specify the number of macrotiling channels for this chip.
This is programmed into certain registers and also pass to
the user as a property.
- qcom,l2pc-cpu-mask:
Disables L2PC on masked CPUs when any of Graphics
rendering thread is running on masked CPUs.
Bit 0 is for CPU-0, bit 1 is for CPU-1...
- qcom,l2pc-update-queue:
Disables L2PC on masked CPUs at queue time when it's true.
- qcom,snapshot-size:
Specify the size of snapshot in bytes. This will override

View File

@@ -1006,11 +1006,6 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev,
&device->pwrctrl.pm_qos_active_latency))
device->pwrctrl.pm_qos_active_latency = 501;
/* get pm-qos-cpu-mask-latency, set it to default if not found */
if (of_property_read_u32(node, "qcom,l2pc-cpu-mask-latency",
&device->pwrctrl.pm_qos_cpu_mask_latency))
device->pwrctrl.pm_qos_cpu_mask_latency = 501;
/* get pm-qos-wakeup-latency, set it to default if not found */
if (of_property_read_u32(node, "qcom,pm-qos-wakeup-latency",
&device->pwrctrl.pm_qos_wakeup_latency))
@@ -1760,10 +1755,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
/* make sure ADRENO_DEVICE_STARTED is not set here */
WARN_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));
/* disallow l2pc during wake up to improve GPU wake up time */
kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
KGSL_L2PC_WAKEUP_TIMEOUT);
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
pmqos_wakeup_vote);

View File

@@ -1447,10 +1447,6 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
spin_unlock(&drawctxt->lock);
if (device->pwrctrl.l2pc_update_queue)
kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
KGSL_L2PC_QUEUE_TIMEOUT);
/* Add the context to the dispatcher pending list */
dispatcher_queue_context(adreno_dev, drawctxt);

View File

@@ -4969,7 +4969,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
{
int status = -EINVAL;
struct resource *res;
int cpu;
status = _register_device(device);
if (status)
@@ -5099,22 +5098,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
if (device->pwrctrl.l2pc_cpus_mask) {
struct pm_qos_request *qos = &device->pwrctrl.l2pc_cpus_qos;
qos->type = PM_QOS_REQ_AFFINE_CORES;
cpumask_empty(&qos->cpus_affine);
for_each_possible_cpu(cpu) {
if ((1 << cpu) & device->pwrctrl.l2pc_cpus_mask)
cpumask_set_cpu(cpu, &qos->cpus_affine);
}
pm_qos_add_request(&device->pwrctrl.l2pc_cpus_qos,
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
}
device->events_wq = alloc_workqueue("kgsl-events",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
@@ -5151,8 +5134,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
kgsl_pwrctrl_uninit_sysfs(device);
pm_qos_remove_request(&device->pwrctrl.pm_qos_req_dma);
if (device->pwrctrl.l2pc_cpus_mask)
pm_qos_remove_request(&device->pwrctrl.l2pc_cpus_qos);
idr_destroy(&device->context_idr);

View File

@@ -572,35 +572,6 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
}
EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint);
/**
* kgsl_pwrctrl_update_l2pc() - Update existing qos request
* @device: Pointer to the kgsl_device struct
* @timeout_us: the effective duration of qos request in usecs.
*
* Updates an existing qos request to avoid L2PC on the
* CPUs (which are selected through dtsi) on which GPU
* thread is running. This would help for performance.
*/
void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
unsigned long timeout_us)
{
int cpu;
if (device->pwrctrl.l2pc_cpus_mask == 0)
return;
cpu = get_cpu();
put_cpu();
if ((1 << cpu) & device->pwrctrl.l2pc_cpus_mask) {
pm_qos_update_request_timeout(
&device->pwrctrl.l2pc_cpus_qos,
device->pwrctrl.pm_qos_cpu_mask_latency,
timeout_us);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc);
static ssize_t kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -2229,13 +2200,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->power_flags = 0;
kgsl_property_read_u32(device, "qcom,l2pc-cpu-mask",
&pwr->l2pc_cpus_mask);
pwr->l2pc_update_queue = of_property_read_bool(
device->pdev->dev.of_node,
"qcom,l2pc-update-queue");
pm_runtime_enable(&pdev->dev);
ocmem_bus_node = of_find_node_by_name(
@@ -2904,10 +2868,6 @@ _slumber(struct kgsl_device *device)
kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
PM_QOS_DEFAULT_VALUE);
if (device->pwrctrl.l2pc_cpus_mask)
pm_qos_update_request(
&device->pwrctrl.l2pc_cpus_qos,
PM_QOS_DEFAULT_VALUE);
break;
case KGSL_STATE_SUSPEND:
complete_all(&device->hwaccess_gate);

View File

@@ -57,19 +57,6 @@
#define KGSL_PWR_DEL_LIMIT 1
#define KGSL_PWR_SET_LIMIT 2
/*
* The effective duration of qos request in usecs at queue time.
* After timeout, qos request is cancelled automatically.
* Kept 64ms default, inline with default GPU idle time.
*/
#define KGSL_L2PC_QUEUE_TIMEOUT (64 * 1000)
/*
* The effective duration of qos request in usecs at wakeup time.
* After timeout, qos request is cancelled automatically.
*/
#define KGSL_L2PC_WAKEUP_TIMEOUT (10 * 1000)
enum kgsl_pwrctrl_timer_type {
KGSL_PWR_IDLE_TIMER,
};
@@ -149,9 +136,6 @@ struct kgsl_regulator {
* @ahbpath_pcl - CPU to AHB path bus scale identifier
* @irq_name - resource name for the IRQ
* @clk_stats - structure of clock statistics
* @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs
* @l2pc_update_queue - Boolean flag to avoid L2PC on masked CPUs at queue time
* @l2pc_cpus_qos - qos structure to avoid L2PC on CPUs
* @pm_qos_req_dma - the power management quality of service structure
* @pm_qos_active_latency - allowed CPU latency in microseconds when active
* @pm_qos_cpu_mask_latency - allowed CPU mask latency in microseconds
@@ -208,9 +192,6 @@ struct kgsl_pwrctrl {
uint32_t ahbpath_pcl;
const char *irq_name;
struct kgsl_clk_stats clk_stats;
unsigned int l2pc_cpus_mask;
bool l2pc_update_queue;
struct pm_qos_request l2pc_cpus_qos;
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_active_latency;
unsigned int pm_qos_cpu_mask_latency;