|
26 | 26 | #endif |
27 | 27 |
|
28 | 28 | #include <linux/clk.h> |
| 29 | +#include <linux/cpufreq.h> |
29 | 30 | #include <linux/devfreq.h> |
30 | 31 | #ifdef CONFIG_DEVFREQ_THERMAL |
31 | 32 | #include <linux/devfreq_cooling.h> |
|
46 | 47 | #define dev_pm_opp_find_freq_ceil opp_find_freq_ceil |
47 | 48 | #endif /* Linux >= 3.13 */ |
48 | 49 |
|
| 50 | +#define MAX_CLUSTERS 2 |
| 51 | + |
| 52 | +static struct cpumask allowed_cpus[MAX_CLUSTERS]; |
| 53 | +static unsigned int cpu_max_freq[MAX_CLUSTERS] = {UINT_MAX, UINT_MAX}; |
| 54 | +static unsigned int cpu_clipped_freq[MAX_CLUSTERS] = {UINT_MAX, UINT_MAX}; |
49 | 55 |
|
50 | 56 | static int |
51 | 57 | kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags) |
@@ -215,6 +221,63 @@ static void kbase_devfreq_exit(struct device *dev) |
215 | 221 | kbase_devfreq_term_freq_table(kbdev); |
216 | 222 | } |
217 | 223 |
|
| 224 | +static int kbase_devfreq_trans_notifier(struct notifier_block *nb, |
| 225 | + unsigned long val, void *data) |
| 226 | +{ |
| 227 | + struct kbase_device *kbdev = container_of(nb, struct kbase_device, |
| 228 | + gpu_trans_nb); |
| 229 | + struct devfreq_freqs *freqs = data; |
| 230 | + unsigned int new_rate = (unsigned int)(freqs->new / 1000); |
| 231 | + int i, cpu; |
| 232 | + |
| 233 | + if (!kbdev) |
| 234 | + goto out; |
| 235 | + |
| 236 | + dev_dbg(kbdev->dev, "%lu-->%lu cpu limit=%u, gpu limit=%u\n", |
| 237 | + freqs->old, freqs->new, |
| 238 | + kbdev->cpu_limit_freq, |
| 239 | + kbdev->gpu_limit_freq); |
| 240 | + |
| 241 | + if (val == DEVFREQ_PRECHANGE && |
| 242 | + new_rate >= kbdev->gpu_limit_freq) { |
| 243 | + for (i = 0; i < MAX_CLUSTERS; i++) { |
| 244 | + if (cpu_max_freq[i] > kbdev->cpu_limit_freq) { |
| 245 | + /* change policy->max right now */ |
| 246 | + cpu_clipped_freq[i] = kbdev->cpu_limit_freq; |
| 247 | + if (cpumask_empty(&allowed_cpus[i])) |
| 248 | + goto out; |
| 249 | + cpu = cpumask_any_and(&allowed_cpus[i], |
| 250 | + cpu_online_mask); |
| 251 | + if (cpu >= nr_cpu_ids) |
| 252 | + goto out; |
| 253 | + cpufreq_update_policy(cpu); |
| 254 | + } else { |
| 255 | + /* avoid someone changing policy->max */ |
| 256 | + cpu_clipped_freq[i] = kbdev->cpu_limit_freq; |
| 257 | + } |
| 258 | + } |
| 259 | + } else if (val == DEVFREQ_POSTCHANGE && |
| 260 | + new_rate < kbdev->gpu_limit_freq) { |
| 261 | + for (i = 0; i < MAX_CLUSTERS; i++) { |
| 262 | + if (cpu_clipped_freq[i] != UINT_MAX) { |
| 263 | + /* recover policy->max right now */ |
| 264 | + cpu_clipped_freq[i] = UINT_MAX; |
| 265 | + if (cpumask_empty(&allowed_cpus[i])) |
| 266 | + goto out; |
| 267 | + cpu = cpumask_any_and(&allowed_cpus[i], |
| 268 | + cpu_online_mask); |
| 269 | + if (cpu >= nr_cpu_ids) |
| 270 | + goto out; |
| 271 | + cpufreq_update_policy(cpu); |
| 272 | + } |
| 273 | + } |
| 274 | + } |
| 275 | + |
| 276 | +out: |
| 277 | + |
| 278 | + return NOTIFY_OK; |
| 279 | +} |
| 280 | + |
218 | 281 | int kbase_devfreq_init(struct kbase_device *kbdev) |
219 | 282 | { |
220 | 283 | struct devfreq_dev_profile *dp; |
@@ -257,6 +320,23 @@ int kbase_devfreq_init(struct kbase_device *kbdev) |
257 | 320 | goto opp_notifier_failed; |
258 | 321 | } |
259 | 322 |
|
| 323 | + if (of_property_read_u32(kbdev->dev->of_node, "cpu-limit-freq", |
| 324 | + &kbdev->cpu_limit_freq)) { |
| 325 | + dev_err(kbdev->dev, "Failed to get prop cpu-limit-freq\n"); |
| 326 | + kbdev->cpu_limit_freq = UINT_MAX; |
| 327 | + } |
| 328 | + if (of_property_read_u32(kbdev->dev->of_node, "gpu-limit-freq", |
| 329 | + &kbdev->gpu_limit_freq)) { |
| 330 | + dev_err(kbdev->dev, "Failed to get prop gpu-limit-freq\n"); |
| 331 | + kbdev->gpu_limit_freq = UINT_MAX; |
| 332 | + } |
| 333 | + |
| 334 | + kbdev->gpu_trans_nb.notifier_call = kbase_devfreq_trans_notifier; |
| 335 | + err = devfreq_register_notifier(kbdev->devfreq, &kbdev->gpu_trans_nb, |
| 336 | + DEVFREQ_TRANSITION_NOTIFIER); |
| 337 | + if (err) |
| 338 | + dev_err(kbdev->dev, "register gpu trans notifier (%d)\n", err); |
| 339 | + |
260 | 340 | #ifdef CONFIG_DEVFREQ_THERMAL |
261 | 341 | err = kbase_power_model_simple_init(kbdev); |
262 | 342 | if (err && err != -ENODEV && err != -EPROBE_DEFER) { |
@@ -319,3 +399,57 @@ void kbase_devfreq_term(struct kbase_device *kbdev) |
319 | 399 | else |
320 | 400 | kbdev->devfreq = NULL; |
321 | 401 | } |
| 402 | + |
| 403 | +static int kbase_cpufreq_policy_notifier(struct notifier_block *nb, |
| 404 | + unsigned long val, void *data) |
| 405 | +{ |
| 406 | + struct cpufreq_policy *policy = data; |
| 407 | + int i; |
| 408 | + |
| 409 | + if (val == CPUFREQ_START) { |
| 410 | + for (i = 0; i < MAX_CLUSTERS; i++) { |
| 411 | + if (cpumask_test_cpu(policy->cpu, |
| 412 | + &allowed_cpus[i])) |
| 413 | + break; |
| 414 | + if (cpumask_empty(&allowed_cpus[i])) { |
| 415 | + cpumask_copy(&allowed_cpus[i], |
| 416 | + policy->related_cpus); |
| 417 | + break; |
| 418 | + } |
| 419 | + } |
| 420 | + goto out; |
| 421 | + } |
| 422 | + |
| 423 | + if (val != CPUFREQ_ADJUST) |
| 424 | + goto out; |
| 425 | + |
| 426 | + for (i = 0; i < MAX_CLUSTERS; i++) { |
| 427 | + if (cpumask_test_cpu(policy->cpu, &allowed_cpus[i])) |
| 428 | + break; |
| 429 | + } |
| 430 | + if (i == MAX_CLUSTERS) |
| 431 | + goto out; |
| 432 | + |
| 433 | + if (policy->max > cpu_clipped_freq[i]) |
| 434 | + cpufreq_verify_within_limits(policy, 0, cpu_clipped_freq[i]); |
| 435 | + |
| 436 | + cpu_max_freq[i] = policy->max; |
| 437 | +	pr_debug("cluster%d max=%u, cpu clip=%u\n", i, cpu_max_freq[i], |
| 438 | +		 cpu_clipped_freq[i]); |
| 439 | + |
| 440 | +out: |
| 441 | + |
| 442 | + return NOTIFY_OK; |
| 443 | +} |
| 444 | + |
| 445 | +static struct notifier_block notifier_policy_block = { |
| 446 | + .notifier_call = kbase_cpufreq_policy_notifier |
| 447 | +}; |
| 448 | + |
| 449 | +static int __init kbase_cpufreq_init(void) |
| 450 | +{ |
| 451 | + return cpufreq_register_notifier(¬ifier_policy_block, |
| 452 | + CPUFREQ_POLICY_NOTIFIER); |
| 453 | +} |
| 454 | + |
| 455 | +subsys_initcall(kbase_cpufreq_init); |
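
Note on the two device-tree properties added above: kbase_devfreq_init() reads cpu-limit-freq and gpu-limit-freq with of_property_read_u32() and falls back to UINT_MAX (no clamping) when either property is absent. Since the transition notifier compares gpu-limit-freq against freqs->new / 1000 (devfreq reports Hz) and cpu-limit-freq against cpufreq's policy->max, both values are evidently expected in kHz. A minimal sketch of how a GPU node might declare them; the node name and the 800 MHz / 1.2 GHz figures are illustrative assumptions, not taken from this commit:

    gpu {
        /* assumed units: kHz, matching freqs->new / 1000 and policy->max */
        gpu-limit-freq = <800000>;   /* clamp CPUs once the GPU runs at or above 800 MHz */
        cpu-limit-freq = <1200000>;  /* cap the affected CPU clusters at 1.2 GHz while clamped */
    };

With these values in place, the DEVFREQ_PRECHANGE path lowers cpu_clipped_freq[] and calls cpufreq_update_policy(), the CPUFREQ_ADJUST policy notifier then enforces the cap via cpufreq_verify_within_limits(), and a DEVFREQ_POSTCHANGE below the GPU threshold restores UINT_MAX and lifts the cap.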