// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
// Exynos - CPU PMU (Power Management Unit) support
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
#include "exynos-pmu.h"
#define PMUALIVE_MASK GENMASK(13, 0)
#define TENSOR_SET_BITS (BIT(15) | BIT(14))
#define TENSOR_CLR_BITS BIT(15)
#define TENSOR_SMC_PMU_SEC_REG 0x82000504
#define TENSOR_PMUREG_READ 0
#define TENSOR_PMUREG_WRITE 1
#define TENSOR_PMUREG_RMW 2
struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
struct regmap *pmureg;
struct regmap *pmuintrgen;
/*
* Serialization lock for CPU hot plug and cpuidle ACPM hint
* programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot
* flags.
*/
raw_spinlock_t cpupm_lock;
unsigned long *in_cpuhp;
bool sys_insuspend;
bool sys_inreboot;
};
void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;
/*
* Tensor SoCs are configured so that PMU_ALIVE registers can only be written
* from EL3, but are still read accessible. As Linux needs to write some of
* these registers, the following functions are provided and exposed via
* regmap.
*
* Note: This SMC interface is known to be implemented on gs101 and derivative
* SoCs.
*/
/* Write to a protected PMU register. */
static int tensor_sec_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct arm_smccc_res res;
unsigned long pmu_base = (unsigned long)context;
arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
/* Returns 0 on success, or -EINVAL if the access isn't allowed. */
if (res.a0)
pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
return (int)res.a0;
}
/* Read/Modify/Write a protected PMU register. */
static int tensor_sec_reg_rmw(void *context, unsigned int reg,
unsigned int mask, unsigned int val)
{
struct arm_smccc_res res;
unsigned long pmu_base = (unsigned long)context;
arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
/* Returns 0 on success, or -EINVAL if the access isn't allowed. */
if (res.a0)
pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
return (int)res.a0;
}
/*
* Read a protected PMU register. All PMU registers can be read by Linux.
* Note: the TENSOR_PMUREG_READ SMC call is not used, as only the registers
* that can be written are readable via SMC.
*/
static int tensor_sec_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
*val = pmu_raw_readl(reg);
return 0;
}
/*
* For SoCs that have set/clear bit hardware this function can be used when
* the PMU register will be accessed by multiple masters.
*
* The set/clear operation is encoded in address bits 15:14 of the write
* (TENSOR_SET_BITS to set, TENSOR_CLR_BITS to clear) and the value written
* is the index of the bit to change.
*
* For example, to set bits 13:8 in PMU reg offset 0x3e80:
* tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
*
* To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
* tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
*/
static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
u32 mask)
{
int ret;
unsigned int i;
for (i = 0; i < 32; i++) {
if (!(mask & BIT(i)))
continue;
offset &= ~TENSOR_SET_BITS;
if (val & BIT(i))
offset |= TENSOR_SET_BITS;
else
offset |= TENSOR_CLR_BITS;
ret = tensor_sec_reg_write(ctx, offset, i);
if (ret)
return ret;
}
return 0;
}
static bool tensor_is_atomic(unsigned int reg)
{
/*
* Use atomic set/clear operations for PMU_ALIVE registers (offsets
* 0x0-0x3FFF), as these registers can be accessed by multiple masters.
* SFRs that don't support atomic set/clear are listed in the switch
* statement below.
*/
if (reg > PMUALIVE_MASK)
return false;
switch (reg) {
case GS101_SYSIP_DAT0:
case GS101_SYSTEM_CONFIGURATION:
return false;
default:
return true;
}
}
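/*
* regmap .reg_update_bits callback: use the atomic set/clear-bit hardware
* where the target register supports it, otherwise fall back to an SMC
* based read/modify/write.
*/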
static int tensor_sec_update_bits(void *ctx, unsigned int reg,
unsigned int mask, unsigned int val)
{
if (!tensor_is_atomic(reg))
return tensor_sec_reg_rmw(ctx, reg, mask, val);
return tensor_set_bits_atomic(ctx, reg, val, mask);
}
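/* Raw MMIO accessors for PMU registers, used where the regmap is bypassed. */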
void pmu_raw_writel(u32 val, u32 offset)
{
writel_relaxed(val, pmu_base_addr + offset);
}
u32 pmu_raw_readl(u32 offset)
{
return readl_relaxed(pmu_base_addr + offset);
}
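/*
* Apply the PMU configuration tables (plus any SoC specific hooks) for the
* requested system power-down mode.
*/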
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
unsigned int i;
const struct exynos_pmu_data *pmu_data;
if (!pmu_context || !pmu_context->pmu_data)
return;
pmu_data = pmu_context->pmu_data;
if (pmu_data->powerdown_conf)
pmu_data->powerdown_conf(mode);
if (pmu_data->pmu_config) {
for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
pmu_data->pmu_config[i].offset);
}
if (pmu_data->powerdown_conf_extra)
pmu_data->powerdown_conf_extra(mode);
if (pmu_data->pmu_config_extra) {
for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
pmu_data->pmu_config_extra[i].offset);
}
}
/*
* Split out the data for the ARM architectures, as it is relatively big
* and useless on other architectures.
*/
#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
#define exynos_pmu_data_arm_ptr(data) (&data)
#else
#define exynos_pmu_data_arm_ptr(data) NULL
#endif
static const struct regmap_config regmap_smccfg = {
.name = "pmu_regs",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.fast_io = true,
.use_single_read = true,
.use_single_write = true,
.reg_read = tensor_sec_reg_read,
.reg_write = tensor_sec_reg_write,
.reg_update_bits = tensor_sec_update_bits,
.use_raw_spinlock = true,
};
static const struct regmap_config regmap_pmu_intr = {
.name = "pmu_intr_gen",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.use_raw_spinlock = true,
};
static const struct exynos_pmu_data gs101_pmu_data = {
.pmu_secure = true,
.pmu_cpuhp = true,
};
/*
* PMU platform driver and devicetree bindings.
*/
static const struct of_device_id exynos_pmu_of_device_ids[] = {
{
.compatible = "google,gs101-pmu",
.data = &gs101_pmu_data,
}, {
.compatible = "samsung,exynos3250-pmu",
.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
}, {
.compatible = "samsung,exynos4210-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
}, {
.compatible = "samsung,exynos4212-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
}, {
.compatible = "samsung,exynos4412-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
}, {
.compatible = "samsung,exynos5250-pmu",
.data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
}, {
.compatible = "samsung,exynos5410-pmu",
}, {
.compatible = "samsung,exynos5420-pmu",
.data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
}, {
.compatible = "samsung,exynos5433-pmu",
}, {
.compatible = "samsung,exynos7-pmu",
}, {
.compatible = "samsung,exynos850-pmu",
},
{ /*sentinel*/ },
};
static const struct mfd_cell exynos_pmu_devs[] = {
{ .name = "exynos-clkout", },
};
/**
* exynos_get_pmu_regmap() - Obtain pmureg regmap
*
* Find the pmureg regmap previously configured in probe() and return regmap
* pointer.
*
* Return: A pointer to the regmap if found, or an ERR_PTR() encoded error value.
*/
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
exynos_pmu_of_device_ids);
if (np)
return exynos_get_pmu_regmap_by_phandle(np, NULL);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
/**
* exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
* @np: Device node holding PMU phandle property
* @propname: Name of property holding phandle value
*
* Find the pmureg regmap previously configured in probe() and return regmap
* pointer.
*
* Return: A pointer to the regmap if found, or an ERR_PTR() encoded error value.
*/
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
const char *propname)
{
struct device_node *pmu_np;
struct device *dev;
if (propname)
pmu_np = of_parse_phandle(np, propname, 0);
else
pmu_np = np;
if (!pmu_np)
return ERR_PTR(-ENODEV);
/*
* Determine whether the exynos-pmu device has probed, and therefore the
* regmap has been created and can be returned to the caller. Otherwise
* return -EPROBE_DEFER.
*/
dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
(void *)pmu_np);
if (propname)
of_node_put(pmu_np);
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
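/*
* Illustrative client usage (sketch only; the "samsung,pmu-syscon" property
* name below is hypothetical, not defined by this driver):
*
* struct regmap *pmu;
*
* pmu = exynos_get_pmu_regmap_by_phandle(dev->of_node, "samsung,pmu-syscon");
* if (IS_ERR(pmu))
* return PTR_ERR(pmu); (may be -EPROBE_DEFER until exynos-pmu probes)
*/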
/*
* CPU_INFORM register "hint" values must be programmed in addition to the
* standard PSCI calls for CPU hotplug and CPU idle states to be functional.
* This works around limitations in the el3mon/ACPM firmware.
*/
#define CPU_INFORM_CLEAR 0
#define CPU_INFORM_C2 1
/*
* Functions prefixed with __gs101_cpu_pmu_ are common code shared by the CPU
* PM notifiers (CPUIdle) and the CPU hotplug callbacks. They must be called
* with IRQs disabled and cpupm_lock held.
*/
static int __gs101_cpu_pmu_online(unsigned int cpu)
{
unsigned int cpuhint = smp_processor_id();
u32 reg, mask;
/* clear cpu inform hint */
regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
CPU_INFORM_CLEAR);
mask = BIT(cpu);
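/* clear this CPU's bit in the GRP2 interrupt enable register */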
regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
mask, (0 << cpu));
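/* ack any interrupt still pending for this CPU */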
regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);
regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
reg & mask);
return 0;
}
/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
static int gs101_cpu_pmu_online(void)
{
int cpu;
raw_spin_lock(&pmu_context->cpupm_lock);
if (pmu_context->sys_inreboot) {
raw_spin_unlock(&pmu_context->cpupm_lock);
return NOTIFY_OK;
}
cpu = smp_processor_id();
__gs101_cpu_pmu_online(cpu);
raw_spin_unlock(&pmu_context->cpupm_lock);
return NOTIFY_OK;
}
/* Called from CPU hot plug callback with IRQs enabled */
static int gs101_cpuhp_pmu_online(unsigned int cpu)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
__gs101_cpu_pmu_online(cpu);
/*
* Mark this CPU as having finished the hotplug.
* This means this CPU can now enter C2 idle state.
*/
clear_bit(cpu, pmu_context->in_cpuhp);
raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
return 0;
}
/* Common function shared by both CPU hot plug and CPUIdle */
static int __gs101_cpu_pmu_offline(unsigned int cpu)
{
unsigned int cpuhint = smp_processor_id();
u32 reg, mask;
/* set cpu inform hint */
regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
CPU_INFORM_C2);
mask = BIT(cpu);
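/* set this CPU's bit in the GRP2 interrupt enable register */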
regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
mask, BIT(cpu));
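/* ack any GRP1 interrupt pending for this CPU */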
regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
reg & mask);
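/* this CPU also maps to bit (cpu + 8) in GRP1; ack that one as well */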
mask = (BIT(cpu + 8));
regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
reg & mask);
return 0;
}
/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
static int gs101_cpu_pmu_offline(void)
{
int cpu;
raw_spin_lock(&pmu_context->cpupm_lock);
cpu = smp_processor_id();
if (test_bit(cpu, pmu_context->in_cpuhp)) {
raw_spin_unlock(&pmu_context->cpupm_lock);
return NOTIFY_BAD;
}
/* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
raw_spin_unlock(&pmu_context->cpupm_lock);
return NOTIFY_OK;
}
__gs101_cpu_pmu_offline(cpu);
raw_spin_unlock(&pmu_context->cpupm_lock);
return NOTIFY_OK;
}
/* Called from CPU hot plug callback with IRQs enabled */
static int gs101_cpuhp_pmu_offline(unsigned int cpu)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
/*
* Mark this CPU as entering hotplug. So as not to confuse ACPM, a CPU
* that is entering hotplug must not enter the C2 idle state.
*/
set_bit(cpu, pmu_context->in_cpuhp);
__gs101_cpu_pmu_offline(cpu);
raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
return 0;
}
static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
unsigned long action, void *v)
{
switch (action) {
case CPU_PM_ENTER:
return gs101_cpu_pmu_offline();
case CPU_PM_EXIT:
return gs101_cpu_pmu_online();
}
return NOTIFY_OK;
}
static struct notifier_block gs101_cpu_pm_notifier = {
.notifier_call = gs101_cpu_pm_notify_callback,
/*
* We want to be called first, as the ACPM hint and handshake are what
* put the CPU into C2.
*/
.priority = INT_MAX
};
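/*
* On reboot or power-off, flag sys_inreboot so the CPU PM notifiers stop
* programming ACPM hints while CPUs are being torn down.
*/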
static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
unsigned long event, void *v)
{
unsigned long flags;
switch (event) {
case SYS_POWER_OFF:
case SYS_RESTART:
raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
pmu_context->sys_inreboot = true;
raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
break;
}
return NOTIFY_OK;
}
static struct notifier_block exynos_cpupm_reboot_nb = {
.priority = INT_MAX,
.notifier_call = exynos_cpupm_reboot_notifier,
};
static int setup_cpuhp_and_cpuidle(struct device *dev)
{
struct device_node *intr_gen_node;
struct resource intrgen_res;
void __iomem *virt_addr;
int ret, cpu;
intr_gen_node = of_parse_phandle(dev->of_node,
"google,pmu-intr-gen-syscon", 0);
if (!intr_gen_node) {
/*
* To maintain support for older DTs that didn't specify the syscon
* phandle, just issue a warning rather than failing the probe.
*/
dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
return 0;
}
/*
* To avoid lockdep issues (the CPU PM notifiers use raw spinlocks), create
* an MMIO regmap for pmu-intr-gen that uses raw spinlocks, instead of the
* syscon-provided regmap.
*/
ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
if (ret) {
of_node_put(intr_gen_node);
return ret;
}
virt_addr = devm_ioremap(dev, intrgen_res.start,
resource_size(&intrgen_res));
if (!virt_addr) {
of_node_put(intr_gen_node);
return -ENOMEM;
}
pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
&regmap_pmu_intr);
if (IS_ERR(pmu_context->pmuintrgen)) {
dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
of_node_put(intr_gen_node);
return PTR_ERR(pmu_context->pmuintrgen);
}
/* register the custom mmio regmap with syscon, then drop the node ref */
ret = of_syscon_register_regmap(intr_gen_node,
pmu_context->pmuintrgen);
of_node_put(intr_gen_node);
if (ret)
return ret;
pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
GFP_KERNEL);
if (!pmu_context->in_cpuhp)
return -ENOMEM;
raw_spin_lock_init(&pmu_context->cpupm_lock);
pmu_context->sys_inreboot = false;
pmu_context->sys_insuspend = false;
/* program the PMU for the CPUs that are already online */
for_each_online_cpu(cpu)
gs101_cpuhp_pmu_online(cpu);
/* register CPU hotplug callbacks */
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
gs101_cpuhp_pmu_online, NULL);
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
NULL, gs101_cpuhp_pmu_offline);
/* register CPU PM notifiers for cpuidle */
cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
register_reboot_notifier(&exynos_cpupm_reboot_nb);
return 0;
}
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct regmap_config pmu_regmcfg;
struct regmap *regmap;
struct resource *res;
int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu_base_addr))
return PTR_ERR(pmu_base_addr);
pmu_context = devm_kzalloc(&pdev->dev,
sizeof(struct exynos_pmu_context),
GFP_KERNEL);
if (!pmu_context)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
pmu_context->pmu_data = of_device_get_match_data(dev);
/* For SoCs where PMU register writes are secured, use the custom regmap */
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
pmu_regmcfg = regmap_smccfg;
pmu_regmcfg.max_register = resource_size(res) -
pmu_regmcfg.reg_stride;
/* Need physical address for SMC call */
regmap = devm_regmap_init(dev, NULL,
(void *)(uintptr_t)res->start,
&pmu_regmcfg);
if (IS_ERR(regmap))
return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
"regmap init failed\n");
ret = of_syscon_register_regmap(dev->of_node, regmap);
if (ret)
return ret;
} else {
/* let syscon create mmio regmap */
regmap = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(regmap))
return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
"syscon_node_to_regmap failed\n");
}
pmu_context->pmureg = regmap;
pmu_context->dev = dev;
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
ret = setup_cpuhp_and_cpuidle(dev);
if (ret)
return ret;
}
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
platform_set_drvdata(pdev, pmu_context);
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
if (ret)
return ret;
if (devm_of_platform_populate(dev))
dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
dev_dbg(dev, "Exynos PMU Driver probe done\n");
return 0;
}
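/*
* Flag sys_insuspend across system-wide suspend/resume so that CPU_PM_ENTER
* events in the suspend path don't program ACPM idle hints (see
* gs101_cpu_pmu_offline()).
*/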
static int exynos_cpupm_suspend_noirq(struct device *dev)
{
raw_spin_lock(&pmu_context->cpupm_lock);
pmu_context->sys_insuspend = true;
raw_spin_unlock(&pmu_context->cpupm_lock);
return 0;
}
static int exynos_cpupm_resume_noirq(struct device *dev)
{
raw_spin_lock(&pmu_context->cpupm_lock);
pmu_context->sys_insuspend = false;
raw_spin_unlock(&pmu_context->cpupm_lock);
return 0;
}
static const struct dev_pm_ops cpupm_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
exynos_cpupm_resume_noirq)
};
static struct platform_driver exynos_pmu_driver = {
.driver = {
.name = "exynos-pmu",
.of_match_table = exynos_pmu_of_device_ids,
.pm = pm_sleep_ptr(&cpupm_pm_ops),
},
.probe = exynos_pmu_probe,
};
static int __init exynos_pmu_init(void)
{
return platform_driver_register(&exynos_pmu_driver);
}
postcore_initcall(exynos_pmu_init);