/*
* This file contains low level CPU setup functions.
* Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
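
/*
 * Three entry points are provided below:
 *   __cpu_preinit_ppc970 - early sanitizing of HID4, HID5, HID1 and HIOR
 *   __setup_cpu_ppc970   - power management setup in HID0 plus a save of
 *                          HID0/1/4/5 into cpu_state_storage
 *   __restore_cpu_ppc970 - re-applies the saved HID values with the MMU off
 * Each returns immediately when not running in hypervisor mode, since the
 * HID registers can only be written from HV mode.
 */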
_GLOBAL(__cpu_preinit_ppc970)
	/* Do nothing if not running in HV mode */
	mfmsr	r0
	rldicl.	r0,r0,4,63
	beqlr
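	/* The rldicl. above rotates the MSR left by 4 bits so that MSR[HV]
	 * (bit 3) lands in the least significant bit; the record form sets
	 * cr0 and beqlr returns when that bit is clear.
	 */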
	/* Make sure HID4:rm_ci is off before the MMU is turned off and that
	 * large pages are enabled with HID4:61, then clear HID5:DCBZ_size
	 * and HID5:DCBZ32_ill.
	 */
	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	rldimi	r3,r0,2,61	/* clear bit 61 (lg_pg_en) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	mfspr	r3,SPRN_HID5
	rldimi	r3,r0,6,56	/* clear bits 56 & 57 (DCBZ*) */
	sync
	mtspr	SPRN_HID5,r3
	isync
	sync
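	/* Each HID write above is bracketed with a sync before the mtspr and
	 * an isync/sync after it, so the update is fully synchronized before
	 * execution continues; the same pattern is used for every HID write
	 * in this file.
	 */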
	/* Setup some basic HID1 features */
	mfspr	r0,SPRN_HID1
	li	r3,0x1200		/* enable i-fetch cacheability */
	sldi	r3,r3,44		/* and prefetch */
	or	r0,r0,r3
	mtspr	SPRN_HID1,r0
	mtspr	SPRN_HID1,r0
	isync
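	/* The 0x1200 value shifted left by 44 sets two bits high in HID1
	 * which, per the inline comments, enable instruction-fetch
	 * cacheability and prefetching. HID1 is deliberately written twice:
	 * the 970 wants a back-to-back double write for HID1 updates to take
	 * effect.
	 */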
	/* Clear HIOR */
	li	r0,0
	sync
	mtspr	SPRN_HIOR,r0		/* Clear interrupt prefix */
	isync
	blr

/* Definitions for the table used to save CPU states */
#define CS_HID0		0
#define CS_HID1		8
#define CS_HID4		16
#define CS_HID5		24
#define CS_SIZE		32
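
/* cpu_state_storage below is a 32-byte, cache line aligned area in .data
 * with one 64-bit slot per saved register, at the CS_* offsets defined
 * above (HID0, HID1, HID4 and HID5 in that order).
 */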
	.data
	.balign	L1_CACHE_BYTES,0
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_BYTES,0
	.text
_GLOBAL(__setup_cpu_ppc970)
	/* Do nothing if not running in HV mode */
	mfmsr	r0
	rldicl.	r0,r0,4,63
	beqlr
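	/* Power management setup in HID0: the 0b0101 pattern in r11 is
	 * inserted by the rldimi below into HID0 bits 8-11 (IBM numbering),
	 * which per the inline comments clears DOZE and SLEEP while setting
	 * NAP and DPM.
	 */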
	mfspr	r0,SPRN_HID0
	li	r11,5			/* clear DOZE and SLEEP */
	rldimi	r0,r11,52,8		/* set NAP and DPM */
	mtspr	SPRN_HID0,r0
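	/* Read HID0 back several times after the write; the 970's HID0
	 * update sequence calls for a series of dummy reads before the new
	 * value can be relied upon.
	 */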
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	sync
	isync
	/* Save away cpu state */
	LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
	/* Save HID0,1,4 and 5 */
	mfspr	r3,SPRN_HID0
	std	r3,CS_HID0(r5)
	mfspr	r3,SPRN_HID1
	std	r3,CS_HID1(r5)
	mfspr	r3,SPRN_HID4
	std	r3,CS_HID4(r5)
	mfspr	r3,SPRN_HID5
	std	r3,CS_HID5(r5)
	blr

/* Called with no MMU context (typically MSR:IR/DR off) to restore the
 * CPU state as saved by __setup_cpu_ppc970 above. This does not include
 * cache settings.
 */
_GLOBAL(__restore_cpu_ppc970)
	/* Do nothing if not running in HV mode */
	mfmsr	r0
	rldicl.	r0,r0,4,63
	beqlr
	LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
	/* Before accessing memory, we make sure rm_ci is clear */
	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
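	/* rm_ci makes real mode (MMU off) data accesses cache inhibited, so
	 * it has to be cleared before the loads from cpu_state_storage
	 * below.
	 */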
	/* Clear interrupt prefix */
	li	r0,0
	sync
	mtspr	SPRN_HIOR,r0
	isync
	/* Restore HID0 */
	ld	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
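	/* Same HID0 read-back sequence as in __setup_cpu_ppc970 above */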
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	sync
	isync
	/* Restore HID1 */
	ld	r3,CS_HID1(r5)
	sync
	isync
	mtspr	SPRN_HID1,r3
	mtspr	SPRN_HID1,r3
	sync
	isync
	/* Restore HID4 */
	ld	r3,CS_HID4(r5)
	sync
	isync
	mtspr	SPRN_HID4,r3
	sync
	isync
	/* Restore HID5 */
	ld	r3,CS_HID5(r5)
	sync
	isync
	mtspr	SPRN_HID5,r3
	sync
	isync
	blr
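
/*
 * Usage note: __setup_cpu_ppc970 and __restore_cpu_ppc970 are typically
 * wired up as the cpu_setup/cpu_restore callbacks of the PPC970 entries in
 * arch/powerpc/kernel/cputable.c, while __cpu_preinit_ppc970 is called from
 * early platform setup code.
 */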