Line 0
Link Here
|
|
|
1 |
// SPDX-License-Identifier: GPL-2.0-only |
2 |
// Copyright (C) 2022 Linutronix GmbH, John Ogness |
3 |
// Copyright (C) 2022 Intel, Thomas Gleixner |
4 |
|
5 |
#include <linux/kernel.h> |
6 |
#include <linux/console.h> |
7 |
#include <linux/delay.h> |
8 |
#include <linux/kthread.h> |
9 |
#include <linux/slab.h> |
10 |
#include <linux/serial_core.h> |
11 |
#include <linux/syscore_ops.h> |
12 |
#include "printk_ringbuffer.h" |
13 |
#include "internal.h" |
14 |
/* |
15 |
* Printk console printing implementation for consoles that do not depend
16 |
* on the legacy style console_lock mechanism. |
17 |
* |
18 |
* The state of the console is maintained in the "nbcon_state" atomic |
19 |
* variable. |
20 |
* |
21 |
* The console is locked when: |
22 |
* |
23 |
* - The 'prio' field contains the priority of the context that owns the |
24 |
* console. Only higher priority contexts are allowed to take over the |
25 |
* lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked. |
26 |
* |
27 |
* - The 'cpu' field denotes on which CPU the console is locked. It is used |
28 |
* to prevent busy waiting on the same CPU. Also it informs the lock owner |
29 |
* that it has lost the lock in a more complex scenario when the lock was |
30 |
* taken over by a higher priority context, released, and taken on another |
31 |
* CPU with the same priority as the interrupted owner. |
32 |
* |
33 |
* The acquire mechanism uses a few more fields: |
34 |
* |
35 |
* - The 'req_prio' field is used by the handover approach to make the |
36 |
* current owner aware that there is a context with a higher priority |
37 |
* waiting for the friendly handover. |
38 |
* |
39 |
* - The 'unsafe' field allows to take over the console in a safe way in the |
40 |
* middle of emitting a message. The field is set only when accessing some |
41 |
* shared resources or when the console device is manipulated. It can be |
42 |
* cleared, for example, after emitting one character when the console |
43 |
* device is in a consistent state. |
44 |
* |
45 |
* - The 'unsafe_takeover' field is set when a hostile takeover took the |
46 |
* console in an unsafe state. The console will stay in the unsafe state |
47 |
* until re-initialized. |
48 |
* |
49 |
* The acquire mechanism uses three approaches: |
50 |
* |
51 |
* 1) Direct acquire when the console is not owned or is owned by a lower |
52 |
* priority context and is in a safe state. |
53 |
* |
54 |
* 2) Friendly handover mechanism uses a request/grant handshake. It is used |
55 |
* when the current owner has lower priority and the console is in an |
56 |
* unsafe state. |
57 |
* |
58 |
* The requesting context: |
59 |
* |
60 |
* a) Sets its priority into the 'req_prio' field. |
61 |
* |
62 |
* b) Waits (with a timeout) for the owning context to unlock the |
63 |
* console. |
64 |
* |
65 |
* c) Takes the lock and clears the 'req_prio' field. |
66 |
* |
67 |
* The owning context: |
68 |
* |
69 |
* a) Observes the 'req_prio' field set on exit from the unsafe |
70 |
* console state. |
71 |
* |
72 |
* b) Gives up console ownership by clearing the 'prio' field. |
73 |
* |
74 |
* 3) Unsafe hostile takeover allows to take over the lock even when the |
75 |
* console is in an unsafe state. It is used only in panic() by the final
76 |
* attempt to flush consoles in a try and hope mode. |
77 |
* |
78 |
* Note that separate record buffers are used in panic(). As a result, |
79 |
* the messages can be read and formatted without any risk even after |
80 |
* using the hostile takeover in unsafe state. |
81 |
* |
82 |
* The release function simply clears the 'prio' field. |
83 |
* |
84 |
* All operations on @console::nbcon_state are atomic cmpxchg based to |
85 |
* handle concurrency. |
86 |
* |
87 |
* The acquire/release functions implement only minimal policies: |
88 |
* |
89 |
* - Preference for higher priority contexts. |
90 |
* - Protection of the panic CPU. |
91 |
* |
92 |
* All other policy decisions must be made at the call sites: |
93 |
* |
94 |
* - What is marked as an unsafe section. |
95 |
* - Whether to spin-wait if there is already an owner and the console is |
96 |
* in an unsafe state. |
97 |
* - Whether to attempt an unsafe hostile takeover. |
98 |
* |
99 |
* The design allows to implement the well known: |
100 |
* |
101 |
* acquire() |
102 |
* output_one_printk_record() |
103 |
* release() |
104 |
* |
105 |
* The output of one printk record might be interrupted with a higher priority |
106 |
* context. The new owner is supposed to reprint the entire interrupted record |
107 |
* from scratch. |
108 |
*/ |
109 |
|
110 |
/**
 * nbcon_state_set - Helper function to set the console state
 * @con:	Console to update
 * @new:	The new state to write
 *
 * Only to be used when the console is not yet or no longer visible in the
 * system. Otherwise use nbcon_state_try_cmpxchg().
 */
static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
{
	/* @atom carries the complete state; a single atomic store publishes it. */
	atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
}
122 |
|
123 |
/**
 * nbcon_state_read - Helper function to read the console state
 * @con:	Console to read
 * @state:	The state to store the result
 */
static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
{
	/* A single atomic load yields a consistent snapshot of all state fields. */
	state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
}
132 |
|
133 |
/**
 * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
 * @con:	Console to update
 * @cur:	Old/expected state
 * @new:	New state
 *
 * Return: True on success. False on fail and @cur is updated.
 */
static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
					   struct nbcon_state *new)
{
	return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
}
146 |
|
147 |
#ifdef CONFIG_64BIT

/* The sequence number fits into the atomic_long_t directly on 64bit. */
#define __seq_to_nbcon_seq(seq) (seq)
#define __nbcon_seq_to_seq(seq) (seq)

#else /* CONFIG_64BIT */

/* Only the lower 32 bits of the sequence number can be stored on 32bit. */
#define __seq_to_nbcon_seq(seq) ((u32)seq)

static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq)
{
	u64 seq;
	u64 rb_next_seq;

	/*
	 * The provided sequence is only the lower 32 bits of the ringbuffer
	 * sequence. It needs to be expanded to 64bit. Get the next sequence
	 * number from the ringbuffer and fold it.
	 *
	 * Having a 32bit representation in the console is sufficient.
	 * If a console ever gets more than 2^31 records behind
	 * the ringbuffer then this is the least of the problems.
	 *
	 * Also the access to the ring buffer is always safe.
	 */
	rb_next_seq = prb_next_seq(prb);
	seq = rb_next_seq - ((u32)rb_next_seq - nbcon_seq);

	return seq;
}

#endif /* CONFIG_64BIT */
179 |
|
180 |
/**
 * nbcon_seq_read - Read the current console sequence
 * @con:	Console to read the sequence of
 *
 * Return: Sequence number of the next record to print on @con.
 */
u64 nbcon_seq_read(struct console *con)
{
	unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));

	/* Expand the stored value to a full 64bit sequence (no-op on 64bit). */
	return __nbcon_seq_to_seq(nbcon_seq);
}
192 |
|
193 |
/**
 * nbcon_seq_force - Force console sequence to a specific value
 * @con:	Console to work on
 * @seq:	Sequence number value to set
 *
 * Only to be used during init (before registration) or in extreme situations
 * (such as panic with CONSOLE_REPLAY_ALL).
 */
void nbcon_seq_force(struct console *con, u64 seq)
{
	/*
	 * If the specified record no longer exists, the oldest available record
	 * is chosen. This is especially important on 32bit systems because only
	 * the lower 32 bits of the sequence number are stored. The upper 32 bits
	 * are derived from the sequence numbers available in the ringbuffer.
	 */
	u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));

	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq));

	/* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
	con->seq = 0;
}
216 |
|
217 |
/* Load the console's current sequence number into the acquire context. */
static void nbcon_context_seq_set(struct nbcon_context *ctxt)
{
	ctxt->seq = nbcon_seq_read(ctxt->console);
}
221 |
|
222 |
/**
 * nbcon_seq_try_update - Try to update the console sequence number
 * @ctxt:	Pointer to an acquire context that contains
 *		all information about the acquire mode
 * @new_seq:	The new sequence number to set
 *
 * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
 * the 64bit value). This could be a different value than @new_seq if
 * nbcon_seq_force() was used or the current context no longer owns the
 * console. In the latter case, it will stop printing anyway.
 */
static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
{
	unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
	struct console *con = ctxt->console;

	if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
				    __seq_to_nbcon_seq(new_seq))) {
		ctxt->seq = new_seq;
	} else {
		/* Lost the race: resync with whatever value won. */
		ctxt->seq = nbcon_seq_read(con);
	}
}
245 |
|
246 |
/* Whether the printk threads may be used; __ro_after_init: fixed after boot. */
bool printk_threads_enabled __ro_after_init;
247 |
|
248 |
/**
 * nbcon_context_try_acquire_direct - Try to acquire directly
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * Acquire the console when it is released. Also acquire the console when
 * the current owner has a lower priority and the console is in a safe state.
 *
 * Return:	0 on success. Otherwise, an error code on failure. Also @cur
 *		is updated to the latest state when failed to modify it.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
 *			Or the current owner or waiter has the same or higher
 *			priority. No acquire method can be successful in
 *			this case.
 *
 *	-EBUSY:		The current owner has a lower priority but the console
 *			in an unsafe state. The caller should try using
 *			the handover acquire method.
 */
static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
					    struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	do {
		if (other_cpu_in_panic())
			return -EPERM;

		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
			return -EPERM;

		if (cur->unsafe)
			return -EBUSY;

		/*
		 * The console should never be safe for a direct acquire
		 * if an unsafe hostile takeover has ever happened.
		 */
		WARN_ON_ONCE(cur->unsafe_takeover);

		new.atom = cur->atom;
		new.prio = ctxt->prio;
		new.req_prio = NBCON_PRIO_NONE;
		/* Remain unsafe forever if a hostile takeover ever happened. */
		new.unsafe = cur->unsafe_takeover;
		new.cpu = cpu;

	} while (!nbcon_state_try_cmpxchg(con, cur, &new));

	return 0;
}
303 |
|
304 |
/* Check whether this context is still the waiter identified by @req_prio. */
static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
{
	/*
	 * The request context is well defined by the @req_prio because:
	 *
	 * - Only a context with a higher priority can take over the request.
	 * - There are only three priorities.
	 * - Only one CPU is allowed to request PANIC priority.
	 * - Lower priorities are ignored during panic() until reboot.
	 *
	 * As a result, the following scenario is *not* possible:
	 *
	 * 1. Another context with a higher priority directly takes ownership.
	 * 2. The higher priority context releases the ownership.
	 * 3. A lower priority context takes the ownership.
	 * 4. Another context with the same priority as this context
	 *    creates a request and starts waiting.
	 */

	return (cur->req_prio == expected_prio);
}
325 |
|
326 |
/**
 * nbcon_context_try_acquire_requested - Try to acquire after having
 *					 requested a handover
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * This is a helper function for nbcon_context_try_acquire_handover().
 * It is called when the console is in an unsafe state. The current
 * owner will release the console on exit from the unsafe region.
 *
 * Return:	0 on success and @cur is updated to the new console state.
 *		Otherwise an error code on failure.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU
 *			or this context is no longer the waiter.
 *
 *	-EBUSY:		The console is still locked. The caller should
 *			continue waiting.
 *
 * Note: The caller must still remove the request when an error has occurred
 *       except when this context is no longer the waiter.
 */
static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
					       struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	/* Note that the caller must still remove the request! */
	if (other_cpu_in_panic())
		return -EPERM;

	/*
	 * Note that the waiter will also change if there was an unsafe
	 * hostile takeover.
	 */
	if (!nbcon_waiter_matches(cur, ctxt->prio))
		return -EPERM;

	/* If still locked, caller should continue waiting. */
	if (cur->prio != NBCON_PRIO_NONE)
		return -EBUSY;

	/*
	 * The previous owner should have never released ownership
	 * in an unsafe region.
	 */
	WARN_ON_ONCE(cur->unsafe);

	new.atom = cur->atom;
	new.prio = ctxt->prio;
	new.req_prio = NBCON_PRIO_NONE;
	/* Remain unsafe forever if a hostile takeover ever happened. */
	new.unsafe = cur->unsafe_takeover;
	new.cpu = cpu;

	if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
		/*
		 * The acquire could fail only when it has been taken
		 * over by a higher priority context.
		 */
		WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
		return -EPERM;
	}

	/* Handover success. This context now owns the console. */
	return 0;
}
396 |
|
397 |
/**
 * nbcon_context_try_acquire_handover - Try to acquire via handover
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * The function must be called only when the context has higher priority
 * than the current owner and the console is in an unsafe state.
 * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
 *
 * The function sets "req_prio" field to make the current owner aware of
 * the request. Then it waits until the current owner releases the console,
 * or an even higher context takes over the request, or timeout expires.
 *
 * The current owner checks the "req_prio" field on exit from the unsafe
 * region and releases the console. It does not touch the "req_prio" field
 * so that the console stays reserved for the waiter.
 *
 * Return:	0 on success. Otherwise, an error code on failure. Also @cur
 *		is updated to the latest state when failed to modify it.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
 *			Or a higher priority context has taken over the
 *			console or the handover request.
 *
 *	-EBUSY:		The current owner is on the same CPU so that the
 *			handshake could not work. Or the current owner is not
 *			willing to wait (zero timeout). Or the console does
 *			not enter the safe state before timeout passed. The
 *			caller might still use the unsafe hostile takeover
 *			when allowed.
 *
 *	-EAGAIN:	@cur has changed when creating the handover request.
 *			The caller should retry with direct acquire.
 */
static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
					      struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;
	int timeout;
	int request_err = -EBUSY;

	/*
	 * Check that the handover is called when the direct acquire failed
	 * with -EBUSY.
	 */
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
	WARN_ON_ONCE(!cur->unsafe);

	/* Handover is not possible on the same CPU. */
	if (cur->cpu == cpu)
		return -EBUSY;

	/*
	 * Console stays unsafe after an unsafe takeover until re-initialized.
	 * Waiting is not going to help in this case.
	 */
	if (cur->unsafe_takeover)
		return -EBUSY;

	/* Is the caller willing to wait? */
	if (ctxt->spinwait_max_us == 0)
		return -EBUSY;

	/*
	 * Setup a request for the handover. The caller should try to acquire
	 * the console directly when the current state has been modified.
	 */
	new.atom = cur->atom;
	new.req_prio = ctxt->prio;
	if (!nbcon_state_try_cmpxchg(con, cur, &new))
		return -EAGAIN;

	cur->atom = new.atom;

	/* Wait until there is no owner and then acquire the console. */
	for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
		/* On successful acquire, this request is cleared. */
		request_err = nbcon_context_try_acquire_requested(ctxt, cur);
		if (!request_err)
			return 0;

		/*
		 * If the acquire should be aborted, it must be ensured
		 * that the request is removed before returning to caller.
		 */
		if (request_err == -EPERM)
			break;

		udelay(1);

		/* Re-read the state because some time has passed. */
		nbcon_state_read(con, cur);
	}

	/* Timed out or aborted. Carefully remove handover request. */
	do {
		/*
		 * No need to remove request if there is a new waiter. This
		 * can only happen if a higher priority context has taken over
		 * the console or the handover request.
		 */
		if (!nbcon_waiter_matches(cur, ctxt->prio))
			return -EPERM;

		/* Unset request for handover. */
		new.atom = cur->atom;
		new.req_prio = NBCON_PRIO_NONE;
		if (nbcon_state_try_cmpxchg(con, cur, &new)) {
			/*
			 * Request successfully unset. Report failure of
			 * acquiring via handover.
			 */
			cur->atom = new.atom;
			return request_err;
		}

		/*
		 * Unable to remove request. Try to acquire in case
		 * the owner has released the lock.
		 */
	} while (nbcon_context_try_acquire_requested(ctxt, cur));

	/* Lucky timing. The acquire succeeded while removing the request. */
	return 0;
}
526 |
|
527 |
/**
 * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * Acquire the console even in the unsafe state.
 *
 * It can be permitted by setting the 'allow_unsafe_takeover' field only
 * by the final attempt to flush messages in panic().
 *
 * Return:	0 on success. -EPERM when not allowed by the context.
 */
static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
					     struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	if (!ctxt->allow_unsafe_takeover)
		return -EPERM;

	/* Ensure caller is allowed to perform unsafe hostile takeovers. */
	if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
		return -EPERM;

	/*
	 * Check that try_acquire_direct() and try_acquire_handover() returned
	 * -EBUSY in the right situation.
	 */
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
	WARN_ON_ONCE(cur->unsafe != true);

	do {
		new.atom = cur->atom;
		new.cpu = cpu;
		new.prio = ctxt->prio;
		/*
		 * Taking over in the unsafe state marks the console
		 * permanently unsafe (sticky bits, only OR-ed in).
		 */
		new.unsafe |= cur->unsafe_takeover;
		new.unsafe_takeover |= cur->unsafe;

	} while (!nbcon_state_try_cmpxchg(con, cur, &new));

	return 0;
}
571 |
|
572 |
/* Separate buffers used by the panic CPU (see nbcon_context_try_acquire()). */
static struct printk_buffers panic_nbcon_pbufs;
573 |
|
574 |
/**
 * nbcon_context_try_acquire - Try to acquire nbcon console
 * @ctxt:	The context of the caller
 *
 * Return:	True if the console was acquired. False otherwise.
 *
 * If the caller allowed an unsafe hostile takeover, on success the
 * caller should check the current console state to see if it is
 * in an unsafe state. Otherwise, on success the caller may assume
 * the console is not in an unsafe state.
 */
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	int err;

	nbcon_state_read(con, &cur);
try_again:
	/* Escalate: direct -> friendly handover -> unsafe hostile takeover. */
	err = nbcon_context_try_acquire_direct(ctxt, &cur);
	if (err != -EBUSY)
		goto out;

	err = nbcon_context_try_acquire_handover(ctxt, &cur);
	if (err == -EAGAIN)
		goto try_again;
	if (err != -EBUSY)
		goto out;

	err = nbcon_context_try_acquire_hostile(ctxt, &cur);
out:
	if (err)
		return false;

	/* Acquire succeeded. */

	/* Assign the appropriate buffer for this context. */
	if (atomic_read(&panic_cpu) == cpu)
		ctxt->pbufs = &panic_nbcon_pbufs;
	else
		ctxt->pbufs = con->pbufs;

	/* Set the record sequence for this context to print. */
	ctxt->seq = nbcon_seq_read(ctxt->console);

	return true;
}
622 |
|
623 |
static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu, |
624 |
int expected_prio) |
625 |
{ |
626 |
/* |
627 |
* Since consoles can only be acquired by higher priorities, |
628 |
* owning contexts are uniquely identified by @prio. However, |
629 |
* since contexts can unexpectedly lose ownership, it is |
630 |
* possible that later another owner appears with the same |
631 |
* priority. For this reason @cpu is also needed. |
632 |
*/ |
633 |
|
634 |
if (cur->prio != expected_prio) |
635 |
return false; |
636 |
|
637 |
if (cur->cpu != expected_cpu) |
638 |
return false; |
639 |
|
640 |
return true; |
641 |
} |
642 |
|
643 |
/**
 * nbcon_context_release - Release the console
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 */
static void nbcon_context_release(struct nbcon_context *ctxt)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);

	do {
		/* Ownership may already have been lost; then nothing to do. */
		if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
			break;

		new.atom = cur.atom;
		new.prio = NBCON_PRIO_NONE;

		/*
		 * If @unsafe_takeover is set, it is kept set so that
		 * the state remains permanently unsafe.
		 */
		new.unsafe |= cur.unsafe_takeover;

	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));

	ctxt->pbufs = NULL;
}
673 |
|
674 |
/**
 * nbcon_context_can_proceed - Check whether ownership can proceed
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 * @cur:	The current console state
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * Must be invoked when entering the unsafe state to make sure that it still
 * owns the lock. Also must be invoked when exiting the unsafe context
 * to eventually free the lock for a higher priority context which asked
 * for the friendly handover.
 *
 * It can be called inside an unsafe section when the console is just
 * temporary in safe state instead of exiting and entering the unsafe
 * state.
 *
 * Also it can be called in the safe context before doing an expensive
 * safe operation. It does not make sense to do the operation when
 * a higher priority context took the lock.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 */
static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();

	/* Make sure this context still owns the console. */
	if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
		return false;

	/* The console owner can proceed if there is no waiter. */
	if (cur->req_prio == NBCON_PRIO_NONE)
		return true;

	/*
	 * A console owner within an unsafe region is always allowed to
	 * proceed, even if there are waiters. It can perform a handover
	 * when exiting the unsafe region. Otherwise the waiter will
	 * need to perform an unsafe hostile takeover.
	 */
	if (cur->unsafe)
		return true;

	/* Waiters always have higher priorities than owners. */
	WARN_ON_ONCE(cur->req_prio <= cur->prio);

	/*
	 * Having a safe point for take over and eventually a few
	 * duplicated characters or a full line is way better than a
	 * hostile takeover. Post processing can take care of the garbage.
	 * Release and hand over.
	 */
	nbcon_context_release(ctxt);

	/*
	 * It is not clear whether the waiter really took over ownership. The
	 * outermost callsite must make the final decision whether console
	 * ownership is needed for it to proceed. If yes, it must reacquire
	 * ownership (possibly hostile) before carefully proceeding.
	 *
	 * The calling context no longer owns the console so go back all the
	 * way instead of trying to implement reacquire heuristics in tons of
	 * places.
	 */
	return false;
}
744 |
|
745 |
/**
 * nbcon_can_proceed - Check whether ownership can proceed
 * @wctxt:	The write context that was handed to the write function
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * It is used in nbcon_enter_unsafe() to make sure that it still owns the
 * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
 * for a higher priority context which asked for the friendly handover.
 *
 * It can be called inside an unsafe section when the console is just
 * temporary in safe state instead of exiting and entering the unsafe state.
 *
 * Also it can be called in the safe context before doing an expensive safe
 * operation. It does not make sense to do the operation when a higher
 * priority context took the lock.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 */
bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	struct nbcon_state cur;

	/* Take a fresh snapshot; ownership can change at any time. */
	nbcon_state_read(con, &cur);

	return nbcon_context_can_proceed(ctxt, &cur);
}
EXPORT_SYMBOL_GPL(nbcon_can_proceed);
779 |
|
780 |
/* Convenience wrappers to set/clear the unsafe bit for the owning context. */
#define nbcon_context_enter_unsafe(c)	__nbcon_context_update_unsafe(c, true)
#define nbcon_context_exit_unsafe(c)	__nbcon_context_update_unsafe(c, false)

/**
 * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 * @unsafe:	The new value for the unsafe bit
 *
 * Return:	True if the unsafe state was updated and this context still
 *		owns the console. Otherwise false if ownership was handed
 *		over or taken.
 *
 * This function allows console owners to modify the unsafe status of the
 * console.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 *
 * Internal helper to avoid duplicated code.
 */
static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
{
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);

	do {
		/*
		 * The unsafe bit must not be cleared if an
		 * unsafe hostile takeover has occurred.
		 */
		if (!unsafe && cur.unsafe_takeover)
			goto out;

		if (!nbcon_context_can_proceed(ctxt, &cur))
			return false;

		new.atom = cur.atom;
		new.unsafe = unsafe;
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));

	cur.atom = new.atom;
out:
	return nbcon_context_can_proceed(ctxt, &cur);
}
829 |
|
830 |
/**
 * nbcon_enter_unsafe - Enter an unsafe region in the driver
 * @wctxt:	The write context that was handed to the write function
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 */
bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);

	/* Thin public wrapper around the shared unsafe-bit helper. */
	return nbcon_context_enter_unsafe(ctxt);
}
EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
849 |
|
850 |
/** |
851 |
* nbcon_exit_unsafe - Exit an unsafe region in the driver |
852 |
* @wctxt: The write context that was handed to the write function |
853 |
* |
854 |
* Return: True if this context still owns the console. False if |
855 |
* ownership was handed over or taken. |
856 |
* |
857 |
* When this function returns false then the calling context no longer owns |
858 |
* the console and is no longer allowed to go forward. In this case it must |
859 |
* back out immediately and carefully. The buffer content is also no longer |
860 |
* trusted since it no longer belongs to the calling context. |
861 |
*/ |
862 |
bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) |
863 |
{ |
864 |
struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); |
865 |
|
866 |
return nbcon_context_exit_unsafe(ctxt); |
867 |
} |
868 |
EXPORT_SYMBOL_GPL(nbcon_exit_unsafe); |
869 |
|
870 |
/**
 * nbcon_reacquire - Reacquire a console after losing ownership
 * @wctxt:	The write context that was handed to the write function
 *
 * Spins until ownership of the console is acquired again, then resets the
 * write context: @outbuf is cleared (the previous buffer content is no
 * longer trusted) and the current unsafe_takeover state is re-read from
 * the console.
 *
 * NOTE(review): the busy-wait assumes the current owner will release the
 * console eventually — confirm the callers' context allows spinning here.
 */
void nbcon_reacquire(struct nbcon_write_context *wctxt)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	struct nbcon_state cur;

	while (!nbcon_context_try_acquire(ctxt))
		cpu_relax();

	/* Invalidate the old output buffer; it belonged to the lost context. */
	wctxt->outbuf = NULL;
	wctxt->len = 0;
	nbcon_state_read(con, &cur);
	wctxt->unsafe_takeover = cur.unsafe_takeover;
}
EXPORT_SYMBOL_GPL(nbcon_reacquire);
885 |
|
886 |
/**
 * nbcon_emit_next_record - Emit a record in the acquired context
 * @wctxt:	The write context that will be handed to the write function
 * @use_atomic:	True if the write_atomic callback is to be used
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context. If the caller
 * wants to do more it must reacquire the console first.
 *
 * When true is returned, @wctxt->ctxt.backlog indicates whether there are
 * still records pending in the ringbuffer.
 */
static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
	struct printk_message pmsg = {
		.pbufs = ctxt->pbufs,
	};
	unsigned long con_dropped;
	struct nbcon_state cur;
	unsigned long dropped;
	bool done;

	/*
	 * The printk buffers are filled within an unsafe section. This
	 * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
	 * clobbering each other.
	 */

	if (!nbcon_context_enter_unsafe(ctxt))
		return false;

	ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
	if (!ctxt->backlog)
		return nbcon_context_exit_unsafe(ctxt);

	/*
	 * @con->dropped is not protected in case of an unsafe hostile
	 * takeover. In that situation the update can be racy so
	 * annotate it accordingly.
	 */
	con_dropped = data_race(READ_ONCE(con->dropped));

	dropped = con_dropped + pmsg.dropped;
	if (dropped && !is_extended)
		console_prepend_dropped(&pmsg, dropped);

	if (!nbcon_context_exit_unsafe(ctxt))
		return false;

	/* For skipped records just update seq/dropped in @con. */
	if (pmsg.outbuf_len == 0)
		goto update_con;

	/* Initialize the write context for driver callbacks. */
	wctxt->outbuf = &pmsg.pbufs->outbuf[0];
	wctxt->len = pmsg.outbuf_len;
	nbcon_state_read(con, &cur);
	wctxt->unsafe_takeover = cur.unsafe_takeover;

	/* Select the driver callback matching the caller's context. */
	if (use_atomic &&
	    con->write_atomic) {
		done = con->write_atomic(con, wctxt);

	} else if (!use_atomic &&
		   con->write_thread &&
		   con->kthread) {
		/* The thread callback may only run from the printer thread. */
		WARN_ON_ONCE(con->kthread != current);
		done = con->write_thread(con, wctxt);

	} else {
		/* Caller requested a callback the console does not provide. */
		WARN_ON_ONCE(1);
		done = false;
	}

	if (!done) {
		/*
		 * The emit was aborted. This may have been due to a loss
		 * of ownership. Explicitly release ownership to be sure.
		 */
		nbcon_context_release(ctxt);
		return false;
	}

	/*
	 * Since any dropped message was successfully output, reset the
	 * dropped count for the console.
	 */
	dropped = 0;
update_con:
	/*
	 * The dropped count and the sequence number are updated within an
	 * unsafe section. This limits update races to the panic context and
	 * allows the panic context to win.
	 */

	if (!nbcon_context_enter_unsafe(ctxt))
		return false;

	if (dropped != con_dropped) {
		/* Counterpart to the READ_ONCE() above. */
		WRITE_ONCE(con->dropped, dropped);
	}

	nbcon_seq_try_update(ctxt, pmsg.seq + 1);

	return nbcon_context_exit_unsafe(ctxt);
}
1001 |
|
1002 |
/** |
1003 |
* nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup |
1004 |
* @con: Console to operate on |
1005 |
* @ctxt: The acquire context that contains the state |
1006 |
* at console_acquire() |
1007 |
* |
1008 |
* Return: True if the thread should shutdown or if the console is |
1009 |
* allowed to print and a record is available. False otherwise. |
1010 |
* |
1011 |
* After the thread wakes up, it must first check if it should shutdown before |
1012 |
* attempting any printing. |
1013 |
*/ |
1014 |
static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt) |
1015 |
{ |
1016 |
struct nbcon_state cur; |
1017 |
bool is_usable; |
1018 |
short flags; |
1019 |
int cookie; |
1020 |
|
1021 |
do { |
1022 |
if (kthread_should_stop()) |
1023 |
return true; |
1024 |
|
1025 |
cookie = console_srcu_read_lock(); |
1026 |
flags = console_srcu_read_flags(con); |
1027 |
is_usable = console_is_usable(con, flags, false); |
1028 |
console_srcu_read_unlock(cookie); |
1029 |
|
1030 |
if (!is_usable) |
1031 |
return false; |
1032 |
|
1033 |
nbcon_state_read(con, &cur); |
1034 |
|
1035 |
/* |
1036 |
* Some other CPU is using the console. Patiently poll |
1037 |
* to see if it becomes available. This is more efficient |
1038 |
* than having every release trigger an irq_work to wake |
1039 |
* the kthread. |
1040 |
*/ |
1041 |
msleep(1); |
1042 |
} while (cur.prio != NBCON_PRIO_NONE); |
1043 |
|
1044 |
/* Bring the sequence in @ctxt up to date */ |
1045 |
nbcon_context_seq_set(ctxt); |
1046 |
|
1047 |
return prb_read_valid(prb, ctxt->seq, NULL); |
1048 |
} |
1049 |
|
1050 |
/**
 * nbcon_kthread_func - The printer thread function
 * @__console:	Console to operate on
 *
 * Sleeps until woken (or polled awake via nbcon_kthread_should_wakeup()),
 * then emits records with NBCON_PRIO_NORMAL until the backlog is drained,
 * and goes back to sleep. Returns only when the kthread is stopped.
 */
static int nbcon_kthread_func(void *__console)
{
	struct console *con = __console;
	struct nbcon_write_context wctxt = {
		.ctxt.console = con,
		.ctxt.prio = NBCON_PRIO_NORMAL,
	};
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	struct uart_port *port = NULL;
	unsigned long flags;
	short con_flags;
	bool backlog;
	int cookie;
	int ret;

	/* Not every console provides the uart_port() callback. */
	if (con->uart_port)
		port = con->uart_port(con);

wait_for_event:
	/*
	 * Guarantee this task is visible on the rcuwait before
	 * checking the wake condition.
	 *
	 * The full memory barrier within set_current_state() of
	 * ___rcuwait_wait_event() pairs with the full memory
	 * barrier within rcuwait_has_sleeper().
	 *
	 * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
	 */
	ret = rcuwait_wait_event(&con->rcuwait,
				 nbcon_kthread_should_wakeup(con, ctxt),
				 TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */

	if (kthread_should_stop())
		return 0;

	/* Wait was interrupted by a spurious signal, go back to sleep. */
	if (ret)
		goto wait_for_event;

	do {
		backlog = false;

		cookie = console_srcu_read_lock();

		con_flags = console_srcu_read_flags(con);

		if (console_is_usable(con, con_flags, false)) {
			/*
			 * Ensure this stays on the CPU to make handover and
			 * takeover possible.
			 */
			if (port)
				spin_lock_irqsave(&port->lock, flags);
			else
				migrate_disable();

			if (nbcon_context_try_acquire(ctxt)) {
				/*
				 * If the emit fails, this context is no
				 * longer the owner.
				 */
				if (nbcon_emit_next_record(&wctxt, false)) {
					nbcon_context_release(ctxt);
					backlog = ctxt->backlog;
				}
			}

			if (port)
				spin_unlock_irqrestore(&port->lock, flags);
			else
				migrate_enable();
		}

		console_srcu_read_unlock(cookie);

		/* Give other tasks a chance to run between records. */
		cond_resched();

	} while (backlog);

	goto wait_for_event;
}
1136 |
|
1137 |
/** |
1138 |
* nbcon_irq_work - irq work to wake printk thread |
1139 |
* @irq_work: The irq work to operate on |
1140 |
*/ |
1141 |
static void nbcon_irq_work(struct irq_work *irq_work) |
1142 |
{ |
1143 |
struct console *con = container_of(irq_work, struct console, irq_work); |
1144 |
|
1145 |
nbcon_kthread_wake(con); |
1146 |
} |
1147 |
|
1148 |
/*
 * rcuwait_has_sleeper - Check whether a task is waiting on the rcuwait
 * @w: The rcuwait to check
 *
 * Returns true if a task is currently registered on @w. The barrier
 * ordering below is load-bearing; do not reorder these statements.
 */
static inline bool rcuwait_has_sleeper(struct rcuwait *w)
{
	bool has_sleeper;

	rcu_read_lock();
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the rcuwait is empty.
	 *
	 * This full memory barrier pairs with the full memory barrier within
	 * set_current_state() of ___rcuwait_wait_event(), which is called
	 * after prepare_to_rcuwait() adds the waiter but before it has
	 * checked the wait condition.
	 *
	 * This pairs with nbcon_kthread_func:A.
	 */
	smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
	has_sleeper = !!rcu_dereference(w->task);
	rcu_read_unlock();

	return has_sleeper;
}
1170 |
|
1171 |
/** |
1172 |
* nbcon_wake_threads - Wake up printing threads using irq_work |
1173 |
*/ |
1174 |
void nbcon_wake_threads(void) |
1175 |
{ |
1176 |
struct console *con; |
1177 |
int cookie; |
1178 |
|
1179 |
cookie = console_srcu_read_lock(); |
1180 |
for_each_console_srcu(con) { |
1181 |
/* |
1182 |
* Only schedule irq_work if the printing thread is |
1183 |
* actively waiting. If not waiting, the thread will |
1184 |
* notice by itself that it has work to do. |
1185 |
*/ |
1186 |
if (con->kthread && rcuwait_has_sleeper(&con->rcuwait)) |
1187 |
irq_work_queue(&con->irq_work); |
1188 |
} |
1189 |
console_srcu_read_unlock(cookie); |
1190 |
} |
1191 |
|
1192 |
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
/* Fallback counter used before the per-CPU areas are ready (early boot). */
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
1195 |
|
1196 |
/**
 * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
 *
 * Return:	Either a pointer to the per CPU emergency nesting counter of
 *		the current CPU or to the init data during early boot.
 *
 * NOTE(review): this_cpu_ptr() presumes the caller prevents CPU migration
 * (nbcon_cpu_emergency_enter() disables preemption first) — confirm all
 * callers satisfy this.
 */
static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
{
	/*
	 * The value of __printk_percpu_data_ready gets set in normal
	 * context and before SMP initialization. As a result it could
	 * never change while inside an nbcon emergency section.
	 */
	if (!printk_percpu_data_ready())
		return &early_nbcon_pcpu_emergency_nesting;

	return this_cpu_ptr(&nbcon_pcpu_emergency_nesting);
}
1214 |
|
1215 |
/**
 * nbcon_atomic_emit_one - Print one record for an nbcon console using the
 *				write_atomic() callback
 * @wctxt:	An initialized write context struct to use
 *		for this context
 *
 * Return:	False if the given console could not print a record or there
 *		are no more records to print, otherwise true.
 *
 * This is an internal helper to handle the locking of the console before
 * calling nbcon_emit_next_record().
 */
static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);

	if (!nbcon_context_try_acquire(ctxt))
		return false;

	/*
	 * nbcon_emit_next_record() returns false when the console was
	 * handed over or taken over. In both cases the context is no
	 * longer valid.
	 */
	if (!nbcon_emit_next_record(wctxt, true))
		return false;

	nbcon_context_release(ctxt);

	/*
	 * @ctxt->backlog is a field of the caller-provided context struct,
	 * written by nbcon_emit_next_record() while this context owned the
	 * console; reading it after the release is safe.
	 */
	return ctxt->backlog;
}
1246 |
|
1247 |
/** |
1248 |
* nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon |
1249 |
* printing on the current CPU |
1250 |
* |
1251 |
* Return: The nbcon_prio to use for acquiring an nbcon console in this |
1252 |
* context for printing. |
1253 |
*/ |
1254 |
enum nbcon_prio nbcon_get_default_prio(void) |
1255 |
{ |
1256 |
unsigned int *cpu_emergency_nesting; |
1257 |
|
1258 |
if (this_cpu_in_panic()) |
1259 |
return NBCON_PRIO_PANIC; |
1260 |
|
1261 |
cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting(); |
1262 |
if (*cpu_emergency_nesting) |
1263 |
return NBCON_PRIO_EMERGENCY; |
1264 |
|
1265 |
return NBCON_PRIO_NORMAL; |
1266 |
} |
1267 |
|
1268 |
/** |
1269 |
* nbcon_atomic_emit_next_record - Print one record for an nbcon console |
1270 |
* using the write_atomic() callback |
1271 |
* @con: The console to print on |
1272 |
* |
1273 |
* Return: True if a record could be printed, otherwise false. |
1274 |
* Context: Any context which could not be migrated to another CPU. |
1275 |
* |
1276 |
* This function is meant to be called by console_flush_all() to print records |
1277 |
* on nbcon consoles using the write_atomic() callback. Essentially it is the |
1278 |
* nbcon version of console_emit_next_record(). |
1279 |
*/ |
1280 |
bool nbcon_atomic_emit_next_record(struct console *con) |
1281 |
{ |
1282 |
struct uart_port *port = con->uart_port(con); |
1283 |
static DEFINE_SPINLOCK(shared_spinlock); |
1284 |
bool progress = false; |
1285 |
enum nbcon_prio prio; |
1286 |
unsigned long flags; |
1287 |
|
1288 |
/* |
1289 |
* If there is no port lock available, fallback to a shared |
1290 |
* spinlock. This serves to provide the necessary type of |
1291 |
* migration/preemption disabling while printing. |
1292 |
*/ |
1293 |
if (port) |
1294 |
spin_lock_irqsave(&port->lock, flags); |
1295 |
else |
1296 |
spin_lock_irqsave(&shared_spinlock, flags); |
1297 |
|
1298 |
/* |
1299 |
* Do not emit for EMERGENCY priority. The console will be |
1300 |
* explicitly flushed when exiting the emergency section. |
1301 |
*/ |
1302 |
prio = nbcon_get_default_prio(); |
1303 |
if (prio != NBCON_PRIO_EMERGENCY) { |
1304 |
struct nbcon_write_context wctxt = { }; |
1305 |
struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt); |
1306 |
|
1307 |
ctxt->console = con; |
1308 |
ctxt->prio = prio; |
1309 |
|
1310 |
progress = nbcon_atomic_emit_one(&wctxt); |
1311 |
} |
1312 |
|
1313 |
if (port) |
1314 |
spin_unlock_irqrestore(&port->lock, flags); |
1315 |
else |
1316 |
spin_unlock_irqrestore(&shared_spinlock, flags); |
1317 |
|
1318 |
return progress; |
1319 |
} |
1320 |
|
1321 |
/**
 * __nbcon_atomic_flush_all - Flush all nbcon consoles using their
 *					write_atomic() callback
 * @stop_seq:			Flush up until this record
 * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
 *
 * Loops over all registered nbcon consoles and emits records until no
 * console makes progress anymore or @stop_seq is reached on each.
 */
static void __nbcon_atomic_flush_all(u64 stop_seq, bool allow_unsafe_takeover)
{
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	enum nbcon_prio prio = nbcon_get_default_prio();
	struct console *con;
	bool any_progress;
	int cookie;

	do {
		any_progress = false;

		cookie = console_srcu_read_lock();
		for_each_console_srcu(con) {
			short flags = console_srcu_read_flags(con);

			/* Only nbcon consoles are handled here. */
			if (!(flags & CON_NBCON))
				continue;

			if (!console_is_usable(con, flags, true))
				continue;

			/* This console is already caught up. */
			if (nbcon_seq_read(con) >= stop_seq)
				continue;

			/* The context is reused for each console. */
			memset(ctxt, 0, sizeof(*ctxt));
			ctxt->console = con;
			ctxt->spinwait_max_us = 2000;
			ctxt->prio = prio;
			ctxt->allow_unsafe_takeover = allow_unsafe_takeover;

			any_progress |= nbcon_atomic_emit_one(&wctxt);
		}
		console_srcu_read_unlock(cookie);
	} while (any_progress);
}
1363 |
|
1364 |
/** |
1365 |
* nbcon_atomic_flush_all - Flush all nbcon consoles using their |
1366 |
* write_atomic() callback |
1367 |
* |
1368 |
* Flush the backlog up through the currently newest record. Any new |
1369 |
* records added while flushing will not be flushed. This is to avoid |
1370 |
* one CPU printing unbounded because other CPUs continue to add records. |
1371 |
* |
1372 |
* Context: Any context which could not be migrated to another CPU. |
1373 |
*/ |
1374 |
void nbcon_atomic_flush_all(void) |
1375 |
{ |
1376 |
__nbcon_atomic_flush_all(prb_next_reserve_seq(prb), false); |
1377 |
} |
1378 |
|
1379 |
/** |
1380 |
* nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their |
1381 |
* write_atomic() callback and allowing unsafe hostile takeovers |
1382 |
* |
1383 |
* Flush the backlog up through the currently newest record. Unsafe hostile |
1384 |
* takeovers will be performed, if necessary. |
1385 |
* |
1386 |
* Context: Any context which could not be migrated to another CPU. |
1387 |
*/ |
1388 |
void nbcon_atomic_flush_unsafe(void) |
1389 |
{ |
1390 |
__nbcon_atomic_flush_all(prb_next_reserve_seq(prb), true); |
1391 |
} |
1392 |
|
1393 |
/**
 * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
 *				messages for that CPU are only stored
 *
 * Upon exiting the emergency section, all stored messages are flushed.
 *
 * Context:	Any context. Disables preemption.
 *
 * When within an emergency section, no printing occurs on that CPU. This
 * is to allow all emergency messages to be dumped into the ringbuffer before
 * flushing the ringbuffer. The actual printing occurs when exiting the
 * outermost emergency section.
 */
void nbcon_cpu_emergency_enter(void)
{
	unsigned int *cpu_emergency_nesting;

	/*
	 * Preemption must be disabled before fetching the per-CPU counter
	 * so the increment lands on this CPU's counter.
	 */
	preempt_disable();

	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
	(*cpu_emergency_nesting)++;
}
1415 |
|
1416 |
/**
 * nbcon_cpu_emergency_exit - Exit an emergency section and flush the
 *				stored messages
 *
 * Flushing only occurs when exiting all nesting for the CPU.
 *
 * Context:	Any context. Enables preemption.
 */
void nbcon_cpu_emergency_exit(void)
{
	unsigned int *cpu_emergency_nesting;

	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();

	/* Exit without a matching enter is a caller bug. */
	WARN_ON_ONCE(*cpu_emergency_nesting == 0);

	/*
	 * Trigger the flush while the nesting count is still 1, i.e. before
	 * this CPU formally leaves the emergency section.
	 */
	if (*cpu_emergency_nesting == 1)
		printk_trigger_flush();

	/* Undo the nesting count of nbcon_cpu_emergency_enter(). */
	(*cpu_emergency_nesting)--;

	preempt_enable();
}
1440 |
|
1441 |
/**
 * nbcon_kthread_stop - Stop a printer thread
 * @con:	Console to operate on
 *
 * No-op if the console has no printer thread. Clears @con->kthread so a
 * later nbcon_kthread_create() can start a fresh thread.
 */
static void nbcon_kthread_stop(struct console *con)
{
	/* The console list lock serializes kthread start/stop. */
	lockdep_assert_console_list_lock_held();

	if (!con->kthread)
		return;

	kthread_stop(con->kthread);
	con->kthread = NULL;
}
1455 |
|
1456 |
/**
 * nbcon_kthread_create - Create a printer thread
 * @con:	Console to operate on
 *
 * If it fails, let the console proceed. The atomic part might
 * be usable and useful.
 */
void nbcon_kthread_create(struct console *con)
{
	struct task_struct *kt;

	/* The console list lock serializes kthread start/stop. */
	lockdep_assert_console_list_lock_held();

	/* Only nbcon consoles with a thread callback get a thread. */
	if (!(con->flags & CON_NBCON) || !con->write_thread)
		return;

	/* Too early in boot, or the thread already exists. */
	if (!printk_threads_enabled || con->kthread)
		return;

	/*
	 * Printer threads cannot be started as long as any boot console is
	 * registered because there is no way to synchronize the hardware
	 * registers between boot console code and regular console code.
	 */
	if (have_boot_console)
		return;

	kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
	if (IS_ERR(kt)) {
		con_printk(KERN_ERR, con, "failed to start printing thread\n");
		return;
	}

	con->kthread = kt;

	/*
	 * It is important that console printing threads are scheduled
	 * shortly after a printk call and with generous runtime budgets.
	 */
	sched_set_normal(con->kthread, -20);
}
1497 |
|
1498 |
/*
 * printk_setup_threads - Enable threaded printing and start printer threads
 * for all registered consoles (early initcall).
 */
static int __init printk_setup_threads(void)
{
	struct console *con;

	console_list_lock();
	/* Allow nbcon_kthread_create() to actually create threads. */
	printk_threads_enabled = true;
	for_each_console(con)
		nbcon_kthread_create(con);
	/* On PREEMPT_RT, legacy consoles also get a printing thread. */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && printing_via_unlock)
		nbcon_legacy_kthread_create();
	console_list_unlock();
	return 0;
}
early_initcall(printk_setup_threads);
1512 |
|
1513 |
/** |
1514 |
* nbcon_alloc - Allocate buffers needed by the nbcon console |
1515 |
* @con: Console to allocate buffers for |
1516 |
* |
1517 |
* Return: True on success. False otherwise and the console cannot |
1518 |
* be used. |
1519 |
* |
1520 |
* This is not part of nbcon_init() because buffer allocation must |
1521 |
* be performed earlier in the console registration process. |
1522 |
*/ |
1523 |
bool nbcon_alloc(struct console *con) |
1524 |
{ |
1525 |
if (con->flags & CON_BOOT) { |
1526 |
/* |
1527 |
* Boot console printing is synchronized with legacy console |
1528 |
* printing, so boot consoles can share the same global printk |
1529 |
* buffers. |
1530 |
*/ |
1531 |
con->pbufs = &printk_shared_pbufs; |
1532 |
} else { |
1533 |
con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL); |
1534 |
if (!con->pbufs) { |
1535 |
con_printk(KERN_ERR, con, "failed to allocate printing buffer\n"); |
1536 |
return false; |
1537 |
} |
1538 |
} |
1539 |
|
1540 |
return true; |
1541 |
} |
1542 |
|
1543 |
/**
 * nbcon_init - Initialize the nbcon console specific data
 * @con:	Console to initialize
 *
 * nbcon_alloc() *must* be called and succeed before this function
 * is called.
 *
 * This function expects that the legacy @con->seq has been set.
 */
void nbcon_init(struct console *con)
{
	/* Start out unlocked and safe (all-zero state). */
	struct nbcon_state state = { };

	/* nbcon_alloc() must have been called and successful! */
	BUG_ON(!con->pbufs);

	rcuwait_init(&con->rcuwait);
	init_irq_work(&con->irq_work, nbcon_irq_work);
	/* Seed the nbcon sequence from the legacy console sequence. */
	nbcon_seq_force(con, con->seq);
	nbcon_state_set(con, &state);
	nbcon_kthread_create(con);
}
1565 |
|
1566 |
/** |
1567 |
* nbcon_free - Free and cleanup the nbcon console specific data |
1568 |
* @con: Console to free/cleanup nbcon data |
1569 |
*/ |
1570 |
void nbcon_free(struct console *con) |
1571 |
{ |
1572 |
struct nbcon_state state = { }; |
1573 |
|
1574 |
nbcon_kthread_stop(con); |
1575 |
nbcon_state_set(con, &state); |
1576 |
|
1577 |
/* Boot consoles share global printk buffers. */ |
1578 |
if (!(con->flags & CON_BOOT)) |
1579 |
kfree(con->pbufs); |
1580 |
|
1581 |
con->pbufs = NULL; |
1582 |
} |
1583 |
|
1584 |
static inline bool uart_is_nbcon(struct uart_port *up) |
1585 |
{ |
1586 |
int cookie; |
1587 |
bool ret; |
1588 |
|
1589 |
if (!uart_console(up)) |
1590 |
return false; |
1591 |
|
1592 |
cookie = console_srcu_read_lock(); |
1593 |
ret = (console_srcu_read_flags(up->cons) & CON_NBCON); |
1594 |
console_srcu_read_unlock(cookie); |
1595 |
return ret; |
1596 |
} |
1597 |
|
1598 |
/**
 * nbcon_handle_port_lock - The second half of the port locking wrapper
 * @up:		The uart port whose @lock was locked
 *
 * The uart_port_lock() wrappers will first lock the spin_lock @up->lock.
 * Then this function is called to implement nbcon-specific processing.
 *
 * If @up is an nbcon console, this console will be acquired and marked as
 * unsafe. Otherwise this function does nothing.
 *
 * nbcon consoles acquired via the port lock wrapper always use priority
 * NBCON_PRIO_NORMAL.
 */
void nbcon_handle_port_lock(struct uart_port *up)
{
	struct console *con = up->cons;
	struct nbcon_context ctxt;

	if (!uart_is_nbcon(up))
		return;

	/* Nested port locking is a caller bug. */
	WARN_ON_ONCE(con->locked_port);

	do {
		/*
		 * The context must be re-initialized on every attempt:
		 * a failed try_acquire or enter_unsafe invalidates it.
		 */
		do {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.console = con;
			ctxt.prio = NBCON_PRIO_NORMAL;
		} while (!nbcon_context_try_acquire(&ctxt));

		/*
		 * If entering the unsafe state fails, ownership was lost
		 * between acquire and here; start over.
		 */
	} while (!nbcon_context_enter_unsafe(&ctxt));

	con->locked_port = true;
}
EXPORT_SYMBOL_GPL(nbcon_handle_port_lock);
1633 |
|
1634 |
/**
 * nbcon_handle_port_unlock - The first half of the port unlocking wrapper
 * @up:		The uart port whose @lock is about to be unlocked
 *
 * The uart_port_unlock() wrappers will first call this function to implement
 * nbcon-specific processing. Then afterwards the uart_port_unlock() wrappers
 * will unlock the spin_lock @up->lock.
 *
 * If @up is an nbcon console, the console will be marked as safe and
 * released. Otherwise this function does nothing.
 *
 * nbcon consoles acquired via the port lock wrapper always use priority
 * NBCON_PRIO_NORMAL.
 */
void nbcon_handle_port_unlock(struct uart_port *up)
{
	struct console *con = up->cons;
	struct nbcon_context ctxt = {
		.console	= con,
		.prio		= NBCON_PRIO_NORMAL,
	};

	if (!uart_is_nbcon(up))
		return;

	/* Unlock without a matching nbcon_handle_port_lock() is a bug. */
	WARN_ON_ONCE(!con->locked_port);

	/*
	 * If exiting the unsafe state fails, ownership was already handed
	 * over or taken, so there is nothing left to release.
	 */
	if (nbcon_context_exit_unsafe(&ctxt))
		nbcon_context_release(&ctxt);

	con->locked_port = false;
}
EXPORT_SYMBOL_GPL(nbcon_handle_port_unlock);
1667 |
|
1668 |
/** |
1669 |
* printk_kthread_shutdown - shutdown all threaded printers |
1670 |
* |
1671 |
* On system shutdown all threaded printers are stopped. This allows printk |
1672 |
* to transition back to atomic printing, thus providing a robust mechanism |
1673 |
* for the final shutdown/reboot messages to be output. |
1674 |
*/ |
1675 |
static void printk_kthread_shutdown(void) |
1676 |
{ |
1677 |
struct console *con; |
1678 |
|
1679 |
console_list_lock(); |
1680 |
for_each_console(con) { |
1681 |
if (con->flags & CON_NBCON) |
1682 |
nbcon_kthread_stop(con); |
1683 |
} |
1684 |
console_list_unlock(); |
1685 |
} |
1686 |
|
1687 |
/* Hook the printer-thread shutdown into the system shutdown/reboot path. */
static struct syscore_ops printk_syscore_ops = {
	.shutdown = printk_kthread_shutdown,
};

/* Register the syscore shutdown hook during device initcalls. */
static int __init printk_init_ops(void)
{
	register_syscore_ops(&printk_syscore_ops);
	return 0;
}
device_initcall(printk_init_ops);