Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 916954 | Differences between
and this patch

Collapse All | Expand All

(-)a/arch/arm/Kconfig (-1 / +3 lines)
Lines 34-39 config ARM Link Here
34
	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
34
	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
35
	select ARCH_SUPPORTS_ATOMIC_RMW
35
	select ARCH_SUPPORTS_ATOMIC_RMW
36
	select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
36
	select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
37
	select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
37
	select ARCH_USE_BUILTIN_BSWAP
38
	select ARCH_USE_BUILTIN_BSWAP
38
	select ARCH_USE_CMPXCHG_LOCKREF
39
	select ARCH_USE_CMPXCHG_LOCKREF
39
	select ARCH_USE_MEMTEST
40
	select ARCH_USE_MEMTEST
Lines 73-79 config ARM Link Here
73
	select HAS_IOPORT
74
	select HAS_IOPORT
74
	select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
75
	select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
75
	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
76
	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
76
	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
77
	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
77
	select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
78
	select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
78
	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
79
	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
79
	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
80
	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
Lines 118-123 config ARM Link Here
118
	select HAVE_PERF_EVENTS
119
	select HAVE_PERF_EVENTS
119
	select HAVE_PERF_REGS
120
	select HAVE_PERF_REGS
120
	select HAVE_PERF_USER_STACK_DUMP
121
	select HAVE_PERF_USER_STACK_DUMP
122
	select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
121
	select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
123
	select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
122
	select HAVE_REGS_AND_STACK_ACCESS_API
124
	select HAVE_REGS_AND_STACK_ACCESS_API
123
	select HAVE_RSEQ
125
	select HAVE_RSEQ
(-)a/arch/arm/mm/fault.c (+6 lines)
Lines 404-409 do_translation_fault(unsigned long addr, unsigned int fsr, Link Here
404
	if (addr < TASK_SIZE)
404
	if (addr < TASK_SIZE)
405
		return do_page_fault(addr, fsr, regs);
405
		return do_page_fault(addr, fsr, regs);
406
406
407
	if (interrupts_enabled(regs))
408
		local_irq_enable();
409
407
	if (user_mode(regs))
410
	if (user_mode(regs))
408
		goto bad_area;
411
		goto bad_area;
409
412
Lines 474-479 do_translation_fault(unsigned long addr, unsigned int fsr, Link Here
474
static int
477
static int
475
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
478
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
476
{
479
{
480
	if (interrupts_enabled(regs))
481
		local_irq_enable();
482
477
	do_bad_area(addr, fsr, regs);
483
	do_bad_area(addr, fsr, regs);
478
	return 0;
484
	return 0;
479
}
485
}
(-)a/arch/arm/vfp/vfpmodule.c (-21 / +53 lines)
Lines 55-60 extern unsigned int VFP_arch_feroceon __alias(VFP_arch); Link Here
55
 */
55
 */
56
union vfp_state *vfp_current_hw_state[NR_CPUS];
56
union vfp_state *vfp_current_hw_state[NR_CPUS];
57
57
58
/*
59
 * Claim ownership of the VFP unit.
60
 *
61
 * The caller may change VFP registers until vfp_unlock() is called.
62
 *
63
 * local_bh_disable() is used to disable preemption and to disable VFP
64
 * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
65
 * not sufficient because it only serializes soft interrupt related sections
66
 * via a local lock, but stays preemptible. Disabling preemption is the right
67
 * choice here as bottom half processing is always in thread context on RT
68
 * kernels so it implicitly prevents bottom half processing as well.
69
 */
70
static void vfp_lock(void)
71
{
72
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
73
		local_bh_disable();
74
	else
75
		preempt_disable();
76
}
77
78
static void vfp_unlock(void)
79
{
80
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
81
		local_bh_enable();
82
	else
83
		preempt_enable();
84
}
85
58
/*
86
/*
59
 * Is 'thread's most up to date state stored in this CPUs hardware?
87
 * Is 'thread's most up to date state stored in this CPUs hardware?
60
 * Must be called from non-preemptible context.
88
 * Must be called from non-preemptible context.
Lines 240-246 static void vfp_panic(char *reason, u32 inst) Link Here
240
/*
268
/*
241
 * Process bitmask of exception conditions.
269
 * Process bitmask of exception conditions.
242
 */
270
 */
243
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
271
static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
244
{
272
{
245
	int si_code = 0;
273
	int si_code = 0;
246
274
Lines 248-255 static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ Link Here
248
276
249
	if (exceptions == VFP_EXCEPTION_ERROR) {
277
	if (exceptions == VFP_EXCEPTION_ERROR) {
250
		vfp_panic("unhandled bounce", inst);
278
		vfp_panic("unhandled bounce", inst);
251
		vfp_raise_sigfpe(FPE_FLTINV, regs);
279
		return FPE_FLTINV;
252
		return;
253
	}
280
	}
254
281
255
	/*
282
	/*
Lines 277-284 static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ Link Here
277
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
304
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
278
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
305
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
279
306
280
	if (si_code)
307
	return si_code;
281
		vfp_raise_sigfpe(si_code, regs);
282
}
308
}
283
309
284
/*
310
/*
Lines 324-329 static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) Link Here
324
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
350
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
325
{
351
{
326
	u32 fpscr, orig_fpscr, fpsid, exceptions;
352
	u32 fpscr, orig_fpscr, fpsid, exceptions;
353
	int si_code2 = 0;
354
	int si_code = 0;
327
355
328
	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
356
	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
329
357
Lines 369-376 static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) Link Here
369
		 * unallocated VFP instruction but with FPSCR.IXE set and not
397
		 * unallocated VFP instruction but with FPSCR.IXE set and not
370
		 * on VFP subarch 1.
398
		 * on VFP subarch 1.
371
		 */
399
		 */
372
		 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
400
		si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
373
		return;
401
		goto exit;
374
	}
402
	}
375
403
376
	/*
404
	/*
Lines 394-407 static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) Link Here
394
	 */
422
	 */
395
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
423
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
396
	if (exceptions)
424
	if (exceptions)
397
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
425
		si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
398
426
399
	/*
427
	/*
400
	 * If there isn't a second FP instruction, exit now. Note that
428
	 * If there isn't a second FP instruction, exit now. Note that
401
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
429
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
402
	 */
430
	 */
403
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
431
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
404
		return;
432
		goto exit;
405
433
406
	/*
434
	/*
407
	 * The barrier() here prevents fpinst2 being read
435
	 * The barrier() here prevents fpinst2 being read
Lines 413-419 static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) Link Here
413
 emulate:
441
 emulate:
414
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
442
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
415
	if (exceptions)
443
	if (exceptions)
416
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
444
		si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
445
exit:
446
	vfp_unlock();
447
	if (si_code2)
448
		vfp_raise_sigfpe(si_code2, regs);
449
	if (si_code)
450
		vfp_raise_sigfpe(si_code, regs);
417
}
451
}
418
452
419
static void vfp_enable(void *unused)
453
static void vfp_enable(void *unused)
Lines 512-522 static inline void vfp_pm_init(void) { } Link Here
512
 */
546
 */
513
void vfp_sync_hwstate(struct thread_info *thread)
547
void vfp_sync_hwstate(struct thread_info *thread)
514
{
548
{
515
	unsigned int cpu = get_cpu();
549
	vfp_lock();
516
550
517
	local_bh_disable();
551
	if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
518
519
	if (vfp_state_in_hw(cpu, thread)) {
520
		u32 fpexc = fmrx(FPEXC);
552
		u32 fpexc = fmrx(FPEXC);
521
553
522
		/*
554
		/*
Lines 527-534 void vfp_sync_hwstate(struct thread_info *thread) Link Here
527
		fmxr(FPEXC, fpexc);
559
		fmxr(FPEXC, fpexc);
528
	}
560
	}
529
561
530
	local_bh_enable();
562
	vfp_unlock();
531
	put_cpu();
532
}
563
}
533
564
534
/* Ensure that the thread reloads the hardware VFP state on the next use. */
565
/* Ensure that the thread reloads the hardware VFP state on the next use. */
Lines 683-689 static int vfp_support_entry(struct pt_regs *regs, u32 trigger) Link Here
683
	if (!user_mode(regs))
714
	if (!user_mode(regs))
684
		return vfp_kmode_exception(regs, trigger);
715
		return vfp_kmode_exception(regs, trigger);
685
716
686
	local_bh_disable();
717
	vfp_lock();
687
	fpexc = fmrx(FPEXC);
718
	fpexc = fmrx(FPEXC);
688
719
689
	/*
720
	/*
Lines 748-753 static int vfp_support_entry(struct pt_regs *regs, u32 trigger) Link Here
748
		 * replay the instruction that trapped.
779
		 * replay the instruction that trapped.
749
		 */
780
		 */
750
		fmxr(FPEXC, fpexc);
781
		fmxr(FPEXC, fpexc);
782
		vfp_unlock();
751
	} else {
783
	} else {
752
		/* Check for synchronous or asynchronous exceptions */
784
		/* Check for synchronous or asynchronous exceptions */
753
		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
785
		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
Lines 762-778 static int vfp_support_entry(struct pt_regs *regs, u32 trigger) Link Here
762
			if (!(fpscr & FPSCR_IXE)) {
794
			if (!(fpscr & FPSCR_IXE)) {
763
				if (!(fpscr & FPSCR_LENGTH_MASK)) {
795
				if (!(fpscr & FPSCR_LENGTH_MASK)) {
764
					pr_debug("not VFP\n");
796
					pr_debug("not VFP\n");
765
					local_bh_enable();
797
					vfp_unlock();
766
					return -ENOEXEC;
798
					return -ENOEXEC;
767
				}
799
				}
768
				fpexc |= FPEXC_DEX;
800
				fpexc |= FPEXC_DEX;
769
			}
801
			}
770
		}
802
		}
771
bounce:		regs->ARM_pc += 4;
803
bounce:		regs->ARM_pc += 4;
804
		/* VFP_bounce() will invoke vfp_unlock() */
772
		VFP_bounce(trigger, fpexc, regs);
805
		VFP_bounce(trigger, fpexc, regs);
773
	}
806
	}
774
807
775
	local_bh_enable();
776
	return 0;
808
	return 0;
777
}
809
}
778
810
Lines 819-825 void kernel_neon_begin(void) Link Here
819
	unsigned int cpu;
851
	unsigned int cpu;
820
	u32 fpexc;
852
	u32 fpexc;
821
853
822
	local_bh_disable();
854
	vfp_lock();
823
855
824
	/*
856
	/*
825
	 * Kernel mode NEON is only allowed outside of hardirq context with
857
	 * Kernel mode NEON is only allowed outside of hardirq context with
Lines 850-856 void kernel_neon_end(void) Link Here
850
{
882
{
851
	/* Disable the NEON/VFP unit. */
883
	/* Disable the NEON/VFP unit. */
852
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
884
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
853
	local_bh_enable();
885
	vfp_unlock();
854
}
886
}
855
EXPORT_SYMBOL(kernel_neon_end);
887
EXPORT_SYMBOL(kernel_neon_end);
856
888
(-)a/arch/arm64/Kconfig (+1 lines)
Lines 97-102 config ARM64 Link Here
97
	select ARCH_SUPPORTS_NUMA_BALANCING
97
	select ARCH_SUPPORTS_NUMA_BALANCING
98
	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
98
	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
99
	select ARCH_SUPPORTS_PER_VMA_LOCK
99
	select ARCH_SUPPORTS_PER_VMA_LOCK
100
	select ARCH_SUPPORTS_RT
100
	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
101
	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
101
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
102
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
102
	select ARCH_WANT_DEFAULT_BPF_JIT
103
	select ARCH_WANT_DEFAULT_BPF_JIT
(-)a/arch/powerpc/Kconfig (+2 lines)
Lines 166-171 config PPC Link Here
166
	select ARCH_STACKWALK
166
	select ARCH_STACKWALK
167
	select ARCH_SUPPORTS_ATOMIC_RMW
167
	select ARCH_SUPPORTS_ATOMIC_RMW
168
	select ARCH_SUPPORTS_DEBUG_PAGEALLOC	if PPC_BOOK3S || PPC_8xx || 40x
168
	select ARCH_SUPPORTS_DEBUG_PAGEALLOC	if PPC_BOOK3S || PPC_8xx || 40x
169
	select ARCH_SUPPORTS_RT			if HAVE_POSIX_CPU_TIMERS_TASK_WORK
169
	select ARCH_USE_BUILTIN_BSWAP
170
	select ARCH_USE_BUILTIN_BSWAP
170
	select ARCH_USE_CMPXCHG_LOCKREF		if PPC64
171
	select ARCH_USE_CMPXCHG_LOCKREF		if PPC64
171
	select ARCH_USE_MEMTEST
172
	select ARCH_USE_MEMTEST
Lines 268-273 config PPC Link Here
268
	select HAVE_PERF_USER_STACK_DUMP
269
	select HAVE_PERF_USER_STACK_DUMP
269
	select HAVE_REGS_AND_STACK_ACCESS_API
270
	select HAVE_REGS_AND_STACK_ACCESS_API
270
	select HAVE_RELIABLE_STACKTRACE
271
	select HAVE_RELIABLE_STACKTRACE
272
	select HAVE_POSIX_CPU_TIMERS_TASK_WORK	if !KVM
271
	select HAVE_RSEQ
273
	select HAVE_RSEQ
272
	select HAVE_SETUP_PER_CPU_AREA		if PPC64
274
	select HAVE_SETUP_PER_CPU_AREA		if PPC64
273
	select HAVE_SOFTIRQ_ON_OWN_STACK
275
	select HAVE_SOFTIRQ_ON_OWN_STACK
(-)a/arch/powerpc/include/asm/stackprotector.h (-1 / +6 lines)
Lines 19-26 Link Here
19
 */
19
 */
20
static __always_inline void boot_init_stack_canary(void)
20
static __always_inline void boot_init_stack_canary(void)
21
{
21
{
22
	unsigned long canary = get_random_canary();
22
	unsigned long canary;
23
23
24
#ifndef CONFIG_PREEMPT_RT
25
	canary = get_random_canary();
26
#else
27
	canary = ((unsigned long)&canary) & CANARY_MASK;
28
#endif
24
	current->stack_canary = canary;
29
	current->stack_canary = canary;
25
#ifdef CONFIG_PPC64
30
#ifdef CONFIG_PPC64
26
	get_paca()->canary = canary;
31
	get_paca()->canary = canary;
(-)a/arch/powerpc/kernel/traps.c (-1 / +6 lines)
Lines 261-272 static char *get_mmu_str(void) Link Here
261
261
262
static int __die(const char *str, struct pt_regs *regs, long err)
262
static int __die(const char *str, struct pt_regs *regs, long err)
263
{
263
{
264
	const char *pr = "";
265
264
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
266
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
265
267
268
	if (IS_ENABLED(CONFIG_PREEMPTION))
269
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
270
266
	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
271
	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
267
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
272
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
268
	       PAGE_SIZE / 1024, get_mmu_str(),
273
	       PAGE_SIZE / 1024, get_mmu_str(),
269
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
274
	       pr,
270
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
275
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
271
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
276
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
272
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
277
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
(-)a/arch/powerpc/kvm/Kconfig (+1 lines)
Lines 224-229 config KVM_E500MC Link Here
224
config KVM_MPIC
224
config KVM_MPIC
225
	bool "KVM in-kernel MPIC emulation"
225
	bool "KVM in-kernel MPIC emulation"
226
	depends on KVM && PPC_E500
226
	depends on KVM && PPC_E500
227
	depends on !PREEMPT_RT
227
	select HAVE_KVM_IRQCHIP
228
	select HAVE_KVM_IRQCHIP
228
	select HAVE_KVM_IRQFD
229
	select HAVE_KVM_IRQFD
229
	select HAVE_KVM_IRQ_ROUTING
230
	select HAVE_KVM_IRQ_ROUTING
(-)a/arch/powerpc/perf/imc-pmu.c (-1 / +1 lines)
Lines 51-57 static int trace_imc_mem_size; Link Here
51
 * core and trace-imc
51
 * core and trace-imc
52
 */
52
 */
53
static struct imc_pmu_ref imc_global_refc = {
53
static struct imc_pmu_ref imc_global_refc = {
54
	.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
54
	.lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
55
	.id = 0,
55
	.id = 0,
56
	.refc = 0,
56
	.refc = 0,
57
};
57
};
(-)a/arch/powerpc/platforms/pseries/Kconfig (+1 lines)
Lines 2-7 Link Here
2
config PPC_PSERIES
2
config PPC_PSERIES
3
	depends on PPC64 && PPC_BOOK3S
3
	depends on PPC64 && PPC_BOOK3S
4
	bool "IBM pSeries & new (POWER5-based) iSeries"
4
	bool "IBM pSeries & new (POWER5-based) iSeries"
5
	select GENERIC_ALLOCATOR
5
	select HAVE_PCSPKR_PLATFORM
6
	select HAVE_PCSPKR_PLATFORM
6
	select MPIC
7
	select MPIC
7
	select OF_DYNAMIC
8
	select OF_DYNAMIC
(-)a/arch/powerpc/platforms/pseries/iommu.c (-11 / +20 lines)
Lines 25-30 Link Here
25
#include <linux/of_address.h>
25
#include <linux/of_address.h>
26
#include <linux/iommu.h>
26
#include <linux/iommu.h>
27
#include <linux/rculist.h>
27
#include <linux/rculist.h>
28
#include <linux/local_lock.h>
28
#include <asm/io.h>
29
#include <asm/io.h>
29
#include <asm/prom.h>
30
#include <asm/prom.h>
30
#include <asm/rtas.h>
31
#include <asm/rtas.h>
Lines 206-212 static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, Link Here
206
	return ret;
207
	return ret;
207
}
208
}
208
209
209
static DEFINE_PER_CPU(__be64 *, tce_page);
210
struct tce_page {
211
	__be64 * page;
212
	local_lock_t lock;
213
};
214
static DEFINE_PER_CPU(struct tce_page, tce_page) = {
215
	.lock = INIT_LOCAL_LOCK(lock),
216
};
210
217
211
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
218
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
212
				     long npages, unsigned long uaddr,
219
				     long npages, unsigned long uaddr,
Lines 229-237 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, Link Here
229
		                           direction, attrs);
236
		                           direction, attrs);
230
	}
237
	}
231
238
232
	local_irq_save(flags);	/* to protect tcep and the page behind it */
239
	/* to protect tcep and the page behind it */
240
	local_lock_irqsave(&tce_page.lock, flags);
233
241
234
	tcep = __this_cpu_read(tce_page);
242
	tcep = __this_cpu_read(tce_page.page);
235
243
236
	/* This is safe to do since interrupts are off when we're called
244
	/* This is safe to do since interrupts are off when we're called
237
	 * from iommu_alloc{,_sg}()
245
	 * from iommu_alloc{,_sg}()
Lines 240-251 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, Link Here
240
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
248
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
241
		/* If allocation fails, fall back to the loop implementation */
249
		/* If allocation fails, fall back to the loop implementation */
242
		if (!tcep) {
250
		if (!tcep) {
243
			local_irq_restore(flags);
251
			local_unlock_irqrestore(&tce_page.lock, flags);
244
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
252
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
245
					tceshift,
253
					tceshift,
246
					npages, uaddr, direction, attrs);
254
					npages, uaddr, direction, attrs);
247
		}
255
		}
248
		__this_cpu_write(tce_page, tcep);
256
		__this_cpu_write(tce_page.page, tcep);
249
	}
257
	}
250
258
251
	rpn = __pa(uaddr) >> tceshift;
259
	rpn = __pa(uaddr) >> tceshift;
Lines 275-281 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, Link Here
275
		tcenum += limit;
283
		tcenum += limit;
276
	} while (npages > 0 && !rc);
284
	} while (npages > 0 && !rc);
277
285
278
	local_irq_restore(flags);
286
	local_unlock_irqrestore(&tce_page.lock, flags);
279
287
280
	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
288
	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
281
		ret = (int)rc;
289
		ret = (int)rc;
Lines 459-474 static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, Link Here
459
				DMA_BIDIRECTIONAL, 0);
467
				DMA_BIDIRECTIONAL, 0);
460
	}
468
	}
461
469
462
	local_irq_disable();	/* to protect tcep and the page behind it */
470
	/* to protect tcep and the page behind it */
463
	tcep = __this_cpu_read(tce_page);
471
	local_lock_irq(&tce_page.lock);
472
	tcep = __this_cpu_read(tce_page.page);
464
473
465
	if (!tcep) {
474
	if (!tcep) {
466
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
475
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
467
		if (!tcep) {
476
		if (!tcep) {
468
			local_irq_enable();
477
			local_unlock_irq(&tce_page.lock);
469
			return -ENOMEM;
478
			return -ENOMEM;
470
		}
479
		}
471
		__this_cpu_write(tce_page, tcep);
480
		__this_cpu_write(tce_page.page, tcep);
472
	}
481
	}
473
482
474
	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
483
	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
Lines 511-517 static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, Link Here
511
520
512
	/* error cleanup: caller will clear whole range */
521
	/* error cleanup: caller will clear whole range */
513
522
514
	local_irq_enable();
523
	local_unlock_irq(&tce_page.lock);
515
	return rc;
524
	return rc;
516
}
525
}
517
526
(-)a/arch/x86/Kconfig (+2 lines)
Lines 117-122 config X86 Link Here
117
	select ARCH_USES_CFI_TRAPS		if X86_64 && CFI_CLANG
117
	select ARCH_USES_CFI_TRAPS		if X86_64 && CFI_CLANG
118
	select ARCH_SUPPORTS_LTO_CLANG
118
	select ARCH_SUPPORTS_LTO_CLANG
119
	select ARCH_SUPPORTS_LTO_CLANG_THIN
119
	select ARCH_SUPPORTS_LTO_CLANG_THIN
120
	select ARCH_SUPPORTS_RT
120
	select ARCH_USE_BUILTIN_BSWAP
121
	select ARCH_USE_BUILTIN_BSWAP
121
	select ARCH_USE_MEMTEST
122
	select ARCH_USE_MEMTEST
122
	select ARCH_USE_QUEUED_RWLOCKS
123
	select ARCH_USE_QUEUED_RWLOCKS
Lines 271-276 config X86 Link Here
271
	select HAVE_STATIC_CALL
272
	select HAVE_STATIC_CALL
272
	select HAVE_STATIC_CALL_INLINE		if HAVE_OBJTOOL
273
	select HAVE_STATIC_CALL_INLINE		if HAVE_OBJTOOL
273
	select HAVE_PREEMPT_DYNAMIC_CALL
274
	select HAVE_PREEMPT_DYNAMIC_CALL
275
	select HAVE_PREEMPT_AUTO
274
	select HAVE_RSEQ
276
	select HAVE_RSEQ
275
	select HAVE_RUST			if X86_64
277
	select HAVE_RUST			if X86_64
276
	select HAVE_SYSCALL_TRACEPOINTS
278
	select HAVE_SYSCALL_TRACEPOINTS
(-)a/arch/x86/include/asm/thread_info.h (-2 / +4 lines)
Lines 81-88 struct thread_info { Link Here
81
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
81
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
82
#define TIF_SIGPENDING		2	/* signal pending */
82
#define TIF_SIGPENDING		2	/* signal pending */
83
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
83
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
84
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
84
#define TIF_ARCH_RESCHED_LAZY	4	/* Lazy rescheduling */
85
#define TIF_SSBD		5	/* Speculative store bypass disable */
85
#define TIF_SINGLESTEP		5	/* reenable singlestep on user return*/
86
#define TIF_SSBD		6	/* Speculative store bypass disable */
86
#define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
87
#define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
87
#define TIF_SPEC_L1D_FLUSH	10	/* Flush L1D on mm switches (processes) */
88
#define TIF_SPEC_L1D_FLUSH	10	/* Flush L1D on mm switches (processes) */
88
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
89
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
Lines 104-109 struct thread_info { Link Here
104
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
105
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
105
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
106
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
106
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
107
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
108
#define _TIF_ARCH_RESCHED_LAZY	(1 << TIF_ARCH_RESCHED_LAZY)
107
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
109
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
108
#define _TIF_SSBD		(1 << TIF_SSBD)
110
#define _TIF_SSBD		(1 << TIF_SSBD)
109
#define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
111
#define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
(-)a/drivers/acpi/processor_idle.c (-1 / +1 lines)
Lines 108-114 static const struct dmi_system_id processor_power_dmi_table[] = { Link Here
108
 */
108
 */
109
static void __cpuidle acpi_safe_halt(void)
109
static void __cpuidle acpi_safe_halt(void)
110
{
110
{
111
	if (!tif_need_resched()) {
111
	if (!need_resched()) {
112
		raw_safe_halt();
112
		raw_safe_halt();
113
		raw_local_irq_disable();
113
		raw_local_irq_disable();
114
	}
114
	}
(-)a/drivers/block/zram/zram_drv.c (+37 lines)
Lines 57-62 static void zram_free_page(struct zram *zram, size_t index); Link Here
57
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
57
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
58
			  struct bio *parent);
58
			  struct bio *parent);
59
59
60
#ifdef CONFIG_PREEMPT_RT
61
static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
62
{
63
	size_t index;
64
65
	for (index = 0; index < num_pages; index++)
66
		spin_lock_init(&zram->table[index].lock);
67
}
68
69
static int zram_slot_trylock(struct zram *zram, u32 index)
70
{
71
	int ret;
72
73
	ret = spin_trylock(&zram->table[index].lock);
74
	if (ret)
75
		__set_bit(ZRAM_LOCK, &zram->table[index].flags);
76
	return ret;
77
}
78
79
static void zram_slot_lock(struct zram *zram, u32 index)
80
{
81
	spin_lock(&zram->table[index].lock);
82
	__set_bit(ZRAM_LOCK, &zram->table[index].flags);
83
}
84
85
static void zram_slot_unlock(struct zram *zram, u32 index)
86
{
87
	__clear_bit(ZRAM_LOCK, &zram->table[index].flags);
88
	spin_unlock(&zram->table[index].lock);
89
}
90
91
#else
92
93
static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
94
60
static int zram_slot_trylock(struct zram *zram, u32 index)
95
static int zram_slot_trylock(struct zram *zram, u32 index)
61
{
96
{
62
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
97
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
Lines 71-76 static void zram_slot_unlock(struct zram *zram, u32 index) Link Here
71
{
106
{
72
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
107
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
73
}
108
}
109
#endif
74
110
75
static inline bool init_done(struct zram *zram)
111
static inline bool init_done(struct zram *zram)
76
{
112
{
Lines 1245-1250 static bool zram_meta_alloc(struct zram *zram, u64 disksize) Link Here
1245
1281
1246
	if (!huge_class_size)
1282
	if (!huge_class_size)
1247
		huge_class_size = zs_huge_class_size(zram->mem_pool);
1283
		huge_class_size = zs_huge_class_size(zram->mem_pool);
1284
	zram_meta_init_table_locks(zram, num_pages);
1248
	return true;
1285
	return true;
1249
}
1286
}
1250
1287
(-)a/drivers/block/zram/zram_drv.h (+3 lines)
Lines 69-74 struct zram_table_entry { Link Here
69
		unsigned long element;
69
		unsigned long element;
70
	};
70
	};
71
	unsigned long flags;
71
	unsigned long flags;
72
#ifdef CONFIG_PREEMPT_RT
73
	spinlock_t lock;
74
#endif
72
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
75
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
73
	ktime_t ac_time;
76
	ktime_t ac_time;
74
#endif
77
#endif
(-)a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c (-32 / +21 lines)
Lines 60-70 static DEFINE_PER_CPU(int, fpu_recursion_depth); Link Here
60
 */
60
 */
61
inline void dc_assert_fp_enabled(void)
61
inline void dc_assert_fp_enabled(void)
62
{
62
{
63
	int *pcpu, depth = 0;
63
	int depth;
64
64
65
	pcpu = get_cpu_ptr(&fpu_recursion_depth);
65
	depth = __this_cpu_read(fpu_recursion_depth);
66
	depth = *pcpu;
67
	put_cpu_ptr(&fpu_recursion_depth);
68
66
69
	ASSERT(depth >= 1);
67
	ASSERT(depth >= 1);
70
}
68
}
Lines 84-116 inline void dc_assert_fp_enabled(void) Link Here
84
 */
82
 */
85
void dc_fpu_begin(const char *function_name, const int line)
83
void dc_fpu_begin(const char *function_name, const int line)
86
{
84
{
87
	int *pcpu;
85
	int depth;
88
86
89
	pcpu = get_cpu_ptr(&fpu_recursion_depth);
87
	WARN_ON_ONCE(!in_task());
90
	*pcpu += 1;
88
	preempt_disable();
89
	depth = __this_cpu_inc_return(fpu_recursion_depth);
91
90
92
	if (*pcpu == 1) {
91
	if (depth == 1) {
93
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
92
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
94
		migrate_disable();
95
		kernel_fpu_begin();
93
		kernel_fpu_begin();
96
#elif defined(CONFIG_PPC64)
94
#elif defined(CONFIG_PPC64)
97
		if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
95
		if (cpu_has_feature(CPU_FTR_VSX_COMP))
98
			preempt_disable();
99
			enable_kernel_vsx();
96
			enable_kernel_vsx();
100
		} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
97
		else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
101
			preempt_disable();
102
			enable_kernel_altivec();
98
			enable_kernel_altivec();
103
		} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
99
		else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
104
			preempt_disable();
105
			enable_kernel_fp();
100
			enable_kernel_fp();
106
		}
107
#elif defined(CONFIG_ARM64)
101
#elif defined(CONFIG_ARM64)
108
		kernel_neon_begin();
102
		kernel_neon_begin();
109
#endif
103
#endif
110
	}
104
	}
111
105
112
	TRACE_DCN_FPU(true, function_name, line, *pcpu);
106
	TRACE_DCN_FPU(true, function_name, line, depth);
113
	put_cpu_ptr(&fpu_recursion_depth);
114
}
107
}
115
108
116
/**
109
/**
Lines 125-154 void dc_fpu_begin(const char *function_name, const int line) Link Here
125
 */
118
 */
126
void dc_fpu_end(const char *function_name, const int line)
119
void dc_fpu_end(const char *function_name, const int line)
127
{
120
{
128
	int *pcpu;
121
	int depth;
129
122
130
	pcpu = get_cpu_ptr(&fpu_recursion_depth);
123
	depth = __this_cpu_dec_return(fpu_recursion_depth);
131
	*pcpu -= 1;
124
	if (depth == 0) {
132
	if (*pcpu <= 0) {
133
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
125
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
134
		kernel_fpu_end();
126
		kernel_fpu_end();
135
		migrate_enable();
136
#elif defined(CONFIG_PPC64)
127
#elif defined(CONFIG_PPC64)
137
		if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
128
		if (cpu_has_feature(CPU_FTR_VSX_COMP))
138
			disable_kernel_vsx();
129
			disable_kernel_vsx();
139
			preempt_enable();
130
		else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
140
		} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
141
			disable_kernel_altivec();
131
			disable_kernel_altivec();
142
			preempt_enable();
132
		else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
143
		} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
144
			disable_kernel_fp();
133
			disable_kernel_fp();
145
			preempt_enable();
146
		}
147
#elif defined(CONFIG_ARM64)
134
#elif defined(CONFIG_ARM64)
148
		kernel_neon_end();
135
		kernel_neon_end();
149
#endif
136
#endif
137
	} else {
138
		WARN_ON_ONCE(depth < 0);
150
	}
139
	}
151
140
152
	TRACE_DCN_FPU(false, function_name, line, *pcpu);
141
	TRACE_DCN_FPU(false, function_name, line, depth);
153
	put_cpu_ptr(&fpu_recursion_depth);
142
	preempt_enable();
154
}
143
}
(-)a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c (-1 / +9 lines)
Lines 2141-2149 bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, Link Here
2141
		bool fast_validate)
2141
		bool fast_validate)
2142
{
2142
{
2143
	bool voltage_supported;
2143
	bool voltage_supported;
2144
	display_e2e_pipe_params_st *pipes;
2145
2146
	pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
2147
	if (!pipes)
2148
		return false;
2149
2144
	DC_FP_START();
2150
	DC_FP_START();
2145
	voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
2151
	voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
2146
	DC_FP_END();
2152
	DC_FP_END();
2153
2154
	kfree(pipes);
2147
	return voltage_supported;
2155
	return voltage_supported;
2148
}
2156
}
2149
2157
(-)a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c (-1 / +9 lines)
Lines 953-961 static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, Link Here
953
		bool fast_validate)
953
		bool fast_validate)
954
{
954
{
955
	bool voltage_supported;
955
	bool voltage_supported;
956
	display_e2e_pipe_params_st *pipes;
957
958
	pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
959
	if (!pipes)
960
		return false;
961
956
	DC_FP_START();
962
	DC_FP_START();
957
	voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
963
	voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
958
	DC_FP_END();
964
	DC_FP_END();
965
966
	kfree(pipes);
959
	return voltage_supported;
967
	return voltage_supported;
960
}
968
}
961
969
(-)a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c (-14 / +9 lines)
Lines 1923-1929 void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st Link Here
1923
}
1923
}
1924
1924
1925
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
1925
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
1926
		bool fast_validate)
1926
		bool fast_validate, display_e2e_pipe_params_st *pipes)
1927
{
1927
{
1928
	bool out = false;
1928
	bool out = false;
1929
1929
Lines 1932-1938 static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co Link Here
1932
	int vlevel = 0;
1932
	int vlevel = 0;
1933
	int pipe_split_from[MAX_PIPES];
1933
	int pipe_split_from[MAX_PIPES];
1934
	int pipe_cnt = 0;
1934
	int pipe_cnt = 0;
1935
	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
1936
	DC_LOGGER_INIT(dc->ctx->logger);
1935
	DC_LOGGER_INIT(dc->ctx->logger);
1937
1936
1938
	BW_VAL_TRACE_COUNT();
1937
	BW_VAL_TRACE_COUNT();
Lines 1967-1982 static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co Link Here
1967
	out = false;
1966
	out = false;
1968
1967
1969
validate_out:
1968
validate_out:
1970
	kfree(pipes);
1971
1969
1972
	BW_VAL_TRACE_FINISH();
1970
	BW_VAL_TRACE_FINISH();
1973
1971
1974
	return out;
1972
	return out;
1975
}
1973
}
1976
1974
1977
bool dcn20_validate_bandwidth_fp(struct dc *dc,
1975
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
1978
				 struct dc_state *context,
1976
				 bool fast_validate, display_e2e_pipe_params_st *pipes)
1979
				 bool fast_validate)
1980
{
1977
{
1981
	bool voltage_supported = false;
1978
	bool voltage_supported = false;
1982
	bool full_pstate_supported = false;
1979
	bool full_pstate_supported = false;
Lines 1995-2005 bool dcn20_validate_bandwidth_fp(struct dc *dc, Link Here
1995
	ASSERT(context != dc->current_state);
1992
	ASSERT(context != dc->current_state);
1996
1993
1997
	if (fast_validate) {
1994
	if (fast_validate) {
1998
		return dcn20_validate_bandwidth_internal(dc, context, true);
1995
		return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
1999
	}
1996
	}
2000
1997
2001
	// Best case, we support full UCLK switch latency
1998
	// Best case, we support full UCLK switch latency
2002
	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
1999
	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
2003
	full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2000
	full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2004
2001
2005
	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
2002
	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
Lines 2011-2017 bool dcn20_validate_bandwidth_fp(struct dc *dc, Link Here
2011
	// Fallback: Try to only support G6 temperature read latency
2008
	// Fallback: Try to only support G6 temperature read latency
2012
	context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
2009
	context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
2013
2010
2014
	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
2011
	memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
2012
	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
2015
	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2013
	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2016
2014
2017
	if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
2015
	if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
Lines 2216-2224 static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, Link Here
2216
						&context->bw_ctx.dml, pipes, pipe_cnt);
2214
						&context->bw_ctx.dml, pipes, pipe_cnt);
2217
}
2215
}
2218
2216
2219
bool dcn21_validate_bandwidth_fp(struct dc *dc,
2217
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
2220
				 struct dc_state *context,
2218
				 bool fast_validate, display_e2e_pipe_params_st *pipes)
2221
				 bool fast_validate)
2222
{
2219
{
2223
	bool out = false;
2220
	bool out = false;
2224
2221
Lines 2227-2233 bool dcn21_validate_bandwidth_fp(struct dc *dc, Link Here
2227
	int vlevel = 0;
2224
	int vlevel = 0;
2228
	int pipe_split_from[MAX_PIPES];
2225
	int pipe_split_from[MAX_PIPES];
2229
	int pipe_cnt = 0;
2226
	int pipe_cnt = 0;
2230
	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
2231
	DC_LOGGER_INIT(dc->ctx->logger);
2227
	DC_LOGGER_INIT(dc->ctx->logger);
2232
2228
2233
	BW_VAL_TRACE_COUNT();
2229
	BW_VAL_TRACE_COUNT();
Lines 2267-2273 bool dcn21_validate_bandwidth_fp(struct dc *dc, Link Here
2267
	out = false;
2263
	out = false;
2268
2264
2269
validate_out:
2265
validate_out:
2270
	kfree(pipes);
2271
2266
2272
	BW_VAL_TRACE_FINISH();
2267
	BW_VAL_TRACE_FINISH();
2273
2268
(-)a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h (-6 / +4 lines)
Lines 61-69 void dcn20_update_bounding_box(struct dc *dc, Link Here
61
			       unsigned int num_states);
61
			       unsigned int num_states);
62
void dcn20_patch_bounding_box(struct dc *dc,
62
void dcn20_patch_bounding_box(struct dc *dc,
63
			      struct _vcs_dpi_soc_bounding_box_st *bb);
63
			      struct _vcs_dpi_soc_bounding_box_st *bb);
64
bool dcn20_validate_bandwidth_fp(struct dc *dc,
64
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
65
				 struct dc_state *context,
65
				 bool fast_validate, display_e2e_pipe_params_st *pipes);
66
				 bool fast_validate);
67
void dcn20_fpu_set_wm_ranges(int i,
66
void dcn20_fpu_set_wm_ranges(int i,
68
			     struct pp_smu_wm_range_sets *ranges,
67
			     struct pp_smu_wm_range_sets *ranges,
69
			     struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
68
			     struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
Lines 77-85 int dcn21_populate_dml_pipes_from_context(struct dc *dc, Link Here
77
					  struct dc_state *context,
76
					  struct dc_state *context,
78
					  display_e2e_pipe_params_st *pipes,
77
					  display_e2e_pipe_params_st *pipes,
79
					  bool fast_validate);
78
					  bool fast_validate);
80
bool dcn21_validate_bandwidth_fp(struct dc *dc,
79
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
81
				 struct dc_state *context,
80
				 fast_validate, display_e2e_pipe_params_st *pipes);
82
				 bool fast_validate);
83
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
81
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
84
82
85
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
83
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
(-)a/drivers/gpu/drm/i915/Kconfig (-1 lines)
Lines 3-9 config DRM_I915 Link Here
3
	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
3
	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
4
	depends on DRM
4
	depends on DRM
5
	depends on X86 && PCI
5
	depends on X86 && PCI
6
	depends on !PREEMPT_RT
7
	select INTEL_GTT if X86
6
	select INTEL_GTT if X86
8
	select INTERVAL_TREE
7
	select INTERVAL_TREE
9
	# we need shmfs for the swappable backing store, and in particular
8
	# we need shmfs for the swappable backing store, and in particular
(-)a/drivers/gpu/drm/i915/display/intel_crtc.c (-5 / +10 lines)
Lines 534-540 void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) Link Here
534
	 */
534
	 */
535
	intel_psr_wait_for_idle_locked(new_crtc_state);
535
	intel_psr_wait_for_idle_locked(new_crtc_state);
536
536
537
	local_irq_disable();
537
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
538
		local_irq_disable();
538
539
539
	crtc->debug.min_vbl = min;
540
	crtc->debug.min_vbl = min;
540
	crtc->debug.max_vbl = max;
541
	crtc->debug.max_vbl = max;
Lines 559-569 void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) Link Here
559
			break;
560
			break;
560
		}
561
		}
561
562
562
		local_irq_enable();
563
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
564
			local_irq_enable();
563
565
564
		timeout = schedule_timeout(timeout);
566
		timeout = schedule_timeout(timeout);
565
567
566
		local_irq_disable();
568
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
569
			local_irq_disable();
567
	}
570
	}
568
571
569
	finish_wait(wq, &wait);
572
	finish_wait(wq, &wait);
Lines 596-602 void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) Link Here
596
	return;
599
	return;
597
600
598
irq_disable:
601
irq_disable:
599
	local_irq_disable();
602
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
603
		local_irq_disable();
600
}
604
}
601
605
602
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
606
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
Lines 706-712 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) Link Here
706
		intel_crtc_update_active_timings(new_crtc_state,
710
		intel_crtc_update_active_timings(new_crtc_state,
707
						 new_crtc_state->vrr.enable);
711
						 new_crtc_state->vrr.enable);
708
712
709
	local_irq_enable();
713
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
714
		local_irq_enable();
710
715
711
	if (intel_vgpu_active(dev_priv))
716
	if (intel_vgpu_active(dev_priv))
712
		return;
717
		return;
(-)a/drivers/gpu/drm/i915/display/intel_vblank.c (-2 / +4 lines)
Lines 294-300 static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, Link Here
294
	 */
294
	 */
295
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
295
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
296
296
297
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
297
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
298
		preempt_disable();
298
299
299
	/* Get optional system timestamp before query. */
300
	/* Get optional system timestamp before query. */
300
	if (stime)
301
	if (stime)
Lines 358-364 static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, Link Here
358
	if (etime)
359
	if (etime)
359
		*etime = ktime_get();
360
		*etime = ktime_get();
360
361
361
	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
362
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
363
		preempt_enable();
362
364
363
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
365
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
364
366
(-)a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c (-3 / +2 lines)
Lines 312-321 void __intel_breadcrumbs_park(struct intel_breadcrumbs *b) Link Here
312
	/* Kick the work once more to drain the signalers, and disarm the irq */
312
	/* Kick the work once more to drain the signalers, and disarm the irq */
313
	irq_work_sync(&b->irq_work);
313
	irq_work_sync(&b->irq_work);
314
	while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
314
	while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
315
		local_irq_disable();
315
		irq_work_queue(&b->irq_work);
316
		signal_irq_work(&b->irq_work);
317
		local_irq_enable();
318
		cond_resched();
316
		cond_resched();
317
		irq_work_sync(&b->irq_work);
319
	}
318
	}
320
}
319
}
321
320
(-)a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c (-12 / +5 lines)
Lines 1303-1309 static void execlists_dequeue(struct intel_engine_cs *engine) Link Here
1303
	 * and context switches) submission.
1303
	 * and context switches) submission.
1304
	 */
1304
	 */
1305
1305
1306
	spin_lock(&sched_engine->lock);
1306
	spin_lock_irq(&sched_engine->lock);
1307
1307
1308
	/*
1308
	/*
1309
	 * If the queue is higher priority than the last
1309
	 * If the queue is higher priority than the last
Lines 1403-1409 static void execlists_dequeue(struct intel_engine_cs *engine) Link Here
1403
				 * Even if ELSP[1] is occupied and not worthy
1403
				 * Even if ELSP[1] is occupied and not worthy
1404
				 * of timeslices, our queue might be.
1404
				 * of timeslices, our queue might be.
1405
				 */
1405
				 */
1406
				spin_unlock(&sched_engine->lock);
1406
				spin_unlock_irq(&sched_engine->lock);
1407
				return;
1407
				return;
1408
			}
1408
			}
1409
		}
1409
		}
Lines 1429-1435 static void execlists_dequeue(struct intel_engine_cs *engine) Link Here
1429
1429
1430
		if (last && !can_merge_rq(last, rq)) {
1430
		if (last && !can_merge_rq(last, rq)) {
1431
			spin_unlock(&ve->base.sched_engine->lock);
1431
			spin_unlock(&ve->base.sched_engine->lock);
1432
			spin_unlock(&engine->sched_engine->lock);
1432
			spin_unlock_irq(&engine->sched_engine->lock);
1433
			return; /* leave this for another sibling */
1433
			return; /* leave this for another sibling */
1434
		}
1434
		}
1435
1435
Lines 1591-1597 static void execlists_dequeue(struct intel_engine_cs *engine) Link Here
1591
	 */
1591
	 */
1592
	sched_engine->queue_priority_hint = queue_prio(sched_engine);
1592
	sched_engine->queue_priority_hint = queue_prio(sched_engine);
1593
	i915_sched_engine_reset_on_empty(sched_engine);
1593
	i915_sched_engine_reset_on_empty(sched_engine);
1594
	spin_unlock(&sched_engine->lock);
1594
	spin_unlock_irq(&sched_engine->lock);
1595
1595
1596
	/*
1596
	/*
1597
	 * We can skip poking the HW if we ended up with exactly the same set
1597
	 * We can skip poking the HW if we ended up with exactly the same set
Lines 1617-1629 static void execlists_dequeue(struct intel_engine_cs *engine) Link Here
1617
	}
1617
	}
1618
}
1618
}
1619
1619
1620
static void execlists_dequeue_irq(struct intel_engine_cs *engine)
1621
{
1622
	local_irq_disable(); /* Suspend interrupts across request submission */
1623
	execlists_dequeue(engine);
1624
	local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
1625
}
1626
1627
static void clear_ports(struct i915_request **ports, int count)
1620
static void clear_ports(struct i915_request **ports, int count)
1628
{
1621
{
1629
	memset_p((void **)ports, NULL, count);
1622
	memset_p((void **)ports, NULL, count);
Lines 2478-2484 static void execlists_submission_tasklet(struct tasklet_struct *t) Link Here
2478
	}
2471
	}
2479
2472
2480
	if (!engine->execlists.pending[0]) {
2473
	if (!engine->execlists.pending[0]) {
2481
		execlists_dequeue_irq(engine);
2474
		execlists_dequeue(engine);
2482
		start_timeslice(engine);
2475
		start_timeslice(engine);
2483
	}
2476
	}
2484
2477
(-)a/drivers/gpu/drm/i915/gt/intel_reset.c (-7 / +5 lines)
Lines 164-176 static int i915_do_reset(struct intel_gt *gt, Link Here
164
	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
164
	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
165
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
165
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
166
	udelay(50);
166
	udelay(50);
167
	err = wait_for_atomic(i915_in_reset(pdev), 50);
167
	err = _wait_for_atomic(i915_in_reset(pdev), 50, 0);
168
168
169
	/* Clear the reset request. */
169
	/* Clear the reset request. */
170
	pci_write_config_byte(pdev, I915_GDRST, 0);
170
	pci_write_config_byte(pdev, I915_GDRST, 0);
171
	udelay(50);
171
	udelay(50);
172
	if (!err)
172
	if (!err)
173
		err = wait_for_atomic(!i915_in_reset(pdev), 50);
173
		err = _wait_for_atomic(!i915_in_reset(pdev), 50, 0);
174
174
175
	return err;
175
	return err;
176
}
176
}
Lines 190-196 static int g33_do_reset(struct intel_gt *gt, Link Here
190
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
190
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
191
191
192
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
192
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
193
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
193
	return _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
194
}
194
}
195
195
196
static int g4x_do_reset(struct intel_gt *gt,
196
static int g4x_do_reset(struct intel_gt *gt,
Lines 207-213 static int g4x_do_reset(struct intel_gt *gt, Link Here
207
207
208
	pci_write_config_byte(pdev, I915_GDRST,
208
	pci_write_config_byte(pdev, I915_GDRST,
209
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
209
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
210
	ret =  wait_for_atomic(g4x_reset_complete(pdev), 50);
210
	ret =  _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
211
	if (ret) {
211
	if (ret) {
212
		GT_TRACE(gt, "Wait for media reset failed\n");
212
		GT_TRACE(gt, "Wait for media reset failed\n");
213
		goto out;
213
		goto out;
Lines 215-221 static int g4x_do_reset(struct intel_gt *gt, Link Here
215
215
216
	pci_write_config_byte(pdev, I915_GDRST,
216
	pci_write_config_byte(pdev, I915_GDRST,
217
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
217
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
218
	ret =  wait_for_atomic(g4x_reset_complete(pdev), 50);
218
	ret =  _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
219
	if (ret) {
219
	if (ret) {
220
		GT_TRACE(gt, "Wait for render reset failed\n");
220
		GT_TRACE(gt, "Wait for render reset failed\n");
221
		goto out;
221
		goto out;
Lines 785-793 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) Link Here
785
		reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
785
		reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
786
786
787
		GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
787
		GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
788
		preempt_disable();
789
		ret = reset(gt, reset_mask, retry);
788
		ret = reset(gt, reset_mask, retry);
790
		preempt_enable();
791
789
792
		wa_14015076503_end(gt, reset_mask);
790
		wa_14015076503_end(gt, reset_mask);
793
	}
791
	}
(-)a/drivers/gpu/drm/i915/gt/uc/intel_guc.h (-1 / +1 lines)
Lines 317-323 static inline int intel_guc_send_busy_loop(struct intel_guc *guc, Link Here
317
{
317
{
318
	int err;
318
	int err;
319
	unsigned int sleep_period_ms = 1;
319
	unsigned int sleep_period_ms = 1;
320
	bool not_atomic = !in_atomic() && !irqs_disabled();
320
	bool not_atomic = !in_atomic() && !irqs_disabled() && !rcu_preempt_depth();
321
321
322
	/*
322
	/*
323
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
323
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
(-)a/drivers/gpu/drm/i915/i915_request.c (-2 lines)
Lines 609-615 bool __i915_request_submit(struct i915_request *request) Link Here
609
609
610
	RQ_TRACE(request, "\n");
610
	RQ_TRACE(request, "\n");
611
611
612
	GEM_BUG_ON(!irqs_disabled());
613
	lockdep_assert_held(&engine->sched_engine->lock);
612
	lockdep_assert_held(&engine->sched_engine->lock);
614
613
615
	/*
614
	/*
Lines 718-724 void __i915_request_unsubmit(struct i915_request *request) Link Here
718
	 */
717
	 */
719
	RQ_TRACE(request, "\n");
718
	RQ_TRACE(request, "\n");
720
719
721
	GEM_BUG_ON(!irqs_disabled());
722
	lockdep_assert_held(&engine->sched_engine->lock);
720
	lockdep_assert_held(&engine->sched_engine->lock);
723
721
724
	/*
722
	/*
(-)a/drivers/gpu/drm/i915/i915_trace.h (-1 / +5 lines)
Lines 6-11 Link Here
6
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
6
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
7
#define _I915_TRACE_H_
7
#define _I915_TRACE_H_
8
8
9
#ifdef CONFIG_PREEMPT_RT
10
#define NOTRACE
11
#endif
12
9
#include <linux/stringify.h>
13
#include <linux/stringify.h>
10
#include <linux/types.h>
14
#include <linux/types.h>
11
#include <linux/tracepoint.h>
15
#include <linux/tracepoint.h>
Lines 322-328 DEFINE_EVENT(i915_request, i915_request_add, Link Here
322
	     TP_ARGS(rq)
326
	     TP_ARGS(rq)
323
);
327
);
324
328
325
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
329
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
326
DEFINE_EVENT(i915_request, i915_request_guc_submit,
330
DEFINE_EVENT(i915_request, i915_request_guc_submit,
327
	     TP_PROTO(struct i915_request *rq),
331
	     TP_PROTO(struct i915_request *rq),
328
	     TP_ARGS(rq)
332
	     TP_ARGS(rq)
(-)a/drivers/gpu/drm/i915/i915_utils.h (-1 / +1 lines)
Lines 288-294 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) Link Here
288
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
288
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
289
289
290
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
290
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
291
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
291
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
292
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
292
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
293
#else
293
#else
294
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
294
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
(-)a/drivers/tty/serial/21285.c (-4 / +4 lines)
Lines 185-198 static void serial21285_break_ctl(struct uart_port *port, int break_state) Link Here
185
	unsigned long flags;
185
	unsigned long flags;
186
	unsigned int h_lcr;
186
	unsigned int h_lcr;
187
187
188
	spin_lock_irqsave(&port->lock, flags);
188
	uart_port_lock_irqsave(port, &flags);
189
	h_lcr = *CSR_H_UBRLCR;
189
	h_lcr = *CSR_H_UBRLCR;
190
	if (break_state)
190
	if (break_state)
191
		h_lcr |= H_UBRLCR_BREAK;
191
		h_lcr |= H_UBRLCR_BREAK;
192
	else
192
	else
193
		h_lcr &= ~H_UBRLCR_BREAK;
193
		h_lcr &= ~H_UBRLCR_BREAK;
194
	*CSR_H_UBRLCR = h_lcr;
194
	*CSR_H_UBRLCR = h_lcr;
195
	spin_unlock_irqrestore(&port->lock, flags);
195
	uart_port_unlock_irqrestore(port, flags);
196
}
196
}
197
197
198
static int serial21285_startup(struct uart_port *port)
198
static int serial21285_startup(struct uart_port *port)
Lines 272-278 serial21285_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
272
	if (port->fifosize)
272
	if (port->fifosize)
273
		h_lcr |= H_UBRLCR_FIFO;
273
		h_lcr |= H_UBRLCR_FIFO;
274
274
275
	spin_lock_irqsave(&port->lock, flags);
275
	uart_port_lock_irqsave(port, &flags);
276
276
277
	/*
277
	/*
278
	 * Update the per-port timeout.
278
	 * Update the per-port timeout.
Lines 309-315 serial21285_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
309
	*CSR_H_UBRLCR = h_lcr;
309
	*CSR_H_UBRLCR = h_lcr;
310
	*CSR_UARTCON = 1;
310
	*CSR_UARTCON = 1;
311
311
312
	spin_unlock_irqrestore(&port->lock, flags);
312
	uart_port_unlock_irqrestore(port, flags);
313
}
313
}
314
314
315
static const char *serial21285_type(struct uart_port *port)
315
static const char *serial21285_type(struct uart_port *port)
(-)a/drivers/tty/serial/8250/8250_aspeed_vuart.c (-3 / +3 lines)
Lines 288-296 static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle) Link Here
288
	struct uart_8250_port *up = up_to_u8250p(port);
288
	struct uart_8250_port *up = up_to_u8250p(port);
289
	unsigned long flags;
289
	unsigned long flags;
290
290
291
	spin_lock_irqsave(&port->lock, flags);
291
	uart_port_lock_irqsave(port, &flags);
292
	__aspeed_vuart_set_throttle(up, throttle);
292
	__aspeed_vuart_set_throttle(up, throttle);
293
	spin_unlock_irqrestore(&port->lock, flags);
293
	uart_port_unlock_irqrestore(port, flags);
294
}
294
}
295
295
296
static void aspeed_vuart_throttle(struct uart_port *port)
296
static void aspeed_vuart_throttle(struct uart_port *port)
Lines 340-346 static int aspeed_vuart_handle_irq(struct uart_port *port) Link Here
340
	if (iir & UART_IIR_NO_INT)
340
	if (iir & UART_IIR_NO_INT)
341
		return 0;
341
		return 0;
342
342
343
	spin_lock_irqsave(&port->lock, flags);
343
	uart_port_lock_irqsave(port, &flags);
344
344
345
	lsr = serial_port_in(port, UART_LSR);
345
	lsr = serial_port_in(port, UART_LSR);
346
346
(-)a/drivers/tty/serial/8250/8250_bcm7271.c (-14 / +14 lines)
Lines 567-573 static irqreturn_t brcmuart_isr(int irq, void *dev_id) Link Here
567
	if (interrupts == 0)
567
	if (interrupts == 0)
568
		return IRQ_NONE;
568
		return IRQ_NONE;
569
569
570
	spin_lock_irqsave(&up->lock, flags);
570
	uart_port_lock_irqsave(up, &flags);
571
571
572
	/* Clear all interrupts */
572
	/* Clear all interrupts */
573
	udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
573
	udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
Lines 581-587 static irqreturn_t brcmuart_isr(int irq, void *dev_id) Link Here
581
	if ((rval | tval) == 0)
581
	if ((rval | tval) == 0)
582
		dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
582
		dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
583
583
584
	spin_unlock_irqrestore(&up->lock, flags);
584
	uart_port_unlock_irqrestore(up, flags);
585
	return IRQ_HANDLED;
585
	return IRQ_HANDLED;
586
}
586
}
587
587
Lines 608-617 static int brcmuart_startup(struct uart_port *port) Link Here
608
	 *
608
	 *
609
	 * Synchronize UART_IER access against the console.
609
	 * Synchronize UART_IER access against the console.
610
	 */
610
	 */
611
	spin_lock_irq(&port->lock);
611
	uart_port_lock_irq(port);
612
	up->ier &= ~UART_IER_RDI;
612
	up->ier &= ~UART_IER_RDI;
613
	serial_port_out(port, UART_IER, up->ier);
613
	serial_port_out(port, UART_IER, up->ier);
614
	spin_unlock_irq(&port->lock);
614
	uart_port_unlock_irq(port);
615
615
616
	priv->tx_running = false;
616
	priv->tx_running = false;
617
	priv->dma.rx_dma = NULL;
617
	priv->dma.rx_dma = NULL;
Lines 629-635 static void brcmuart_shutdown(struct uart_port *port) Link Here
629
	struct brcmuart_priv *priv = up->port.private_data;
629
	struct brcmuart_priv *priv = up->port.private_data;
630
	unsigned long flags;
630
	unsigned long flags;
631
631
632
	spin_lock_irqsave(&port->lock, flags);
632
	uart_port_lock_irqsave(port, &flags);
633
	priv->shutdown = true;
633
	priv->shutdown = true;
634
	if (priv->dma_enabled) {
634
	if (priv->dma_enabled) {
635
		stop_rx_dma(up);
635
		stop_rx_dma(up);
Lines 645-651 static void brcmuart_shutdown(struct uart_port *port) Link Here
645
	 */
645
	 */
646
	up->dma = NULL;
646
	up->dma = NULL;
647
647
648
	spin_unlock_irqrestore(&port->lock, flags);
648
	uart_port_unlock_irqrestore(port, flags);
649
	serial8250_do_shutdown(port);
649
	serial8250_do_shutdown(port);
650
}
650
}
651
651
Lines 788-794 static int brcmuart_handle_irq(struct uart_port *p) Link Here
788
	 * interrupt but there is no data ready.
788
	 * interrupt but there is no data ready.
789
	 */
789
	 */
790
	if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
790
	if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
791
		spin_lock_irqsave(&p->lock, flags);
791
		uart_port_lock_irqsave(p, &flags);
792
		status = serial_port_in(p, UART_LSR);
792
		status = serial_port_in(p, UART_LSR);
793
		if ((status & UART_LSR_DR) == 0) {
793
		if ((status & UART_LSR_DR) == 0) {
794
794
Lines 813-819 static int brcmuart_handle_irq(struct uart_port *p) Link Here
813
813
814
			handled = 1;
814
			handled = 1;
815
		}
815
		}
816
		spin_unlock_irqrestore(&p->lock, flags);
816
		uart_port_unlock_irqrestore(p, flags);
817
		if (handled)
817
		if (handled)
818
			return 1;
818
			return 1;
819
	}
819
	}
Lines 831-837 static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t) Link Here
831
	if (priv->shutdown)
831
	if (priv->shutdown)
832
		return HRTIMER_NORESTART;
832
		return HRTIMER_NORESTART;
833
833
834
	spin_lock_irqsave(&p->lock, flags);
834
	uart_port_lock_irqsave(p, &flags);
835
	status = serial_port_in(p, UART_LSR);
835
	status = serial_port_in(p, UART_LSR);
836
836
837
	/*
837
	/*
Lines 855-861 static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t) Link Here
855
		status |= UART_MCR_RTS;
855
		status |= UART_MCR_RTS;
856
		serial_port_out(p, UART_MCR, status);
856
		serial_port_out(p, UART_MCR, status);
857
	}
857
	}
858
	spin_unlock_irqrestore(&p->lock, flags);
858
	uart_port_unlock_irqrestore(p, flags);
859
	return HRTIMER_NORESTART;
859
	return HRTIMER_NORESTART;
860
}
860
}
861
861
Lines 1154-1163 static int __maybe_unused brcmuart_suspend(struct device *dev) Link Here
1154
	 * This will prevent resume from enabling RTS before the
1154
	 * This will prevent resume from enabling RTS before the
1155
	 *  baud rate has been restored.
1155
	 *  baud rate has been restored.
1156
	 */
1156
	 */
1157
	spin_lock_irqsave(&port->lock, flags);
1157
	uart_port_lock_irqsave(port, &flags);
1158
	priv->saved_mctrl = port->mctrl;
1158
	priv->saved_mctrl = port->mctrl;
1159
	port->mctrl &= ~TIOCM_RTS;
1159
	port->mctrl &= ~TIOCM_RTS;
1160
	spin_unlock_irqrestore(&port->lock, flags);
1160
	uart_port_unlock_irqrestore(port, flags);
1161
1161
1162
	serial8250_suspend_port(priv->line);
1162
	serial8250_suspend_port(priv->line);
1163
	clk_disable_unprepare(priv->baud_mux_clk);
1163
	clk_disable_unprepare(priv->baud_mux_clk);
Lines 1196-1205 static int __maybe_unused brcmuart_resume(struct device *dev) Link Here
1196
1196
1197
	if (priv->saved_mctrl & TIOCM_RTS) {
1197
	if (priv->saved_mctrl & TIOCM_RTS) {
1198
		/* Restore RTS */
1198
		/* Restore RTS */
1199
		spin_lock_irqsave(&port->lock, flags);
1199
		uart_port_lock_irqsave(port, &flags);
1200
		port->mctrl |= TIOCM_RTS;
1200
		port->mctrl |= TIOCM_RTS;
1201
		port->ops->set_mctrl(port, port->mctrl);
1201
		port->ops->set_mctrl(port, port->mctrl);
1202
		spin_unlock_irqrestore(&port->lock, flags);
1202
		uart_port_unlock_irqrestore(port, flags);
1203
	}
1203
	}
1204
1204
1205
	return 0;
1205
	return 0;
(-)a/drivers/tty/serial/8250/8250_core.c (-7 / +23 lines)
Lines 259-265 static void serial8250_backup_timeout(struct timer_list *t) Link Here
259
	unsigned int iir, ier = 0, lsr;
259
	unsigned int iir, ier = 0, lsr;
260
	unsigned long flags;
260
	unsigned long flags;
261
261
262
	spin_lock_irqsave(&up->port.lock, flags);
262
	uart_port_lock_irqsave(&up->port, &flags);
263
263
264
	/*
264
	/*
265
	 * Must disable interrupts or else we risk racing with the interrupt
265
	 * Must disable interrupts or else we risk racing with the interrupt
Lines 292-298 static void serial8250_backup_timeout(struct timer_list *t) Link Here
292
	if (up->port.irq)
292
	if (up->port.irq)
293
		serial_out(up, UART_IER, ier);
293
		serial_out(up, UART_IER, ier);
294
294
295
	spin_unlock_irqrestore(&up->port.lock, flags);
295
	uart_port_unlock_irqrestore(&up->port, flags);
296
296
297
	/* Standard timer interval plus 0.2s to keep the port running */
297
	/* Standard timer interval plus 0.2s to keep the port running */
298
	mod_timer(&up->timer,
298
	mod_timer(&up->timer,
Lines 600-605 static void univ8250_console_write(struct console *co, const char *s, Link Here
600
	serial8250_console_write(up, s, count);
600
	serial8250_console_write(up, s, count);
601
}
601
}
602
602
603
static bool univ8250_console_write_atomic(struct console *co,
604
					  struct nbcon_write_context *wctxt)
605
{
606
	struct uart_8250_port *up = &serial8250_ports[co->index];
607
608
	return serial8250_console_write_atomic(up, wctxt);
609
}
610
611
static struct uart_port *univ8250_console_uart_port(struct console *con)
612
{
613
	return &serial8250_ports[con->index].port;
614
}
615
603
static int univ8250_console_setup(struct console *co, char *options)
616
static int univ8250_console_setup(struct console *co, char *options)
604
{
617
{
605
	struct uart_8250_port *up;
618
	struct uart_8250_port *up;
Lines 699-709 static int univ8250_console_match(struct console *co, char *name, int idx, Link Here
699
static struct console univ8250_console = {
712
static struct console univ8250_console = {
700
	.name		= "ttyS",
713
	.name		= "ttyS",
701
	.write		= univ8250_console_write,
714
	.write		= univ8250_console_write,
715
	.write_atomic	= univ8250_console_write_atomic,
716
	.write_thread	= univ8250_console_write_atomic,
717
	.uart_port	= univ8250_console_uart_port,
702
	.device		= uart_console_device,
718
	.device		= uart_console_device,
703
	.setup		= univ8250_console_setup,
719
	.setup		= univ8250_console_setup,
704
	.exit		= univ8250_console_exit,
720
	.exit		= univ8250_console_exit,
705
	.match		= univ8250_console_match,
721
	.match		= univ8250_console_match,
706
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
722
	.flags		= CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
707
	.index		= -1,
723
	.index		= -1,
708
	.data		= &serial8250_reg,
724
	.data		= &serial8250_reg,
709
};
725
};
Lines 992-1002 static void serial_8250_overrun_backoff_work(struct work_struct *work) Link Here
992
	struct uart_port *port = &up->port;
1008
	struct uart_port *port = &up->port;
993
	unsigned long flags;
1009
	unsigned long flags;
994
1010
995
	spin_lock_irqsave(&port->lock, flags);
1011
	uart_port_lock_irqsave(port, &flags);
996
	up->ier |= UART_IER_RLSI | UART_IER_RDI;
1012
	up->ier |= UART_IER_RLSI | UART_IER_RDI;
997
	up->port.read_status_mask |= UART_LSR_DR;
1013
	up->port.read_status_mask |= UART_LSR_DR;
998
	serial_out(up, UART_IER, up->ier);
1014
	serial_out(up, UART_IER, up->ier);
999
	spin_unlock_irqrestore(&port->lock, flags);
1015
	uart_port_unlock_irqrestore(port, flags);
1000
}
1016
}
1001
1017
1002
/**
1018
/**
Lines 1194-1202 void serial8250_unregister_port(int line) Link Here
1194
	if (uart->em485) {
1210
	if (uart->em485) {
1195
		unsigned long flags;
1211
		unsigned long flags;
1196
1212
1197
		spin_lock_irqsave(&uart->port.lock, flags);
1213
		uart_port_lock_irqsave(&uart->port, &flags);
1198
		serial8250_em485_destroy(uart);
1214
		serial8250_em485_destroy(uart);
1199
		spin_unlock_irqrestore(&uart->port.lock, flags);
1215
		uart_port_unlock_irqrestore(&uart->port, flags);
1200
	}
1216
	}
1201
1217
1202
	uart_remove_one_port(&serial8250_reg, &uart->port);
1218
	uart_remove_one_port(&serial8250_reg, &uart->port);
(-)a/drivers/tty/serial/8250/8250_dma.c (-4 / +4 lines)
Lines 22-28 static void __dma_tx_complete(void *param) Link Here
22
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
22
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
23
				UART_XMIT_SIZE, DMA_TO_DEVICE);
23
				UART_XMIT_SIZE, DMA_TO_DEVICE);
24
24
25
	spin_lock_irqsave(&p->port.lock, flags);
25
	uart_port_lock_irqsave(&p->port, &flags);
26
26
27
	dma->tx_running = 0;
27
	dma->tx_running = 0;
28
28
Lines 35-41 static void __dma_tx_complete(void *param) Link Here
35
	if (ret || !dma->tx_running)
35
	if (ret || !dma->tx_running)
36
		serial8250_set_THRI(p);
36
		serial8250_set_THRI(p);
37
37
38
	spin_unlock_irqrestore(&p->port.lock, flags);
38
	uart_port_unlock_irqrestore(&p->port, flags);
39
}
39
}
40
40
41
static void __dma_rx_complete(struct uart_8250_port *p)
41
static void __dma_rx_complete(struct uart_8250_port *p)
Lines 70-76 static void dma_rx_complete(void *param) Link Here
70
	struct uart_8250_dma *dma = p->dma;
70
	struct uart_8250_dma *dma = p->dma;
71
	unsigned long flags;
71
	unsigned long flags;
72
72
73
	spin_lock_irqsave(&p->port.lock, flags);
73
	uart_port_lock_irqsave(&p->port, &flags);
74
	if (dma->rx_running)
74
	if (dma->rx_running)
75
		__dma_rx_complete(p);
75
		__dma_rx_complete(p);
76
76
Lines 80-86 static void dma_rx_complete(void *param) Link Here
80
	 */
80
	 */
81
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
81
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
82
		p->dma->rx_dma(p);
82
		p->dma->rx_dma(p);
83
	spin_unlock_irqrestore(&p->port.lock, flags);
83
	uart_port_unlock_irqrestore(&p->port, flags);
84
}
84
}
85
85
86
int serial8250_tx_dma(struct uart_8250_port *p)
86
int serial8250_tx_dma(struct uart_8250_port *p)
(-)a/drivers/tty/serial/8250/8250_dw.c (-4 / +4 lines)
Lines 263-282 static int dw8250_handle_irq(struct uart_port *p) Link Here
263
	 * so we limit the workaround only to non-DMA mode.
263
	 * so we limit the workaround only to non-DMA mode.
264
	 */
264
	 */
265
	if (!up->dma && rx_timeout) {
265
	if (!up->dma && rx_timeout) {
266
		spin_lock_irqsave(&p->lock, flags);
266
		uart_port_lock_irqsave(p, &flags);
267
		status = serial_lsr_in(up);
267
		status = serial_lsr_in(up);
268
268
269
		if (!(status & (UART_LSR_DR | UART_LSR_BI)))
269
		if (!(status & (UART_LSR_DR | UART_LSR_BI)))
270
			(void) p->serial_in(p, UART_RX);
270
			(void) p->serial_in(p, UART_RX);
271
271
272
		spin_unlock_irqrestore(&p->lock, flags);
272
		uart_port_unlock_irqrestore(p, flags);
273
	}
273
	}
274
274
275
	/* Manually stop the Rx DMA transfer when acting as flow controller */
275
	/* Manually stop the Rx DMA transfer when acting as flow controller */
276
	if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
276
	if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
277
		spin_lock_irqsave(&p->lock, flags);
277
		uart_port_lock_irqsave(p, &flags);
278
		status = serial_lsr_in(up);
278
		status = serial_lsr_in(up);
279
		spin_unlock_irqrestore(&p->lock, flags);
279
		uart_port_unlock_irqrestore(p, flags);
280
280
281
		if (status & (UART_LSR_DR | UART_LSR_BI)) {
281
		if (status & (UART_LSR_DR | UART_LSR_BI)) {
282
			dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
282
			dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
(-)a/drivers/tty/serial/8250/8250_exar.c (-2 / +2 lines)
Lines 201-209 static int xr17v35x_startup(struct uart_port *port) Link Here
201
	 *
201
	 *
202
	 * Synchronize UART_IER access against the console.
202
	 * Synchronize UART_IER access against the console.
203
	 */
203
	 */
204
	spin_lock_irq(&port->lock);
204
	uart_port_lock_irq(port);
205
	serial_port_out(port, UART_IER, 0);
205
	serial_port_out(port, UART_IER, 0);
206
	spin_unlock_irq(&port->lock);
206
	uart_port_unlock_irq(port);
207
207
208
	return serial8250_do_startup(port);
208
	return serial8250_do_startup(port);
209
}
209
}
(-)a/drivers/tty/serial/8250/8250_fsl.c (-3 / +3 lines)
Lines 30-40 int fsl8250_handle_irq(struct uart_port *port) Link Here
30
	unsigned int iir;
30
	unsigned int iir;
31
	struct uart_8250_port *up = up_to_u8250p(port);
31
	struct uart_8250_port *up = up_to_u8250p(port);
32
32
33
	spin_lock_irqsave(&up->port.lock, flags);
33
	uart_port_lock_irqsave(&up->port, &flags);
34
34
35
	iir = port->serial_in(port, UART_IIR);
35
	iir = port->serial_in(port, UART_IIR);
36
	if (iir & UART_IIR_NO_INT) {
36
	if (iir & UART_IIR_NO_INT) {
37
		spin_unlock_irqrestore(&up->port.lock, flags);
37
		uart_port_unlock_irqrestore(&up->port, flags);
38
		return 0;
38
		return 0;
39
	}
39
	}
40
40
Lines 54-60 int fsl8250_handle_irq(struct uart_port *port) Link Here
54
	if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
54
	if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
55
		up->lsr_saved_flags &= ~UART_LSR_BI;
55
		up->lsr_saved_flags &= ~UART_LSR_BI;
56
		port->serial_in(port, UART_RX);
56
		port->serial_in(port, UART_RX);
57
		spin_unlock_irqrestore(&up->port.lock, flags);
57
		uart_port_unlock_irqrestore(&up->port, flags);
58
		return 1;
58
		return 1;
59
	}
59
	}
60
60
(-)a/drivers/tty/serial/8250/8250_mtk.c (-4 / +4 lines)
Lines 102-108 static void mtk8250_dma_rx_complete(void *param) Link Here
102
	if (data->rx_status == DMA_RX_SHUTDOWN)
102
	if (data->rx_status == DMA_RX_SHUTDOWN)
103
		return;
103
		return;
104
104
105
	spin_lock_irqsave(&up->port.lock, flags);
105
	uart_port_lock_irqsave(&up->port, &flags);
106
106
107
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
107
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
108
	total = dma->rx_size - state.residue;
108
	total = dma->rx_size - state.residue;
Lines 128-134 static void mtk8250_dma_rx_complete(void *param) Link Here
128
128
129
	mtk8250_rx_dma(up);
129
	mtk8250_rx_dma(up);
130
130
131
	spin_unlock_irqrestore(&up->port.lock, flags);
131
	uart_port_unlock_irqrestore(&up->port, flags);
132
}
132
}
133
133
134
static void mtk8250_rx_dma(struct uart_8250_port *up)
134
static void mtk8250_rx_dma(struct uart_8250_port *up)
Lines 368-374 mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
368
	 * Ok, we're now changing the port state.  Do it with
368
	 * Ok, we're now changing the port state.  Do it with
369
	 * interrupts disabled.
369
	 * interrupts disabled.
370
	 */
370
	 */
371
	spin_lock_irqsave(&port->lock, flags);
371
	uart_port_lock_irqsave(port, &flags);
372
372
373
	/*
373
	/*
374
	 * Update the per-port timeout.
374
	 * Update the per-port timeout.
Lines 416-422 mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
416
	if (uart_console(port))
416
	if (uart_console(port))
417
		up->port.cons->cflag = termios->c_cflag;
417
		up->port.cons->cflag = termios->c_cflag;
418
418
419
	spin_unlock_irqrestore(&port->lock, flags);
419
	uart_port_unlock_irqrestore(port, flags);
420
	/* Don't rewrite B0 */
420
	/* Don't rewrite B0 */
421
	if (tty_termios_baud_rate(termios))
421
	if (tty_termios_baud_rate(termios))
422
		tty_termios_encode_baud_rate(termios, baud, baud);
422
		tty_termios_encode_baud_rate(termios, baud, baud);
(-)a/drivers/tty/serial/8250/8250_omap.c (-26 / +26 lines)
Lines 401-407 static void omap_8250_set_termios(struct uart_port *port, Link Here
401
	 * interrupts disabled.
401
	 * interrupts disabled.
402
	 */
402
	 */
403
	pm_runtime_get_sync(port->dev);
403
	pm_runtime_get_sync(port->dev);
404
	spin_lock_irq(&port->lock);
404
	uart_port_lock_irq(port);
405
405
406
	/*
406
	/*
407
	 * Update the per-port timeout.
407
	 * Update the per-port timeout.
Lines 504-510 static void omap_8250_set_termios(struct uart_port *port, Link Here
504
	}
504
	}
505
	omap8250_restore_regs(up);
505
	omap8250_restore_regs(up);
506
506
507
	spin_unlock_irq(&up->port.lock);
507
	uart_port_unlock_irq(&up->port);
508
	pm_runtime_mark_last_busy(port->dev);
508
	pm_runtime_mark_last_busy(port->dev);
509
	pm_runtime_put_autosuspend(port->dev);
509
	pm_runtime_put_autosuspend(port->dev);
510
510
Lines 529-535 static void omap_8250_pm(struct uart_port *port, unsigned int state, Link Here
529
	pm_runtime_get_sync(port->dev);
529
	pm_runtime_get_sync(port->dev);
530
530
531
	/* Synchronize UART_IER access against the console. */
531
	/* Synchronize UART_IER access against the console. */
532
	spin_lock_irq(&port->lock);
532
	uart_port_lock_irq(port);
533
533
534
	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
534
	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
535
	efr = serial_in(up, UART_EFR);
535
	efr = serial_in(up, UART_EFR);
Lines 541-547 static void omap_8250_pm(struct uart_port *port, unsigned int state, Link Here
541
	serial_out(up, UART_EFR, efr);
541
	serial_out(up, UART_EFR, efr);
542
	serial_out(up, UART_LCR, 0);
542
	serial_out(up, UART_LCR, 0);
543
543
544
	spin_unlock_irq(&port->lock);
544
	uart_port_unlock_irq(port);
545
545
546
	pm_runtime_mark_last_busy(port->dev);
546
	pm_runtime_mark_last_busy(port->dev);
547
	pm_runtime_put_autosuspend(port->dev);
547
	pm_runtime_put_autosuspend(port->dev);
Lines 660-666 static irqreturn_t omap8250_irq(int irq, void *dev_id) Link Here
660
		unsigned long delay;
660
		unsigned long delay;
661
661
662
		/* Synchronize UART_IER access against the console. */
662
		/* Synchronize UART_IER access against the console. */
663
		spin_lock(&port->lock);
663
		uart_port_lock(port);
664
		up->ier = port->serial_in(port, UART_IER);
664
		up->ier = port->serial_in(port, UART_IER);
665
		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
665
		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
666
			port->ops->stop_rx(port);
666
			port->ops->stop_rx(port);
Lines 670-676 static irqreturn_t omap8250_irq(int irq, void *dev_id) Link Here
670
			 */
670
			 */
671
			cancel_delayed_work(&up->overrun_backoff);
671
			cancel_delayed_work(&up->overrun_backoff);
672
		}
672
		}
673
		spin_unlock(&port->lock);
673
		uart_port_unlock(port);
674
674
675
		delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
675
		delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
676
		schedule_delayed_work(&up->overrun_backoff, delay);
676
		schedule_delayed_work(&up->overrun_backoff, delay);
Lines 717-726 static int omap_8250_startup(struct uart_port *port) Link Here
717
	}
717
	}
718
718
719
	/* Synchronize UART_IER access against the console. */
719
	/* Synchronize UART_IER access against the console. */
720
	spin_lock_irq(&port->lock);
720
	uart_port_lock_irq(port);
721
	up->ier = UART_IER_RLSI | UART_IER_RDI;
721
	up->ier = UART_IER_RLSI | UART_IER_RDI;
722
	serial_out(up, UART_IER, up->ier);
722
	serial_out(up, UART_IER, up->ier);
723
	spin_unlock_irq(&port->lock);
723
	uart_port_unlock_irq(port);
724
724
725
#ifdef CONFIG_PM
725
#ifdef CONFIG_PM
726
	up->capabilities |= UART_CAP_RPM;
726
	up->capabilities |= UART_CAP_RPM;
Lines 733-741 static int omap_8250_startup(struct uart_port *port) Link Here
733
	serial_out(up, UART_OMAP_WER, priv->wer);
733
	serial_out(up, UART_OMAP_WER, priv->wer);
734
734
735
	if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
735
	if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
736
		spin_lock_irq(&port->lock);
736
		uart_port_lock_irq(port);
737
		up->dma->rx_dma(up);
737
		up->dma->rx_dma(up);
738
		spin_unlock_irq(&port->lock);
738
		uart_port_unlock_irq(port);
739
	}
739
	}
740
740
741
	enable_irq(up->port.irq);
741
	enable_irq(up->port.irq);
Lines 761-770 static void omap_8250_shutdown(struct uart_port *port) Link Here
761
		serial_out(up, UART_OMAP_EFR2, 0x0);
761
		serial_out(up, UART_OMAP_EFR2, 0x0);
762
762
763
	/* Synchronize UART_IER access against the console. */
763
	/* Synchronize UART_IER access against the console. */
764
	spin_lock_irq(&port->lock);
764
	uart_port_lock_irq(port);
765
	up->ier = 0;
765
	up->ier = 0;
766
	serial_out(up, UART_IER, 0);
766
	serial_out(up, UART_IER, 0);
767
	spin_unlock_irq(&port->lock);
767
	uart_port_unlock_irq(port);
768
	disable_irq_nosync(up->port.irq);
768
	disable_irq_nosync(up->port.irq);
769
	dev_pm_clear_wake_irq(port->dev);
769
	dev_pm_clear_wake_irq(port->dev);
770
770
Lines 789-798 static void omap_8250_throttle(struct uart_port *port) Link Here
789
789
790
	pm_runtime_get_sync(port->dev);
790
	pm_runtime_get_sync(port->dev);
791
791
792
	spin_lock_irqsave(&port->lock, flags);
792
	uart_port_lock_irqsave(port, &flags);
793
	port->ops->stop_rx(port);
793
	port->ops->stop_rx(port);
794
	priv->throttled = true;
794
	priv->throttled = true;
795
	spin_unlock_irqrestore(&port->lock, flags);
795
	uart_port_unlock_irqrestore(port, flags);
796
796
797
	pm_runtime_mark_last_busy(port->dev);
797
	pm_runtime_mark_last_busy(port->dev);
798
	pm_runtime_put_autosuspend(port->dev);
798
	pm_runtime_put_autosuspend(port->dev);
Lines 807-820 static void omap_8250_unthrottle(struct uart_port *port) Link Here
807
	pm_runtime_get_sync(port->dev);
807
	pm_runtime_get_sync(port->dev);
808
808
809
	/* Synchronize UART_IER access against the console. */
809
	/* Synchronize UART_IER access against the console. */
810
	spin_lock_irqsave(&port->lock, flags);
810
	uart_port_lock_irqsave(port, &flags);
811
	priv->throttled = false;
811
	priv->throttled = false;
812
	if (up->dma)
812
	if (up->dma)
813
		up->dma->rx_dma(up);
813
		up->dma->rx_dma(up);
814
	up->ier |= UART_IER_RLSI | UART_IER_RDI;
814
	up->ier |= UART_IER_RLSI | UART_IER_RDI;
815
	port->read_status_mask |= UART_LSR_DR;
815
	port->read_status_mask |= UART_LSR_DR;
816
	serial_out(up, UART_IER, up->ier);
816
	serial_out(up, UART_IER, up->ier);
817
	spin_unlock_irqrestore(&port->lock, flags);
817
	uart_port_unlock_irqrestore(port, flags);
818
818
819
	pm_runtime_mark_last_busy(port->dev);
819
	pm_runtime_mark_last_busy(port->dev);
820
	pm_runtime_put_autosuspend(port->dev);
820
	pm_runtime_put_autosuspend(port->dev);
Lines 958-964 static void __dma_rx_complete(void *param) Link Here
958
	unsigned long flags;
958
	unsigned long flags;
959
959
960
	/* Synchronize UART_IER access against the console. */
960
	/* Synchronize UART_IER access against the console. */
961
	spin_lock_irqsave(&p->port.lock, flags);
961
	uart_port_lock_irqsave(&p->port, &flags);
962
962
963
	/*
963
	/*
964
	 * If the tx status is not DMA_COMPLETE, then this is a delayed
964
	 * If the tx status is not DMA_COMPLETE, then this is a delayed
Lines 967-973 static void __dma_rx_complete(void *param) Link Here
967
	 */
967
	 */
968
	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
968
	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
969
			DMA_COMPLETE) {
969
			DMA_COMPLETE) {
970
		spin_unlock_irqrestore(&p->port.lock, flags);
970
		uart_port_unlock_irqrestore(&p->port, flags);
971
		return;
971
		return;
972
	}
972
	}
973
	__dma_rx_do_complete(p);
973
	__dma_rx_do_complete(p);
Lines 978-984 static void __dma_rx_complete(void *param) Link Here
978
			omap_8250_rx_dma(p);
978
			omap_8250_rx_dma(p);
979
	}
979
	}
980
980
981
	spin_unlock_irqrestore(&p->port.lock, flags);
981
	uart_port_unlock_irqrestore(&p->port, flags);
982
}
982
}
983
983
984
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
984
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
Lines 1083-1089 static void omap_8250_dma_tx_complete(void *param) Link Here
1083
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
1083
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
1084
				UART_XMIT_SIZE, DMA_TO_DEVICE);
1084
				UART_XMIT_SIZE, DMA_TO_DEVICE);
1085
1085
1086
	spin_lock_irqsave(&p->port.lock, flags);
1086
	uart_port_lock_irqsave(&p->port, &flags);
1087
1087
1088
	dma->tx_running = 0;
1088
	dma->tx_running = 0;
1089
1089
Lines 1112-1118 static void omap_8250_dma_tx_complete(void *param) Link Here
1112
		serial8250_set_THRI(p);
1112
		serial8250_set_THRI(p);
1113
	}
1113
	}
1114
1114
1115
	spin_unlock_irqrestore(&p->port.lock, flags);
1115
	uart_port_unlock_irqrestore(&p->port, flags);
1116
}
1116
}
1117
1117
1118
static int omap_8250_tx_dma(struct uart_8250_port *p)
1118
static int omap_8250_tx_dma(struct uart_8250_port *p)
Lines 1278-1284 static int omap_8250_dma_handle_irq(struct uart_port *port) Link Here
1278
		return IRQ_HANDLED;
1278
		return IRQ_HANDLED;
1279
	}
1279
	}
1280
1280
1281
	spin_lock(&port->lock);
1281
	uart_port_lock(port);
1282
1282
1283
	status = serial_port_in(port, UART_LSR);
1283
	status = serial_port_in(port, UART_LSR);
1284
1284
Lines 1756-1770 static int omap8250_runtime_resume(struct device *dev) Link Here
1756
		up = serial8250_get_port(priv->line);
1756
		up = serial8250_get_port(priv->line);
1757
1757
1758
	if (up && omap8250_lost_context(up)) {
1758
	if (up && omap8250_lost_context(up)) {
1759
		spin_lock_irq(&up->port.lock);
1759
		uart_port_lock_irq(&up->port);
1760
		omap8250_restore_regs(up);
1760
		omap8250_restore_regs(up);
1761
		spin_unlock_irq(&up->port.lock);
1761
		uart_port_unlock_irq(&up->port);
1762
	}
1762
	}
1763
1763
1764
	if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
1764
	if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
1765
		spin_lock_irq(&up->port.lock);
1765
		uart_port_lock_irq(&up->port);
1766
		omap_8250_rx_dma(up);
1766
		omap_8250_rx_dma(up);
1767
		spin_unlock_irq(&up->port.lock);
1767
		uart_port_unlock_irq(&up->port);
1768
	}
1768
	}
1769
1769
1770
	priv->latency = priv->calc_latency;
1770
	priv->latency = priv->calc_latency;
(-)a/drivers/tty/serial/8250/8250_pci1xxxx.c (-4 / +4 lines)
Lines 225-234 static bool pci1xxxx_port_suspend(int line) Link Here
225
	if (port->suspended == 0 && port->dev) {
225
	if (port->suspended == 0 && port->dev) {
226
		wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
226
		wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
227
227
228
		spin_lock_irqsave(&port->lock, flags);
228
		uart_port_lock_irqsave(port, &flags);
229
		port->mctrl &= ~TIOCM_OUT2;
229
		port->mctrl &= ~TIOCM_OUT2;
230
		port->ops->set_mctrl(port, port->mctrl);
230
		port->ops->set_mctrl(port, port->mctrl);
231
		spin_unlock_irqrestore(&port->lock, flags);
231
		uart_port_unlock_irqrestore(port, flags);
232
232
233
		ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
233
		ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
234
	}
234
	}
Lines 251-260 static void pci1xxxx_port_resume(int line) Link Here
251
	writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
251
	writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
252
252
253
	if (port->suspended == 0) {
253
	if (port->suspended == 0) {
254
		spin_lock_irqsave(&port->lock, flags);
254
		uart_port_lock_irqsave(port, &flags);
255
		port->mctrl |= TIOCM_OUT2;
255
		port->mctrl |= TIOCM_OUT2;
256
		port->ops->set_mctrl(port, port->mctrl);
256
		port->ops->set_mctrl(port, port->mctrl);
257
		spin_unlock_irqrestore(&port->lock, flags);
257
		uart_port_unlock_irqrestore(port, flags);
258
	}
258
	}
259
	mutex_unlock(&tport->mutex);
259
	mutex_unlock(&tport->mutex);
260
}
260
}
(-)a/drivers/tty/serial/8250/8250_port.c (-55 / +102 lines)
Lines 557-562 static int serial8250_em485_init(struct uart_8250_port *p) Link Here
557
	if (!p->em485)
557
	if (!p->em485)
558
		return -ENOMEM;
558
		return -ENOMEM;
559
559
560
	if (uart_console(&p->port))
561
		dev_warn(p->port.dev, "no atomic printing for rs485 consoles\n");
562
560
	hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
563
	hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
561
		     HRTIMER_MODE_REL);
564
		     HRTIMER_MODE_REL);
562
	hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
565
	hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
Lines 689-695 static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) Link Here
689
692
690
	if (p->capabilities & UART_CAP_SLEEP) {
693
	if (p->capabilities & UART_CAP_SLEEP) {
691
		/* Synchronize UART_IER access against the console. */
694
		/* Synchronize UART_IER access against the console. */
692
		spin_lock_irq(&p->port.lock);
695
		uart_port_lock_irq(&p->port);
693
		if (p->capabilities & UART_CAP_EFR) {
696
		if (p->capabilities & UART_CAP_EFR) {
694
			lcr = serial_in(p, UART_LCR);
697
			lcr = serial_in(p, UART_LCR);
695
			efr = serial_in(p, UART_EFR);
698
			efr = serial_in(p, UART_EFR);
Lines 703-715 static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) Link Here
703
			serial_out(p, UART_EFR, efr);
706
			serial_out(p, UART_EFR, efr);
704
			serial_out(p, UART_LCR, lcr);
707
			serial_out(p, UART_LCR, lcr);
705
		}
708
		}
706
		spin_unlock_irq(&p->port.lock);
709
		uart_port_unlock_irq(&p->port);
707
	}
710
	}
708
711
709
	serial8250_rpm_put(p);
712
	serial8250_rpm_put(p);
710
}
713
}
711
714
712
static void serial8250_clear_IER(struct uart_8250_port *up)
715
/* Only to be used by write_atomic(), which does not require port lock. */
716
static void __serial8250_clear_IER(struct uart_8250_port *up)
713
{
717
{
714
	if (up->capabilities & UART_CAP_UUE)
718
	if (up->capabilities & UART_CAP_UUE)
715
		serial_out(up, UART_IER, UART_IER_UUE);
719
		serial_out(up, UART_IER, UART_IER_UUE);
Lines 717-722 static void serial8250_clear_IER(struct uart_8250_port *up) Link Here
717
		serial_out(up, UART_IER, 0);
721
		serial_out(up, UART_IER, 0);
718
}
722
}
719
723
724
static inline void serial8250_clear_IER(struct uart_8250_port *up)
725
{
726
	/* Port locked to synchronize UART_IER access against the console. */
727
	lockdep_assert_held_once(&up->port.lock);
728
729
	__serial8250_clear_IER(up);
730
}
731
720
#ifdef CONFIG_SERIAL_8250_RSA
732
#ifdef CONFIG_SERIAL_8250_RSA
721
/*
733
/*
722
 * Attempts to turn on the RSA FIFO.  Returns zero on failure.
734
 * Attempts to turn on the RSA FIFO.  Returns zero on failure.
Lines 746-754 static void enable_rsa(struct uart_8250_port *up) Link Here
746
{
758
{
747
	if (up->port.type == PORT_RSA) {
759
	if (up->port.type == PORT_RSA) {
748
		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
760
		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
749
			spin_lock_irq(&up->port.lock);
761
			uart_port_lock_irq(&up->port);
750
			__enable_rsa(up);
762
			__enable_rsa(up);
751
			spin_unlock_irq(&up->port.lock);
763
			uart_port_unlock_irq(&up->port);
752
		}
764
		}
753
		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
765
		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
754
			serial_out(up, UART_RSA_FRR, 0);
766
			serial_out(up, UART_RSA_FRR, 0);
Lines 768-774 static void disable_rsa(struct uart_8250_port *up) Link Here
768
780
769
	if (up->port.type == PORT_RSA &&
781
	if (up->port.type == PORT_RSA &&
770
	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
782
	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
771
		spin_lock_irq(&up->port.lock);
783
		uart_port_lock_irq(&up->port);
772
784
773
		mode = serial_in(up, UART_RSA_MSR);
785
		mode = serial_in(up, UART_RSA_MSR);
774
		result = !(mode & UART_RSA_MSR_FIFO);
786
		result = !(mode & UART_RSA_MSR_FIFO);
Lines 781-787 static void disable_rsa(struct uart_8250_port *up) Link Here
781
793
782
		if (result)
794
		if (result)
783
			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
795
			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
784
		spin_unlock_irq(&up->port.lock);
796
		uart_port_unlock_irq(&up->port);
785
	}
797
	}
786
}
798
}
787
#endif /* CONFIG_SERIAL_8250_RSA */
799
#endif /* CONFIG_SERIAL_8250_RSA */
Lines 1172-1178 static void autoconfig(struct uart_8250_port *up) Link Here
1172
	 *
1184
	 *
1173
	 * Synchronize UART_IER access against the console.
1185
	 * Synchronize UART_IER access against the console.
1174
	 */
1186
	 */
1175
	spin_lock_irqsave(&port->lock, flags);
1187
	uart_port_lock_irqsave(port, &flags);
1176
1188
1177
	up->capabilities = 0;
1189
	up->capabilities = 0;
1178
	up->bugs = 0;
1190
	up->bugs = 0;
Lines 1211-1217 static void autoconfig(struct uart_8250_port *up) Link Here
1211
			/*
1223
			/*
1212
			 * We failed; there's nothing here
1224
			 * We failed; there's nothing here
1213
			 */
1225
			 */
1214
			spin_unlock_irqrestore(&port->lock, flags);
1226
			uart_port_unlock_irqrestore(port, flags);
1215
			DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
1227
			DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
1216
				       scratch2, scratch3);
1228
				       scratch2, scratch3);
1217
			goto out;
1229
			goto out;
Lines 1235-1241 static void autoconfig(struct uart_8250_port *up) Link Here
1235
		status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
1247
		status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
1236
		serial8250_out_MCR(up, save_mcr);
1248
		serial8250_out_MCR(up, save_mcr);
1237
		if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
1249
		if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
1238
			spin_unlock_irqrestore(&port->lock, flags);
1250
			uart_port_unlock_irqrestore(port, flags);
1239
			DEBUG_AUTOCONF("LOOP test failed (%02x) ",
1251
			DEBUG_AUTOCONF("LOOP test failed (%02x) ",
1240
				       status1);
1252
				       status1);
1241
			goto out;
1253
			goto out;
Lines 1304-1310 static void autoconfig(struct uart_8250_port *up) Link Here
1304
	serial8250_clear_IER(up);
1316
	serial8250_clear_IER(up);
1305
1317
1306
out_unlock:
1318
out_unlock:
1307
	spin_unlock_irqrestore(&port->lock, flags);
1319
	uart_port_unlock_irqrestore(port, flags);
1308
1320
1309
	/*
1321
	/*
1310
	 * Check if the device is a Fintek F81216A
1322
	 * Check if the device is a Fintek F81216A
Lines 1344-1352 static void autoconfig_irq(struct uart_8250_port *up) Link Here
1344
	probe_irq_off(probe_irq_on());
1356
	probe_irq_off(probe_irq_on());
1345
	save_mcr = serial8250_in_MCR(up);
1357
	save_mcr = serial8250_in_MCR(up);
1346
	/* Synchronize UART_IER access against the console. */
1358
	/* Synchronize UART_IER access against the console. */
1347
	spin_lock_irq(&port->lock);
1359
	uart_port_lock_irq(port);
1348
	save_ier = serial_in(up, UART_IER);
1360
	save_ier = serial_in(up, UART_IER);
1349
	spin_unlock_irq(&port->lock);
1361
	uart_port_unlock_irq(port);
1350
	serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
1362
	serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
1351
1363
1352
	irqs = probe_irq_on();
1364
	irqs = probe_irq_on();
Lines 1359-1367 static void autoconfig_irq(struct uart_8250_port *up) Link Here
1359
			UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
1371
			UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
1360
	}
1372
	}
1361
	/* Synchronize UART_IER access against the console. */
1373
	/* Synchronize UART_IER access against the console. */
1362
	spin_lock_irq(&port->lock);
1374
	uart_port_lock_irq(port);
1363
	serial_out(up, UART_IER, UART_IER_ALL_INTR);
1375
	serial_out(up, UART_IER, UART_IER_ALL_INTR);
1364
	spin_unlock_irq(&port->lock);
1376
	uart_port_unlock_irq(port);
1365
	serial_in(up, UART_LSR);
1377
	serial_in(up, UART_LSR);
1366
	serial_in(up, UART_RX);
1378
	serial_in(up, UART_RX);
1367
	serial_in(up, UART_IIR);
1379
	serial_in(up, UART_IIR);
Lines 1372-1380 static void autoconfig_irq(struct uart_8250_port *up) Link Here
1372
1384
1373
	serial8250_out_MCR(up, save_mcr);
1385
	serial8250_out_MCR(up, save_mcr);
1374
	/* Synchronize UART_IER access against the console. */
1386
	/* Synchronize UART_IER access against the console. */
1375
	spin_lock_irq(&port->lock);
1387
	uart_port_lock_irq(port);
1376
	serial_out(up, UART_IER, save_ier);
1388
	serial_out(up, UART_IER, save_ier);
1377
	spin_unlock_irq(&port->lock);
1389
	uart_port_unlock_irq(port);
1378
1390
1379
	if (port->flags & UPF_FOURPORT)
1391
	if (port->flags & UPF_FOURPORT)
1380
		outb_p(save_ICP, ICP);
1392
		outb_p(save_ICP, ICP);
Lines 1442-1454 static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t) Link Here
1442
	unsigned long flags;
1454
	unsigned long flags;
1443
1455
1444
	serial8250_rpm_get(p);
1456
	serial8250_rpm_get(p);
1445
	spin_lock_irqsave(&p->port.lock, flags);
1457
	uart_port_lock_irqsave(&p->port, &flags);
1446
	if (em485->active_timer == &em485->stop_tx_timer) {
1458
	if (em485->active_timer == &em485->stop_tx_timer) {
1447
		p->rs485_stop_tx(p);
1459
		p->rs485_stop_tx(p);
1448
		em485->active_timer = NULL;
1460
		em485->active_timer = NULL;
1449
		em485->tx_stopped = true;
1461
		em485->tx_stopped = true;
1450
	}
1462
	}
1451
	spin_unlock_irqrestore(&p->port.lock, flags);
1463
	uart_port_unlock_irqrestore(&p->port, flags);
1452
	serial8250_rpm_put(p);
1464
	serial8250_rpm_put(p);
1453
1465
1454
	return HRTIMER_NORESTART;
1466
	return HRTIMER_NORESTART;
Lines 1630-1641 static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t) Link Here
1630
	struct uart_8250_port *p = em485->port;
1642
	struct uart_8250_port *p = em485->port;
1631
	unsigned long flags;
1643
	unsigned long flags;
1632
1644
1633
	spin_lock_irqsave(&p->port.lock, flags);
1645
	uart_port_lock_irqsave(&p->port, &flags);
1634
	if (em485->active_timer == &em485->start_tx_timer) {
1646
	if (em485->active_timer == &em485->start_tx_timer) {
1635
		__start_tx(&p->port);
1647
		__start_tx(&p->port);
1636
		em485->active_timer = NULL;
1648
		em485->active_timer = NULL;
1637
	}
1649
	}
1638
	spin_unlock_irqrestore(&p->port.lock, flags);
1650
	uart_port_unlock_irqrestore(&p->port, flags);
1639
1651
1640
	return HRTIMER_NORESTART;
1652
	return HRTIMER_NORESTART;
1641
}
1653
}
Lines 1918-1924 int serial8250_handle_irq(struct uart_port *port, unsigned int iir) Link Here
1918
	if (iir & UART_IIR_NO_INT)
1930
	if (iir & UART_IIR_NO_INT)
1919
		return 0;
1931
		return 0;
1920
1932
1921
	spin_lock_irqsave(&port->lock, flags);
1933
	uart_port_lock_irqsave(port, &flags);
1922
1934
1923
	status = serial_lsr_in(up);
1935
	status = serial_lsr_in(up);
1924
1936
Lines 1988-1996 static int serial8250_tx_threshold_handle_irq(struct uart_port *port) Link Here
1988
	if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
2000
	if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
1989
		struct uart_8250_port *up = up_to_u8250p(port);
2001
		struct uart_8250_port *up = up_to_u8250p(port);
1990
2002
1991
		spin_lock_irqsave(&port->lock, flags);
2003
		uart_port_lock_irqsave(port, &flags);
1992
		serial8250_tx_chars(up);
2004
		serial8250_tx_chars(up);
1993
		spin_unlock_irqrestore(&port->lock, flags);
2005
		uart_port_unlock_irqrestore(port, flags);
1994
	}
2006
	}
1995
2007
1996
	iir = serial_port_in(port, UART_IIR);
2008
	iir = serial_port_in(port, UART_IIR);
Lines 2005-2014 static unsigned int serial8250_tx_empty(struct uart_port *port) Link Here
2005
2017
2006
	serial8250_rpm_get(up);
2018
	serial8250_rpm_get(up);
2007
2019
2008
	spin_lock_irqsave(&port->lock, flags);
2020
	uart_port_lock_irqsave(port, &flags);
2009
	if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
2021
	if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
2010
		result = TIOCSER_TEMT;
2022
		result = TIOCSER_TEMT;
2011
	spin_unlock_irqrestore(&port->lock, flags);
2023
	uart_port_unlock_irqrestore(port, flags);
2012
2024
2013
	serial8250_rpm_put(up);
2025
	serial8250_rpm_put(up);
2014
2026
Lines 2070-2082 static void serial8250_break_ctl(struct uart_port *port, int break_state) Link Here
2070
	unsigned long flags;
2082
	unsigned long flags;
2071
2083
2072
	serial8250_rpm_get(up);
2084
	serial8250_rpm_get(up);
2073
	spin_lock_irqsave(&port->lock, flags);
2085
	uart_port_lock_irqsave(port, &flags);
2074
	if (break_state == -1)
2086
	if (break_state == -1)
2075
		up->lcr |= UART_LCR_SBC;
2087
		up->lcr |= UART_LCR_SBC;
2076
	else
2088
	else
2077
		up->lcr &= ~UART_LCR_SBC;
2089
		up->lcr &= ~UART_LCR_SBC;
2078
	serial_port_out(port, UART_LCR, up->lcr);
2090
	serial_port_out(port, UART_LCR, up->lcr);
2079
	spin_unlock_irqrestore(&port->lock, flags);
2091
	uart_port_unlock_irqrestore(port, flags);
2080
	serial8250_rpm_put(up);
2092
	serial8250_rpm_put(up);
2081
}
2093
}
2082
2094
Lines 2211-2217 int serial8250_do_startup(struct uart_port *port) Link Here
2211
		 *
2223
		 *
2212
		 * Synchronize UART_IER access against the console.
2224
		 * Synchronize UART_IER access against the console.
2213
		 */
2225
		 */
2214
		spin_lock_irqsave(&port->lock, flags);
2226
		uart_port_lock_irqsave(port, &flags);
2215
		up->acr = 0;
2227
		up->acr = 0;
2216
		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
2228
		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
2217
		serial_port_out(port, UART_EFR, UART_EFR_ECB);
2229
		serial_port_out(port, UART_EFR, UART_EFR_ECB);
Lines 2221-2227 int serial8250_do_startup(struct uart_port *port) Link Here
2221
		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
2233
		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
2222
		serial_port_out(port, UART_EFR, UART_EFR_ECB);
2234
		serial_port_out(port, UART_EFR, UART_EFR_ECB);
2223
		serial_port_out(port, UART_LCR, 0);
2235
		serial_port_out(port, UART_LCR, 0);
2224
		spin_unlock_irqrestore(&port->lock, flags);
2236
		uart_port_unlock_irqrestore(port, flags);
2225
	}
2237
	}
2226
2238
2227
	if (port->type == PORT_DA830) {
2239
	if (port->type == PORT_DA830) {
Lines 2230-2239 int serial8250_do_startup(struct uart_port *port) Link Here
2230
		 *
2242
		 *
2231
		 * Synchronize UART_IER access against the console.
2243
		 * Synchronize UART_IER access against the console.
2232
		 */
2244
		 */
2233
		spin_lock_irqsave(&port->lock, flags);
2245
		uart_port_lock_irqsave(port, &flags);
2234
		serial_port_out(port, UART_IER, 0);
2246
		serial_port_out(port, UART_IER, 0);
2235
		serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
2247
		serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
2236
		spin_unlock_irqrestore(&port->lock, flags);
2248
		uart_port_unlock_irqrestore(port, flags);
2237
		mdelay(10);
2249
		mdelay(10);
2238
2250
2239
		/* Enable Tx, Rx and free run mode */
2251
		/* Enable Tx, Rx and free run mode */
Lines 2347-2353 int serial8250_do_startup(struct uart_port *port) Link Here
2347
		 *
2359
		 *
2348
		 * Synchronize UART_IER access against the console.
2360
		 * Synchronize UART_IER access against the console.
2349
		 */
2361
		 */
2350
		spin_lock_irqsave(&port->lock, flags);
2362
		uart_port_lock_irqsave(port, &flags);
2351
2363
2352
		wait_for_xmitr(up, UART_LSR_THRE);
2364
		wait_for_xmitr(up, UART_LSR_THRE);
2353
		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
2365
		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
Lines 2359-2365 int serial8250_do_startup(struct uart_port *port) Link Here
2359
		iir = serial_port_in(port, UART_IIR);
2371
		iir = serial_port_in(port, UART_IIR);
2360
		serial_port_out(port, UART_IER, 0);
2372
		serial_port_out(port, UART_IER, 0);
2361
2373
2362
		spin_unlock_irqrestore(&port->lock, flags);
2374
		uart_port_unlock_irqrestore(port, flags);
2363
2375
2364
		if (port->irqflags & IRQF_SHARED)
2376
		if (port->irqflags & IRQF_SHARED)
2365
			enable_irq(port->irq);
2377
			enable_irq(port->irq);
Lines 2382-2388 int serial8250_do_startup(struct uart_port *port) Link Here
2382
	 */
2394
	 */
2383
	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
2395
	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
2384
2396
2385
	spin_lock_irqsave(&port->lock, flags);
2397
	uart_port_lock_irqsave(port, &flags);
2386
	if (up->port.flags & UPF_FOURPORT) {
2398
	if (up->port.flags & UPF_FOURPORT) {
2387
		if (!up->port.irq)
2399
		if (!up->port.irq)
2388
			up->port.mctrl |= TIOCM_OUT1;
2400
			up->port.mctrl |= TIOCM_OUT1;
Lines 2428-2434 int serial8250_do_startup(struct uart_port *port) Link Here
2428
	}
2440
	}
2429
2441
2430
dont_test_tx_en:
2442
dont_test_tx_en:
2431
	spin_unlock_irqrestore(&port->lock, flags);
2443
	uart_port_unlock_irqrestore(port, flags);
2432
2444
2433
	/*
2445
	/*
2434
	 * Clear the interrupt registers again for luck, and clear the
2446
	 * Clear the interrupt registers again for luck, and clear the
Lines 2499-2515 void serial8250_do_shutdown(struct uart_port *port) Link Here
2499
	 *
2511
	 *
2500
	 * Synchronize UART_IER access against the console.
2512
	 * Synchronize UART_IER access against the console.
2501
	 */
2513
	 */
2502
	spin_lock_irqsave(&port->lock, flags);
2514
	uart_port_lock_irqsave(port, &flags);
2503
	up->ier = 0;
2515
	up->ier = 0;
2504
	serial_port_out(port, UART_IER, 0);
2516
	serial_port_out(port, UART_IER, 0);
2505
	spin_unlock_irqrestore(&port->lock, flags);
2517
	uart_port_unlock_irqrestore(port, flags);
2506
2518
2507
	synchronize_irq(port->irq);
2519
	synchronize_irq(port->irq);
2508
2520
2509
	if (up->dma)
2521
	if (up->dma)
2510
		serial8250_release_dma(up);
2522
		serial8250_release_dma(up);
2511
2523
2512
	spin_lock_irqsave(&port->lock, flags);
2524
	uart_port_lock_irqsave(port, &flags);
2513
	if (port->flags & UPF_FOURPORT) {
2525
	if (port->flags & UPF_FOURPORT) {
2514
		/* reset interrupts on the AST Fourport board */
2526
		/* reset interrupts on the AST Fourport board */
2515
		inb((port->iobase & 0xfe0) | 0x1f);
2527
		inb((port->iobase & 0xfe0) | 0x1f);
Lines 2518-2524 void serial8250_do_shutdown(struct uart_port *port) Link Here
2518
		port->mctrl &= ~TIOCM_OUT2;
2530
		port->mctrl &= ~TIOCM_OUT2;
2519
2531
2520
	serial8250_set_mctrl(port, port->mctrl);
2532
	serial8250_set_mctrl(port, port->mctrl);
2521
	spin_unlock_irqrestore(&port->lock, flags);
2533
	uart_port_unlock_irqrestore(port, flags);
2522
2534
2523
	/*
2535
	/*
2524
	 * Disable break condition and FIFOs
2536
	 * Disable break condition and FIFOs
Lines 2754-2767 void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk) Link Here
2754
	quot = serial8250_get_divisor(port, baud, &frac);
2766
	quot = serial8250_get_divisor(port, baud, &frac);
2755
2767
2756
	serial8250_rpm_get(up);
2768
	serial8250_rpm_get(up);
2757
	spin_lock_irqsave(&port->lock, flags);
2769
	uart_port_lock_irqsave(port, &flags);
2758
2770
2759
	uart_update_timeout(port, termios->c_cflag, baud);
2771
	uart_update_timeout(port, termios->c_cflag, baud);
2760
2772
2761
	serial8250_set_divisor(port, baud, quot, frac);
2773
	serial8250_set_divisor(port, baud, quot, frac);
2762
	serial_port_out(port, UART_LCR, up->lcr);
2774
	serial_port_out(port, UART_LCR, up->lcr);
2763
2775
2764
	spin_unlock_irqrestore(&port->lock, flags);
2776
	uart_port_unlock_irqrestore(port, flags);
2765
	serial8250_rpm_put(up);
2777
	serial8250_rpm_put(up);
2766
2778
2767
out_unlock:
2779
out_unlock:
Lines 2798-2804 serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2798
	 * Synchronize UART_IER access against the console.
2810
	 * Synchronize UART_IER access against the console.
2799
	 */
2811
	 */
2800
	serial8250_rpm_get(up);
2812
	serial8250_rpm_get(up);
2801
	spin_lock_irqsave(&port->lock, flags);
2813
	uart_port_lock_irqsave(port, &flags);
2802
2814
2803
	up->lcr = cval;					/* Save computed LCR */
2815
	up->lcr = cval;					/* Save computed LCR */
2804
2816
Lines 2901-2907 serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2901
		serial_port_out(port, UART_FCR, up->fcr);	/* set fcr */
2913
		serial_port_out(port, UART_FCR, up->fcr);	/* set fcr */
2902
	}
2914
	}
2903
	serial8250_set_mctrl(port, port->mctrl);
2915
	serial8250_set_mctrl(port, port->mctrl);
2904
	spin_unlock_irqrestore(&port->lock, flags);
2916
	uart_port_unlock_irqrestore(port, flags);
2905
	serial8250_rpm_put(up);
2917
	serial8250_rpm_put(up);
2906
2918
2907
	/* Don't rewrite B0 */
2919
	/* Don't rewrite B0 */
Lines 2924-2938 void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios) Link Here
2924
{
2936
{
2925
	if (termios->c_line == N_PPS) {
2937
	if (termios->c_line == N_PPS) {
2926
		port->flags |= UPF_HARDPPS_CD;
2938
		port->flags |= UPF_HARDPPS_CD;
2927
		spin_lock_irq(&port->lock);
2939
		uart_port_lock_irq(port);
2928
		serial8250_enable_ms(port);
2940
		serial8250_enable_ms(port);
2929
		spin_unlock_irq(&port->lock);
2941
		uart_port_unlock_irq(port);
2930
	} else {
2942
	} else {
2931
		port->flags &= ~UPF_HARDPPS_CD;
2943
		port->flags &= ~UPF_HARDPPS_CD;
2932
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2944
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2933
			spin_lock_irq(&port->lock);
2945
			uart_port_lock_irq(port);
2934
			serial8250_disable_ms(port);
2946
			serial8250_disable_ms(port);
2935
			spin_unlock_irq(&port->lock);
2947
			uart_port_unlock_irq(port);
2936
		}
2948
		}
2937
	}
2949
	}
2938
}
2950
}
Lines 3401-3414 void serial8250_console_write(struct uart_8250_port *up, const char *s, Link Here
3401
	struct uart_port *port = &up->port;
3413
	struct uart_port *port = &up->port;
3402
	unsigned long flags;
3414
	unsigned long flags;
3403
	unsigned int ier, use_fifo;
3415
	unsigned int ier, use_fifo;
3404
	int locked = 1;
3405
3416
3406
	touch_nmi_watchdog();
3417
	touch_nmi_watchdog();
3407
3418
3408
	if (oops_in_progress)
3419
	uart_port_lock_irqsave(port, &flags);
3409
		locked = spin_trylock_irqsave(&port->lock, flags);
3410
	else
3411
		spin_lock_irqsave(&port->lock, flags);
3412
3420
3413
	/*
3421
	/*
3414
	 *	First save the IER then disable the interrupts
3422
	 *	First save the IER then disable the interrupts
Lines 3477-3484 void serial8250_console_write(struct uart_8250_port *up, const char *s, Link Here
3477
	if (up->msr_saved_flags)
3485
	if (up->msr_saved_flags)
3478
		serial8250_modem_status(up);
3486
		serial8250_modem_status(up);
3479
3487
3480
	if (locked)
3488
	uart_port_unlock_irqrestore(port, flags);
3481
		spin_unlock_irqrestore(&port->lock, flags);
3489
}
3490
3491
bool serial8250_console_write_atomic(struct uart_8250_port *up,
3492
				     struct nbcon_write_context *wctxt)
3493
{
3494
	struct uart_port *port = &up->port;
3495
	unsigned int ier;
3496
3497
	/* Atomic console not supported for rs485 mode. */
3498
	if (up->em485)
3499
		return false;
3500
3501
	touch_nmi_watchdog();
3502
3503
	if (!nbcon_enter_unsafe(wctxt))
3504
		return false;
3505
3506
	/*
3507
	 *	First save the IER then disable the interrupts
3508
	 */
3509
	ier = serial_port_in(port, UART_IER);
3510
	__serial8250_clear_IER(up);
3511
3512
	/* check scratch reg to see if port powered off during system sleep */
3513
	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
3514
		serial8250_console_restore(up);
3515
		up->canary = 0;
3516
	}
3517
3518
	uart_console_write(port, wctxt->outbuf, wctxt->len, serial8250_console_putchar);
3519
3520
	/*
3521
	 *	Finally, wait for transmitter to become empty
3522
	 *	and restore the IER
3523
	 */
3524
	wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
3525
3526
	serial_port_out(port, UART_IER, ier);
3527
3528
	return nbcon_exit_unsafe(wctxt);
3482
}
3529
}
3483
3530
3484
static unsigned int probe_baud(struct uart_port *port)
3531
static unsigned int probe_baud(struct uart_port *port)
(-)a/drivers/tty/serial/altera_jtaguart.c (-14 / +14 lines)
Lines 147-160 static irqreturn_t altera_jtaguart_interrupt(int irq, void *data) Link Here
147
	isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
147
	isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
148
	       ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
148
	       ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
149
149
150
	spin_lock(&port->lock);
150
	uart_port_lock(port);
151
151
152
	if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
152
	if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
153
		altera_jtaguart_rx_chars(port);
153
		altera_jtaguart_rx_chars(port);
154
	if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
154
	if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
155
		altera_jtaguart_tx_chars(port);
155
		altera_jtaguart_tx_chars(port);
156
156
157
	spin_unlock(&port->lock);
157
	uart_port_unlock(port);
158
158
159
	return IRQ_RETVAL(isr);
159
	return IRQ_RETVAL(isr);
160
}
160
}
Lines 180-193 static int altera_jtaguart_startup(struct uart_port *port) Link Here
180
		return ret;
180
		return ret;
181
	}
181
	}
182
182
183
	spin_lock_irqsave(&port->lock, flags);
183
	uart_port_lock_irqsave(port, &flags);
184
184
185
	/* Enable RX interrupts now */
185
	/* Enable RX interrupts now */
186
	port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
186
	port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
187
	writel(port->read_status_mask,
187
	writel(port->read_status_mask,
188
			port->membase + ALTERA_JTAGUART_CONTROL_REG);
188
			port->membase + ALTERA_JTAGUART_CONTROL_REG);
189
189
190
	spin_unlock_irqrestore(&port->lock, flags);
190
	uart_port_unlock_irqrestore(port, flags);
191
191
192
	return 0;
192
	return 0;
193
}
193
}
Lines 196-209 static void altera_jtaguart_shutdown(struct uart_port *port) Link Here
196
{
196
{
197
	unsigned long flags;
197
	unsigned long flags;
198
198
199
	spin_lock_irqsave(&port->lock, flags);
199
	uart_port_lock_irqsave(port, &flags);
200
200
201
	/* Disable all interrupts now */
201
	/* Disable all interrupts now */
202
	port->read_status_mask = 0;
202
	port->read_status_mask = 0;
203
	writel(port->read_status_mask,
203
	writel(port->read_status_mask,
204
			port->membase + ALTERA_JTAGUART_CONTROL_REG);
204
			port->membase + ALTERA_JTAGUART_CONTROL_REG);
205
205
206
	spin_unlock_irqrestore(&port->lock, flags);
206
	uart_port_unlock_irqrestore(port, flags);
207
207
208
	free_irq(port->irq, port);
208
	free_irq(port->irq, port);
209
}
209
}
Lines 264-296 static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c Link Here
264
	unsigned long flags;
264
	unsigned long flags;
265
	u32 status;
265
	u32 status;
266
266
267
	spin_lock_irqsave(&port->lock, flags);
267
	uart_port_lock_irqsave(port, &flags);
268
	while (!altera_jtaguart_tx_space(port, &status)) {
268
	while (!altera_jtaguart_tx_space(port, &status)) {
269
		spin_unlock_irqrestore(&port->lock, flags);
269
		uart_port_unlock_irqrestore(port, flags);
270
270
271
		if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
271
		if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
272
			return;	/* no connection activity */
272
			return;	/* no connection activity */
273
		}
273
		}
274
274
275
		cpu_relax();
275
		cpu_relax();
276
		spin_lock_irqsave(&port->lock, flags);
276
		uart_port_lock_irqsave(port, &flags);
277
	}
277
	}
278
	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
278
	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
279
	spin_unlock_irqrestore(&port->lock, flags);
279
	uart_port_unlock_irqrestore(port, flags);
280
}
280
}
281
#else
281
#else
282
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
282
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
283
{
283
{
284
	unsigned long flags;
284
	unsigned long flags;
285
285
286
	spin_lock_irqsave(&port->lock, flags);
286
	uart_port_lock_irqsave(port, &flags);
287
	while (!altera_jtaguart_tx_space(port, NULL)) {
287
	while (!altera_jtaguart_tx_space(port, NULL)) {
288
		spin_unlock_irqrestore(&port->lock, flags);
288
		uart_port_unlock_irqrestore(port, flags);
289
		cpu_relax();
289
		cpu_relax();
290
		spin_lock_irqsave(&port->lock, flags);
290
		uart_port_lock_irqsave(port, &flags);
291
	}
291
	}
292
	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
292
	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
293
	spin_unlock_irqrestore(&port->lock, flags);
293
	uart_port_unlock_irqrestore(port, flags);
294
}
294
}
295
#endif
295
#endif
296
296
(-)a/drivers/tty/serial/altera_uart.c (-10 / +10 lines)
Lines 164-176 static void altera_uart_break_ctl(struct uart_port *port, int break_state) Link Here
164
	struct altera_uart *pp = container_of(port, struct altera_uart, port);
164
	struct altera_uart *pp = container_of(port, struct altera_uart, port);
165
	unsigned long flags;
165
	unsigned long flags;
166
166
167
	spin_lock_irqsave(&port->lock, flags);
167
	uart_port_lock_irqsave(port, &flags);
168
	if (break_state == -1)
168
	if (break_state == -1)
169
		pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
169
		pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
170
	else
170
	else
171
		pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
171
		pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
172
	altera_uart_update_ctrl_reg(pp);
172
	altera_uart_update_ctrl_reg(pp);
173
	spin_unlock_irqrestore(&port->lock, flags);
173
	uart_port_unlock_irqrestore(port, flags);
174
}
174
}
175
175
176
static void altera_uart_set_termios(struct uart_port *port,
176
static void altera_uart_set_termios(struct uart_port *port,
Lines 187-196 static void altera_uart_set_termios(struct uart_port *port, Link Here
187
		tty_termios_copy_hw(termios, old);
187
		tty_termios_copy_hw(termios, old);
188
	tty_termios_encode_baud_rate(termios, baud, baud);
188
	tty_termios_encode_baud_rate(termios, baud, baud);
189
189
190
	spin_lock_irqsave(&port->lock, flags);
190
	uart_port_lock_irqsave(port, &flags);
191
	uart_update_timeout(port, termios->c_cflag, baud);
191
	uart_update_timeout(port, termios->c_cflag, baud);
192
	altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
192
	altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
193
	spin_unlock_irqrestore(&port->lock, flags);
193
	uart_port_unlock_irqrestore(port, flags);
194
194
195
	/*
195
	/*
196
	 * FIXME: port->read_status_mask and port->ignore_status_mask
196
	 * FIXME: port->read_status_mask and port->ignore_status_mask
Lines 264-275 static irqreturn_t altera_uart_interrupt(int irq, void *data) Link Here
264
264
265
	isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
265
	isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
266
266
267
	spin_lock_irqsave(&port->lock, flags);
267
	uart_port_lock_irqsave(port, &flags);
268
	if (isr & ALTERA_UART_STATUS_RRDY_MSK)
268
	if (isr & ALTERA_UART_STATUS_RRDY_MSK)
269
		altera_uart_rx_chars(port);
269
		altera_uart_rx_chars(port);
270
	if (isr & ALTERA_UART_STATUS_TRDY_MSK)
270
	if (isr & ALTERA_UART_STATUS_TRDY_MSK)
271
		altera_uart_tx_chars(port);
271
		altera_uart_tx_chars(port);
272
	spin_unlock_irqrestore(&port->lock, flags);
272
	uart_port_unlock_irqrestore(port, flags);
273
273
274
	return IRQ_RETVAL(isr);
274
	return IRQ_RETVAL(isr);
275
}
275
}
Lines 313-325 static int altera_uart_startup(struct uart_port *port) Link Here
313
		}
313
		}
314
	}
314
	}
315
315
316
	spin_lock_irqsave(&port->lock, flags);
316
	uart_port_lock_irqsave(port, &flags);
317
317
318
	/* Enable RX interrupts now */
318
	/* Enable RX interrupts now */
319
	pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
319
	pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
320
	altera_uart_update_ctrl_reg(pp);
320
	altera_uart_update_ctrl_reg(pp);
321
321
322
	spin_unlock_irqrestore(&port->lock, flags);
322
	uart_port_unlock_irqrestore(port, flags);
323
323
324
	return 0;
324
	return 0;
325
}
325
}
Lines 329-341 static void altera_uart_shutdown(struct uart_port *port) Link Here
329
	struct altera_uart *pp = container_of(port, struct altera_uart, port);
329
	struct altera_uart *pp = container_of(port, struct altera_uart, port);
330
	unsigned long flags;
330
	unsigned long flags;
331
331
332
	spin_lock_irqsave(&port->lock, flags);
332
	uart_port_lock_irqsave(port, &flags);
333
333
334
	/* Disable all interrupts now */
334
	/* Disable all interrupts now */
335
	pp->imr = 0;
335
	pp->imr = 0;
336
	altera_uart_update_ctrl_reg(pp);
336
	altera_uart_update_ctrl_reg(pp);
337
337
338
	spin_unlock_irqrestore(&port->lock, flags);
338
	uart_port_unlock_irqrestore(port, flags);
339
339
340
	if (port->irq)
340
	if (port->irq)
341
		free_irq(port->irq, port);
341
		free_irq(port->irq, port);
(-)a/drivers/tty/serial/amba-pl010.c (-10 / +10 lines)
Lines 207-213 static irqreturn_t pl010_int(int irq, void *dev_id) Link Here
207
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
207
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
208
	int handled = 0;
208
	int handled = 0;
209
209
210
	spin_lock(&port->lock);
210
	uart_port_lock(port);
211
211
212
	status = readb(port->membase + UART010_IIR);
212
	status = readb(port->membase + UART010_IIR);
213
	if (status) {
213
	if (status) {
Lines 228-234 static irqreturn_t pl010_int(int irq, void *dev_id) Link Here
228
		handled = 1;
228
		handled = 1;
229
	}
229
	}
230
230
231
	spin_unlock(&port->lock);
231
	uart_port_unlock(port);
232
232
233
	return IRQ_RETVAL(handled);
233
	return IRQ_RETVAL(handled);
234
}
234
}
Lines 270-283 static void pl010_break_ctl(struct uart_port *port, int break_state) Link Here
270
	unsigned long flags;
270
	unsigned long flags;
271
	unsigned int lcr_h;
271
	unsigned int lcr_h;
272
272
273
	spin_lock_irqsave(&port->lock, flags);
273
	uart_port_lock_irqsave(port, &flags);
274
	lcr_h = readb(port->membase + UART010_LCRH);
274
	lcr_h = readb(port->membase + UART010_LCRH);
275
	if (break_state == -1)
275
	if (break_state == -1)
276
		lcr_h |= UART01x_LCRH_BRK;
276
		lcr_h |= UART01x_LCRH_BRK;
277
	else
277
	else
278
		lcr_h &= ~UART01x_LCRH_BRK;
278
		lcr_h &= ~UART01x_LCRH_BRK;
279
	writel(lcr_h, port->membase + UART010_LCRH);
279
	writel(lcr_h, port->membase + UART010_LCRH);
280
	spin_unlock_irqrestore(&port->lock, flags);
280
	uart_port_unlock_irqrestore(port, flags);
281
}
281
}
282
282
283
static int pl010_startup(struct uart_port *port)
283
static int pl010_startup(struct uart_port *port)
Lines 385-391 pl010_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
385
	if (port->fifosize > 1)
385
	if (port->fifosize > 1)
386
		lcr_h |= UART01x_LCRH_FEN;
386
		lcr_h |= UART01x_LCRH_FEN;
387
387
388
	spin_lock_irqsave(&port->lock, flags);
388
	uart_port_lock_irqsave(port, &flags);
389
389
390
	/*
390
	/*
391
	 * Update the per-port timeout.
391
	 * Update the per-port timeout.
Lines 438-459 pl010_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
438
	writel(lcr_h, port->membase + UART010_LCRH);
438
	writel(lcr_h, port->membase + UART010_LCRH);
439
	writel(old_cr, port->membase + UART010_CR);
439
	writel(old_cr, port->membase + UART010_CR);
440
440
441
	spin_unlock_irqrestore(&port->lock, flags);
441
	uart_port_unlock_irqrestore(port, flags);
442
}
442
}
443
443
444
static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
444
static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
445
{
445
{
446
	if (termios->c_line == N_PPS) {
446
	if (termios->c_line == N_PPS) {
447
		port->flags |= UPF_HARDPPS_CD;
447
		port->flags |= UPF_HARDPPS_CD;
448
		spin_lock_irq(&port->lock);
448
		uart_port_lock_irq(port);
449
		pl010_enable_ms(port);
449
		pl010_enable_ms(port);
450
		spin_unlock_irq(&port->lock);
450
		uart_port_unlock_irq(port);
451
	} else {
451
	} else {
452
		port->flags &= ~UPF_HARDPPS_CD;
452
		port->flags &= ~UPF_HARDPPS_CD;
453
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
453
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
454
			spin_lock_irq(&port->lock);
454
			uart_port_lock_irq(port);
455
			pl010_disable_ms(port);
455
			pl010_disable_ms(port);
456
			spin_unlock_irq(&port->lock);
456
			uart_port_unlock_irq(port);
457
		}
457
		}
458
	}
458
	}
459
}
459
}
(-)a/drivers/tty/serial/amba-pl011.c (-41 / +37 lines)
Lines 345-353 static int pl011_fifo_to_tty(struct uart_amba_port *uap) Link Here
345
				flag = TTY_FRAME;
345
				flag = TTY_FRAME;
346
		}
346
		}
347
347
348
		spin_unlock(&uap->port.lock);
348
		uart_port_unlock(&uap->port);
349
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
349
		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
350
		spin_lock(&uap->port.lock);
350
		uart_port_lock(&uap->port);
351
351
352
		if (!sysrq)
352
		if (!sysrq)
353
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
353
			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
Lines 550-556 static void pl011_dma_tx_callback(void *data) Link Here
550
	unsigned long flags;
550
	unsigned long flags;
551
	u16 dmacr;
551
	u16 dmacr;
552
552
553
	spin_lock_irqsave(&uap->port.lock, flags);
553
	uart_port_lock_irqsave(&uap->port, &flags);
554
	if (uap->dmatx.queued)
554
	if (uap->dmatx.queued)
555
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
555
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
556
			     DMA_TO_DEVICE);
556
			     DMA_TO_DEVICE);
Lines 571-577 static void pl011_dma_tx_callback(void *data) Link Here
571
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
571
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
572
	    uart_circ_empty(&uap->port.state->xmit)) {
572
	    uart_circ_empty(&uap->port.state->xmit)) {
573
		uap->dmatx.queued = false;
573
		uap->dmatx.queued = false;
574
		spin_unlock_irqrestore(&uap->port.lock, flags);
574
		uart_port_unlock_irqrestore(&uap->port, flags);
575
		return;
575
		return;
576
	}
576
	}
577
577
Lines 582-588 static void pl011_dma_tx_callback(void *data) Link Here
582
		 */
582
		 */
583
		pl011_start_tx_pio(uap);
583
		pl011_start_tx_pio(uap);
584
584
585
	spin_unlock_irqrestore(&uap->port.lock, flags);
585
	uart_port_unlock_irqrestore(&uap->port, flags);
586
}
586
}
587
587
588
/*
588
/*
Lines 1009-1015 static void pl011_dma_rx_callback(void *data) Link Here
1009
	 * routine to flush out the secondary DMA buffer while
1009
	 * routine to flush out the secondary DMA buffer while
1010
	 * we immediately trigger the next DMA job.
1010
	 * we immediately trigger the next DMA job.
1011
	 */
1011
	 */
1012
	spin_lock_irq(&uap->port.lock);
1012
	uart_port_lock_irq(&uap->port);
1013
	/*
1013
	/*
1014
	 * Rx data can be taken by the UART interrupts during
1014
	 * Rx data can be taken by the UART interrupts during
1015
	 * the DMA irq handler. So we check the residue here.
1015
	 * the DMA irq handler. So we check the residue here.
Lines 1025-1031 static void pl011_dma_rx_callback(void *data) Link Here
1025
	ret = pl011_dma_rx_trigger_dma(uap);
1025
	ret = pl011_dma_rx_trigger_dma(uap);
1026
1026
1027
	pl011_dma_rx_chars(uap, pending, lastbuf, false);
1027
	pl011_dma_rx_chars(uap, pending, lastbuf, false);
1028
	spin_unlock_irq(&uap->port.lock);
1028
	uart_port_unlock_irq(&uap->port);
1029
	/*
1029
	/*
1030
	 * Do this check after we picked the DMA chars so we don't
1030
	 * Do this check after we picked the DMA chars so we don't
1031
	 * get some IRQ immediately from RX.
1031
	 * get some IRQ immediately from RX.
Lines 1091-1101 static void pl011_dma_rx_poll(struct timer_list *t) Link Here
1091
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
1091
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
1092
			> uap->dmarx.poll_timeout) {
1092
			> uap->dmarx.poll_timeout) {
1093
1093
1094
		spin_lock_irqsave(&uap->port.lock, flags);
1094
		uart_port_lock_irqsave(&uap->port, &flags);
1095
		pl011_dma_rx_stop(uap);
1095
		pl011_dma_rx_stop(uap);
1096
		uap->im |= UART011_RXIM;
1096
		uap->im |= UART011_RXIM;
1097
		pl011_write(uap->im, uap, REG_IMSC);
1097
		pl011_write(uap->im, uap, REG_IMSC);
1098
		spin_unlock_irqrestore(&uap->port.lock, flags);
1098
		uart_port_unlock_irqrestore(&uap->port, flags);
1099
1099
1100
		uap->dmarx.running = false;
1100
		uap->dmarx.running = false;
1101
		dmaengine_terminate_all(rxchan);
1101
		dmaengine_terminate_all(rxchan);
Lines 1191-1200 static void pl011_dma_shutdown(struct uart_amba_port *uap) Link Here
1191
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
1191
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
1192
		cpu_relax();
1192
		cpu_relax();
1193
1193
1194
	spin_lock_irq(&uap->port.lock);
1194
	uart_port_lock_irq(&uap->port);
1195
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1195
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1196
	pl011_write(uap->dmacr, uap, REG_DMACR);
1196
	pl011_write(uap->dmacr, uap, REG_DMACR);
1197
	spin_unlock_irq(&uap->port.lock);
1197
	uart_port_unlock_irq(&uap->port);
1198
1198
1199
	if (uap->using_tx_dma) {
1199
	if (uap->using_tx_dma) {
1200
		/* In theory, this should already be done by pl011_dma_flush_buffer */
1200
		/* In theory, this should already be done by pl011_dma_flush_buffer */
Lines 1374-1382 static void pl011_throttle_rx(struct uart_port *port) Link Here
1374
{
1374
{
1375
	unsigned long flags;
1375
	unsigned long flags;
1376
1376
1377
	spin_lock_irqsave(&port->lock, flags);
1377
	uart_port_lock_irqsave(port, &flags);
1378
	pl011_stop_rx(port);
1378
	pl011_stop_rx(port);
1379
	spin_unlock_irqrestore(&port->lock, flags);
1379
	uart_port_unlock_irqrestore(port, flags);
1380
}
1380
}
1381
1381
1382
static void pl011_enable_ms(struct uart_port *port)
1382
static void pl011_enable_ms(struct uart_port *port)
Lines 1394-1400 __acquires(&uap->port.lock) Link Here
1394
{
1394
{
1395
	pl011_fifo_to_tty(uap);
1395
	pl011_fifo_to_tty(uap);
1396
1396
1397
	spin_unlock(&uap->port.lock);
1397
	uart_port_unlock(&uap->port);
1398
	tty_flip_buffer_push(&uap->port.state->port);
1398
	tty_flip_buffer_push(&uap->port.state->port);
1399
	/*
1399
	/*
1400
	 * If we were temporarily out of DMA mode for a while,
1400
	 * If we were temporarily out of DMA mode for a while,
Lines 1419-1425 __acquires(&uap->port.lock) Link Here
1419
#endif
1419
#endif
1420
		}
1420
		}
1421
	}
1421
	}
1422
	spin_lock(&uap->port.lock);
1422
	uart_port_lock(&uap->port);
1423
}
1423
}
1424
1424
1425
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1425
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
Lines 1555-1561 static irqreturn_t pl011_int(int irq, void *dev_id) Link Here
1555
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1555
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1556
	int handled = 0;
1556
	int handled = 0;
1557
1557
1558
	spin_lock_irqsave(&uap->port.lock, flags);
1558
	uart_port_lock_irqsave(&uap->port, &flags);
1559
	status = pl011_read(uap, REG_RIS) & uap->im;
1559
	status = pl011_read(uap, REG_RIS) & uap->im;
1560
	if (status) {
1560
	if (status) {
1561
		do {
1561
		do {
Lines 1585-1591 static irqreturn_t pl011_int(int irq, void *dev_id) Link Here
1585
		handled = 1;
1585
		handled = 1;
1586
	}
1586
	}
1587
1587
1588
	spin_unlock_irqrestore(&uap->port.lock, flags);
1588
	uart_port_unlock_irqrestore(&uap->port, flags);
1589
1589
1590
	return IRQ_RETVAL(handled);
1590
	return IRQ_RETVAL(handled);
1591
}
1591
}
Lines 1657-1670 static void pl011_break_ctl(struct uart_port *port, int break_state) Link Here
1657
	unsigned long flags;
1657
	unsigned long flags;
1658
	unsigned int lcr_h;
1658
	unsigned int lcr_h;
1659
1659
1660
	spin_lock_irqsave(&uap->port.lock, flags);
1660
	uart_port_lock_irqsave(&uap->port, &flags);
1661
	lcr_h = pl011_read(uap, REG_LCRH_TX);
1661
	lcr_h = pl011_read(uap, REG_LCRH_TX);
1662
	if (break_state == -1)
1662
	if (break_state == -1)
1663
		lcr_h |= UART01x_LCRH_BRK;
1663
		lcr_h |= UART01x_LCRH_BRK;
1664
	else
1664
	else
1665
		lcr_h &= ~UART01x_LCRH_BRK;
1665
		lcr_h &= ~UART01x_LCRH_BRK;
1666
	pl011_write(lcr_h, uap, REG_LCRH_TX);
1666
	pl011_write(lcr_h, uap, REG_LCRH_TX);
1667
	spin_unlock_irqrestore(&uap->port.lock, flags);
1667
	uart_port_unlock_irqrestore(&uap->port, flags);
1668
}
1668
}
1669
1669
1670
#ifdef CONFIG_CONSOLE_POLL
1670
#ifdef CONFIG_CONSOLE_POLL
Lines 1803-1809 static void pl011_enable_interrupts(struct uart_amba_port *uap) Link Here
1803
	unsigned long flags;
1803
	unsigned long flags;
1804
	unsigned int i;
1804
	unsigned int i;
1805
1805
1806
	spin_lock_irqsave(&uap->port.lock, flags);
1806
	uart_port_lock_irqsave(&uap->port, &flags);
1807
1807
1808
	/* Clear out any spuriously appearing RX interrupts */
1808
	/* Clear out any spuriously appearing RX interrupts */
1809
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1809
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
Lines 1825-1831 static void pl011_enable_interrupts(struct uart_amba_port *uap) Link Here
1825
	if (!pl011_dma_rx_running(uap))
1825
	if (!pl011_dma_rx_running(uap))
1826
		uap->im |= UART011_RXIM;
1826
		uap->im |= UART011_RXIM;
1827
	pl011_write(uap->im, uap, REG_IMSC);
1827
	pl011_write(uap->im, uap, REG_IMSC);
1828
	spin_unlock_irqrestore(&uap->port.lock, flags);
1828
	uart_port_unlock_irqrestore(&uap->port, flags);
1829
}
1829
}
1830
1830
1831
static void pl011_unthrottle_rx(struct uart_port *port)
1831
static void pl011_unthrottle_rx(struct uart_port *port)
Lines 1833-1839 static void pl011_unthrottle_rx(struct uart_port *port) Link Here
1833
	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
1833
	struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
1834
	unsigned long flags;
1834
	unsigned long flags;
1835
1835
1836
	spin_lock_irqsave(&uap->port.lock, flags);
1836
	uart_port_lock_irqsave(&uap->port, &flags);
1837
1837
1838
	uap->im = UART011_RTIM;
1838
	uap->im = UART011_RTIM;
1839
	if (!pl011_dma_rx_running(uap))
1839
	if (!pl011_dma_rx_running(uap))
Lines 1841-1847 static void pl011_unthrottle_rx(struct uart_port *port) Link Here
1841
1841
1842
	pl011_write(uap->im, uap, REG_IMSC);
1842
	pl011_write(uap->im, uap, REG_IMSC);
1843
1843
1844
	spin_unlock_irqrestore(&uap->port.lock, flags);
1844
	uart_port_unlock_irqrestore(&uap->port, flags);
1845
}
1845
}
1846
1846
1847
static int pl011_startup(struct uart_port *port)
1847
static int pl011_startup(struct uart_port *port)
Lines 1861-1867 static int pl011_startup(struct uart_port *port) Link Here
1861
1861
1862
	pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1862
	pl011_write(uap->vendor->ifls, uap, REG_IFLS);
1863
1863
1864
	spin_lock_irq(&uap->port.lock);
1864
	uart_port_lock_irq(&uap->port);
1865
1865
1866
	cr = pl011_read(uap, REG_CR);
1866
	cr = pl011_read(uap, REG_CR);
1867
	cr &= UART011_CR_RTS | UART011_CR_DTR;
1867
	cr &= UART011_CR_RTS | UART011_CR_DTR;
Lines 1872-1878 static int pl011_startup(struct uart_port *port) Link Here
1872
1872
1873
	pl011_write(cr, uap, REG_CR);
1873
	pl011_write(cr, uap, REG_CR);
1874
1874
1875
	spin_unlock_irq(&uap->port.lock);
1875
	uart_port_unlock_irq(&uap->port);
1876
1876
1877
	/*
1877
	/*
1878
	 * initialise the old status of the modem signals
1878
	 * initialise the old status of the modem signals
Lines 1933-1944 static void pl011_disable_uart(struct uart_amba_port *uap) Link Here
1933
	unsigned int cr;
1933
	unsigned int cr;
1934
1934
1935
	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1935
	uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1936
	spin_lock_irq(&uap->port.lock);
1936
	uart_port_lock_irq(&uap->port);
1937
	cr = pl011_read(uap, REG_CR);
1937
	cr = pl011_read(uap, REG_CR);
1938
	cr &= UART011_CR_RTS | UART011_CR_DTR;
1938
	cr &= UART011_CR_RTS | UART011_CR_DTR;
1939
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1939
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1940
	pl011_write(cr, uap, REG_CR);
1940
	pl011_write(cr, uap, REG_CR);
1941
	spin_unlock_irq(&uap->port.lock);
1941
	uart_port_unlock_irq(&uap->port);
1942
1942
1943
	/*
1943
	/*
1944
	 * disable break condition and fifos
1944
	 * disable break condition and fifos
Lines 1950-1963 static void pl011_disable_uart(struct uart_amba_port *uap) Link Here
1950
1950
1951
static void pl011_disable_interrupts(struct uart_amba_port *uap)
1951
static void pl011_disable_interrupts(struct uart_amba_port *uap)
1952
{
1952
{
1953
	spin_lock_irq(&uap->port.lock);
1953
	uart_port_lock_irq(&uap->port);
1954
1954
1955
	/* mask all interrupts and clear all pending ones */
1955
	/* mask all interrupts and clear all pending ones */
1956
	uap->im = 0;
1956
	uap->im = 0;
1957
	pl011_write(uap->im, uap, REG_IMSC);
1957
	pl011_write(uap->im, uap, REG_IMSC);
1958
	pl011_write(0xffff, uap, REG_ICR);
1958
	pl011_write(0xffff, uap, REG_ICR);
1959
1959
1960
	spin_unlock_irq(&uap->port.lock);
1960
	uart_port_unlock_irq(&uap->port);
1961
}
1961
}
1962
1962
1963
static void pl011_shutdown(struct uart_port *port)
1963
static void pl011_shutdown(struct uart_port *port)
Lines 2102-2108 pl011_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2102
2102
2103
	bits = tty_get_frame_size(termios->c_cflag);
2103
	bits = tty_get_frame_size(termios->c_cflag);
2104
2104
2105
	spin_lock_irqsave(&port->lock, flags);
2105
	uart_port_lock_irqsave(port, &flags);
2106
2106
2107
	/*
2107
	/*
2108
	 * Update the per-port timeout.
2108
	 * Update the per-port timeout.
Lines 2176-2182 pl011_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2176
	old_cr |= UART011_CR_RXE;
2176
	old_cr |= UART011_CR_RXE;
2177
	pl011_write(old_cr, uap, REG_CR);
2177
	pl011_write(old_cr, uap, REG_CR);
2178
2178
2179
	spin_unlock_irqrestore(&port->lock, flags);
2179
	uart_port_unlock_irqrestore(port, flags);
2180
}
2180
}
2181
2181
2182
static void
2182
static void
Lines 2194-2203 sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2194
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2194
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2195
	termios->c_cflag |= CS8 | CLOCAL;
2195
	termios->c_cflag |= CS8 | CLOCAL;
2196
2196
2197
	spin_lock_irqsave(&port->lock, flags);
2197
	uart_port_lock_irqsave(port, &flags);
2198
	uart_update_timeout(port, CS8, uap->fixed_baud);
2198
	uart_update_timeout(port, CS8, uap->fixed_baud);
2199
	pl011_setup_status_masks(port, termios);
2199
	pl011_setup_status_masks(port, termios);
2200
	spin_unlock_irqrestore(&port->lock, flags);
2200
	uart_port_unlock_irqrestore(port, flags);
2201
}
2201
}
2202
2202
2203
static const char *pl011_type(struct uart_port *port)
2203
static const char *pl011_type(struct uart_port *port)
Lines 2332-2344 pl011_console_write(struct console *co, const char *s, unsigned int count) Link Here
2332
2332
2333
	clk_enable(uap->clk);
2333
	clk_enable(uap->clk);
2334
2334
2335
	local_irq_save(flags);
2335
	if (uap->port.sysrq || oops_in_progress)
2336
	if (uap->port.sysrq)
2336
		locked = uart_port_trylock_irqsave(&uap->port, &flags);
2337
		locked = 0;
2338
	else if (oops_in_progress)
2339
		locked = spin_trylock(&uap->port.lock);
2340
	else
2337
	else
2341
		spin_lock(&uap->port.lock);
2338
		uart_port_lock_irqsave(&uap->port, &flags);
2342
2339
2343
	/*
2340
	/*
2344
	 *	First save the CR then disable the interrupts
2341
	 *	First save the CR then disable the interrupts
Lines 2364-2371 pl011_console_write(struct console *co, const char *s, unsigned int count) Link Here
2364
		pl011_write(old_cr, uap, REG_CR);
2361
		pl011_write(old_cr, uap, REG_CR);
2365
2362
2366
	if (locked)
2363
	if (locked)
2367
		spin_unlock(&uap->port.lock);
2364
		uart_port_unlock_irqrestore(&uap->port, flags);
2368
	local_irq_restore(flags);
2369
2365
2370
	clk_disable(uap->clk);
2366
	clk_disable(uap->clk);
2371
}
2367
}
(-)a/drivers/tty/serial/apbuart.c (-4 / +4 lines)
Lines 133-139 static irqreturn_t apbuart_int(int irq, void *dev_id) Link Here
133
	struct uart_port *port = dev_id;
133
	struct uart_port *port = dev_id;
134
	unsigned int status;
134
	unsigned int status;
135
135
136
	spin_lock(&port->lock);
136
	uart_port_lock(port);
137
137
138
	status = UART_GET_STATUS(port);
138
	status = UART_GET_STATUS(port);
139
	if (status & UART_STATUS_DR)
139
	if (status & UART_STATUS_DR)
Lines 141-147 static irqreturn_t apbuart_int(int irq, void *dev_id) Link Here
141
	if (status & UART_STATUS_THE)
141
	if (status & UART_STATUS_THE)
142
		apbuart_tx_chars(port);
142
		apbuart_tx_chars(port);
143
143
144
	spin_unlock(&port->lock);
144
	uart_port_unlock(port);
145
145
146
	return IRQ_HANDLED;
146
	return IRQ_HANDLED;
147
}
147
}
Lines 228-234 static void apbuart_set_termios(struct uart_port *port, Link Here
228
	if (termios->c_cflag & CRTSCTS)
228
	if (termios->c_cflag & CRTSCTS)
229
		cr |= UART_CTRL_FL;
229
		cr |= UART_CTRL_FL;
230
230
231
	spin_lock_irqsave(&port->lock, flags);
231
	uart_port_lock_irqsave(port, &flags);
232
232
233
	/* Update the per-port timeout. */
233
	/* Update the per-port timeout. */
234
	uart_update_timeout(port, termios->c_cflag, baud);
234
	uart_update_timeout(port, termios->c_cflag, baud);
Lines 251-257 static void apbuart_set_termios(struct uart_port *port, Link Here
251
	UART_PUT_SCAL(port, quot);
251
	UART_PUT_SCAL(port, quot);
252
	UART_PUT_CTRL(port, cr);
252
	UART_PUT_CTRL(port, cr);
253
253
254
	spin_unlock_irqrestore(&port->lock, flags);
254
	uart_port_unlock_irqrestore(port, flags);
255
}
255
}
256
256
257
static const char *apbuart_type(struct uart_port *port)
257
static const char *apbuart_type(struct uart_port *port)
(-)a/drivers/tty/serial/ar933x_uart.c (-13 / +13 lines)
Lines 133-141 static unsigned int ar933x_uart_tx_empty(struct uart_port *port) Link Here
133
	unsigned long flags;
133
	unsigned long flags;
134
	unsigned int rdata;
134
	unsigned int rdata;
135
135
136
	spin_lock_irqsave(&up->port.lock, flags);
136
	uart_port_lock_irqsave(&up->port, &flags);
137
	rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
137
	rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
138
	spin_unlock_irqrestore(&up->port.lock, flags);
138
	uart_port_unlock_irqrestore(&up->port, flags);
139
139
140
	return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
140
	return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
141
}
141
}
Lines 220-233 static void ar933x_uart_break_ctl(struct uart_port *port, int break_state) Link Here
220
		container_of(port, struct ar933x_uart_port, port);
220
		container_of(port, struct ar933x_uart_port, port);
221
	unsigned long flags;
221
	unsigned long flags;
222
222
223
	spin_lock_irqsave(&up->port.lock, flags);
223
	uart_port_lock_irqsave(&up->port, &flags);
224
	if (break_state == -1)
224
	if (break_state == -1)
225
		ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
225
		ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
226
				    AR933X_UART_CS_TX_BREAK);
226
				    AR933X_UART_CS_TX_BREAK);
227
	else
227
	else
228
		ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
228
		ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
229
				      AR933X_UART_CS_TX_BREAK);
229
				      AR933X_UART_CS_TX_BREAK);
230
	spin_unlock_irqrestore(&up->port.lock, flags);
230
	uart_port_unlock_irqrestore(&up->port, flags);
231
}
231
}
232
232
233
/*
233
/*
Lines 318-324 static void ar933x_uart_set_termios(struct uart_port *port, Link Here
318
	 * Ok, we're now changing the port state. Do it with
318
	 * Ok, we're now changing the port state. Do it with
319
	 * interrupts disabled.
319
	 * interrupts disabled.
320
	 */
320
	 */
321
	spin_lock_irqsave(&up->port.lock, flags);
321
	uart_port_lock_irqsave(&up->port, &flags);
322
322
323
	/* disable the UART */
323
	/* disable the UART */
324
	ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
324
	ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
Lines 352-358 static void ar933x_uart_set_termios(struct uart_port *port, Link Here
352
			AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
352
			AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
353
			AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
353
			AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
354
354
355
	spin_unlock_irqrestore(&up->port.lock, flags);
355
	uart_port_unlock_irqrestore(&up->port, flags);
356
356
357
	if (tty_termios_baud_rate(new))
357
	if (tty_termios_baud_rate(new))
358
		tty_termios_encode_baud_rate(new, baud, baud);
358
		tty_termios_encode_baud_rate(new, baud, baud);
Lines 450-456 static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id) Link Here
450
	if ((status & AR933X_UART_CS_HOST_INT) == 0)
450
	if ((status & AR933X_UART_CS_HOST_INT) == 0)
451
		return IRQ_NONE;
451
		return IRQ_NONE;
452
452
453
	spin_lock(&up->port.lock);
453
	uart_port_lock(&up->port);
454
454
455
	status = ar933x_uart_read(up, AR933X_UART_INT_REG);
455
	status = ar933x_uart_read(up, AR933X_UART_INT_REG);
456
	status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
456
	status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
Lines 468-474 static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id) Link Here
468
		ar933x_uart_tx_chars(up);
468
		ar933x_uart_tx_chars(up);
469
	}
469
	}
470
470
471
	spin_unlock(&up->port.lock);
471
	uart_port_unlock(&up->port);
472
472
473
	return IRQ_HANDLED;
473
	return IRQ_HANDLED;
474
}
474
}
Lines 485-491 static int ar933x_uart_startup(struct uart_port *port) Link Here
485
	if (ret)
485
	if (ret)
486
		return ret;
486
		return ret;
487
487
488
	spin_lock_irqsave(&up->port.lock, flags);
488
	uart_port_lock_irqsave(&up->port, &flags);
489
489
490
	/* Enable HOST interrupts */
490
	/* Enable HOST interrupts */
491
	ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
491
	ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
Lines 498-504 static int ar933x_uart_startup(struct uart_port *port) Link Here
498
	/* Enable RX interrupts */
498
	/* Enable RX interrupts */
499
	ar933x_uart_start_rx_interrupt(up);
499
	ar933x_uart_start_rx_interrupt(up);
500
500
501
	spin_unlock_irqrestore(&up->port.lock, flags);
501
	uart_port_unlock_irqrestore(&up->port, flags);
502
502
503
	return 0;
503
	return 0;
504
}
504
}
Lines 632-640 static void ar933x_uart_console_write(struct console *co, const char *s, Link Here
632
	if (up->port.sysrq)
632
	if (up->port.sysrq)
633
		locked = 0;
633
		locked = 0;
634
	else if (oops_in_progress)
634
	else if (oops_in_progress)
635
		locked = spin_trylock(&up->port.lock);
635
		locked = uart_port_trylock(&up->port);
636
	else
636
	else
637
		spin_lock(&up->port.lock);
637
		uart_port_lock(&up->port);
638
638
639
	/*
639
	/*
640
	 * First save the IER then disable the interrupts
640
	 * First save the IER then disable the interrupts
Lines 654-660 static void ar933x_uart_console_write(struct console *co, const char *s, Link Here
654
	ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
654
	ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
655
655
656
	if (locked)
656
	if (locked)
657
		spin_unlock(&up->port.lock);
657
		uart_port_unlock(&up->port);
658
658
659
	local_irq_restore(flags);
659
	local_irq_restore(flags);
660
}
660
}
(-)a/drivers/tty/serial/arc_uart.c (-8 / +8 lines)
Lines 279-287 static irqreturn_t arc_serial_isr(int irq, void *dev_id) Link Here
279
	if (status & RXIENB) {
279
	if (status & RXIENB) {
280
280
281
		/* already in ISR, no need of xx_irqsave */
281
		/* already in ISR, no need of xx_irqsave */
282
		spin_lock(&port->lock);
282
		uart_port_lock(port);
283
		arc_serial_rx_chars(port, status);
283
		arc_serial_rx_chars(port, status);
284
		spin_unlock(&port->lock);
284
		uart_port_unlock(port);
285
	}
285
	}
286
286
287
	if ((status & TXIENB) && (status & TXEMPTY)) {
287
	if ((status & TXIENB) && (status & TXEMPTY)) {
Lines 291-302 static irqreturn_t arc_serial_isr(int irq, void *dev_id) Link Here
291
		 */
291
		 */
292
		UART_TX_IRQ_DISABLE(port);
292
		UART_TX_IRQ_DISABLE(port);
293
293
294
		spin_lock(&port->lock);
294
		uart_port_lock(port);
295
295
296
		if (!uart_tx_stopped(port))
296
		if (!uart_tx_stopped(port))
297
			arc_serial_tx_chars(port);
297
			arc_serial_tx_chars(port);
298
298
299
		spin_unlock(&port->lock);
299
		uart_port_unlock(port);
300
	}
300
	}
301
301
302
	return IRQ_HANDLED;
302
	return IRQ_HANDLED;
Lines 366-372 arc_serial_set_termios(struct uart_port *port, struct ktermios *new, Link Here
366
	uartl = hw_val & 0xFF;
366
	uartl = hw_val & 0xFF;
367
	uarth = (hw_val >> 8) & 0xFF;
367
	uarth = (hw_val >> 8) & 0xFF;
368
368
369
	spin_lock_irqsave(&port->lock, flags);
369
	uart_port_lock_irqsave(port, &flags);
370
370
371
	UART_ALL_IRQ_DISABLE(port);
371
	UART_ALL_IRQ_DISABLE(port);
372
372
Lines 391-397 arc_serial_set_termios(struct uart_port *port, struct ktermios *new, Link Here
391
391
392
	uart_update_timeout(port, new->c_cflag, baud);
392
	uart_update_timeout(port, new->c_cflag, baud);
393
393
394
	spin_unlock_irqrestore(&port->lock, flags);
394
	uart_port_unlock_irqrestore(port, flags);
395
}
395
}
396
396
397
static const char *arc_serial_type(struct uart_port *port)
397
static const char *arc_serial_type(struct uart_port *port)
Lines 521-529 static void arc_serial_console_write(struct console *co, const char *s, Link Here
521
	struct uart_port *port = &arc_uart_ports[co->index].port;
521
	struct uart_port *port = &arc_uart_ports[co->index].port;
522
	unsigned long flags;
522
	unsigned long flags;
523
523
524
	spin_lock_irqsave(&port->lock, flags);
524
	uart_port_lock_irqsave(port, &flags);
525
	uart_console_write(port, s, count, arc_serial_console_putchar);
525
	uart_console_write(port, s, count, arc_serial_console_putchar);
526
	spin_unlock_irqrestore(&port->lock, flags);
526
	uart_port_unlock_irqrestore(port, flags);
527
}
527
}
528
528
529
static struct console arc_console = {
529
static struct console arc_console = {
(-)a/drivers/tty/serial/atmel_serial.c (-12 / +12 lines)
Lines 861-867 static void atmel_complete_tx_dma(void *arg) Link Here
861
	struct dma_chan *chan = atmel_port->chan_tx;
861
	struct dma_chan *chan = atmel_port->chan_tx;
862
	unsigned long flags;
862
	unsigned long flags;
863
863
864
	spin_lock_irqsave(&port->lock, flags);
864
	uart_port_lock_irqsave(port, &flags);
865
865
866
	if (chan)
866
	if (chan)
867
		dmaengine_terminate_all(chan);
867
		dmaengine_terminate_all(chan);
Lines 893-899 static void atmel_complete_tx_dma(void *arg) Link Here
893
				  atmel_port->tx_done_mask);
893
				  atmel_port->tx_done_mask);
894
	}
894
	}
895
895
896
	spin_unlock_irqrestore(&port->lock, flags);
896
	uart_port_unlock_irqrestore(port, flags);
897
}
897
}
898
898
899
static void atmel_release_tx_dma(struct uart_port *port)
899
static void atmel_release_tx_dma(struct uart_port *port)
Lines 1711-1719 static void atmel_tasklet_rx_func(struct tasklet_struct *t) Link Here
1711
	struct uart_port *port = &atmel_port->uart;
1711
	struct uart_port *port = &atmel_port->uart;
1712
1712
1713
	/* The interrupt handler does not take the lock */
1713
	/* The interrupt handler does not take the lock */
1714
	spin_lock(&port->lock);
1714
	uart_port_lock(port);
1715
	atmel_port->schedule_rx(port);
1715
	atmel_port->schedule_rx(port);
1716
	spin_unlock(&port->lock);
1716
	uart_port_unlock(port);
1717
}
1717
}
1718
1718
1719
static void atmel_tasklet_tx_func(struct tasklet_struct *t)
1719
static void atmel_tasklet_tx_func(struct tasklet_struct *t)
Lines 1723-1731 static void atmel_tasklet_tx_func(struct tasklet_struct *t) Link Here
1723
	struct uart_port *port = &atmel_port->uart;
1723
	struct uart_port *port = &atmel_port->uart;
1724
1724
1725
	/* The interrupt handler does not take the lock */
1725
	/* The interrupt handler does not take the lock */
1726
	spin_lock(&port->lock);
1726
	uart_port_lock(port);
1727
	atmel_port->schedule_tx(port);
1727
	atmel_port->schedule_tx(port);
1728
	spin_unlock(&port->lock);
1728
	uart_port_unlock(port);
1729
}
1729
}
1730
1730
1731
static void atmel_init_property(struct atmel_uart_port *atmel_port,
1731
static void atmel_init_property(struct atmel_uart_port *atmel_port,
Lines 2175-2181 static void atmel_set_termios(struct uart_port *port, Link Here
2175
	} else
2175
	} else
2176
		mode |= ATMEL_US_PAR_NONE;
2176
		mode |= ATMEL_US_PAR_NONE;
2177
2177
2178
	spin_lock_irqsave(&port->lock, flags);
2178
	uart_port_lock_irqsave(port, &flags);
2179
2179
2180
	port->read_status_mask = ATMEL_US_OVRE;
2180
	port->read_status_mask = ATMEL_US_OVRE;
2181
	if (termios->c_iflag & INPCK)
2181
	if (termios->c_iflag & INPCK)
Lines 2377-2398 static void atmel_set_termios(struct uart_port *port, Link Here
2377
	else
2377
	else
2378
		atmel_disable_ms(port);
2378
		atmel_disable_ms(port);
2379
2379
2380
	spin_unlock_irqrestore(&port->lock, flags);
2380
	uart_port_unlock_irqrestore(port, flags);
2381
}
2381
}
2382
2382
2383
static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2383
static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2384
{
2384
{
2385
	if (termios->c_line == N_PPS) {
2385
	if (termios->c_line == N_PPS) {
2386
		port->flags |= UPF_HARDPPS_CD;
2386
		port->flags |= UPF_HARDPPS_CD;
2387
		spin_lock_irq(&port->lock);
2387
		uart_port_lock_irq(port);
2388
		atmel_enable_ms(port);
2388
		atmel_enable_ms(port);
2389
		spin_unlock_irq(&port->lock);
2389
		uart_port_unlock_irq(port);
2390
	} else {
2390
	} else {
2391
		port->flags &= ~UPF_HARDPPS_CD;
2391
		port->flags &= ~UPF_HARDPPS_CD;
2392
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2392
		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2393
			spin_lock_irq(&port->lock);
2393
			uart_port_lock_irq(port);
2394
			atmel_disable_ms(port);
2394
			atmel_disable_ms(port);
2395
			spin_unlock_irq(&port->lock);
2395
			uart_port_unlock_irq(port);
2396
		}
2396
		}
2397
	}
2397
	}
2398
}
2398
}
(-)a/drivers/tty/serial/bcm63xx_uart.c (-11 / +11 lines)
Lines 201-207 static void bcm_uart_break_ctl(struct uart_port *port, int ctl) Link Here
201
	unsigned long flags;
201
	unsigned long flags;
202
	unsigned int val;
202
	unsigned int val;
203
203
204
	spin_lock_irqsave(&port->lock, flags);
204
	uart_port_lock_irqsave(port, &flags);
205
205
206
	val = bcm_uart_readl(port, UART_CTL_REG);
206
	val = bcm_uart_readl(port, UART_CTL_REG);
207
	if (ctl)
207
	if (ctl)
Lines 210-216 static void bcm_uart_break_ctl(struct uart_port *port, int ctl) Link Here
210
		val &= ~UART_CTL_XMITBRK_MASK;
210
		val &= ~UART_CTL_XMITBRK_MASK;
211
	bcm_uart_writel(port, val, UART_CTL_REG);
211
	bcm_uart_writel(port, val, UART_CTL_REG);
212
212
213
	spin_unlock_irqrestore(&port->lock, flags);
213
	uart_port_unlock_irqrestore(port, flags);
214
}
214
}
215
215
216
/*
216
/*
Lines 332-338 static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id) Link Here
332
	unsigned int irqstat;
332
	unsigned int irqstat;
333
333
334
	port = dev_id;
334
	port = dev_id;
335
	spin_lock(&port->lock);
335
	uart_port_lock(port);
336
336
337
	irqstat = bcm_uart_readl(port, UART_IR_REG);
337
	irqstat = bcm_uart_readl(port, UART_IR_REG);
338
	if (irqstat & UART_RX_INT_STAT)
338
	if (irqstat & UART_RX_INT_STAT)
Lines 353-359 static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id) Link Here
353
					       estat & UART_EXTINP_DCD_MASK);
353
					       estat & UART_EXTINP_DCD_MASK);
354
	}
354
	}
355
355
356
	spin_unlock(&port->lock);
356
	uart_port_unlock(port);
357
	return IRQ_HANDLED;
357
	return IRQ_HANDLED;
358
}
358
}
359
359
Lines 451-459 static void bcm_uart_shutdown(struct uart_port *port) Link Here
451
{
451
{
452
	unsigned long flags;
452
	unsigned long flags;
453
453
454
	spin_lock_irqsave(&port->lock, flags);
454
	uart_port_lock_irqsave(port, &flags);
455
	bcm_uart_writel(port, 0, UART_IR_REG);
455
	bcm_uart_writel(port, 0, UART_IR_REG);
456
	spin_unlock_irqrestore(&port->lock, flags);
456
	uart_port_unlock_irqrestore(port, flags);
457
457
458
	bcm_uart_disable(port);
458
	bcm_uart_disable(port);
459
	bcm_uart_flush(port);
459
	bcm_uart_flush(port);
Lines 470-476 static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
470
	unsigned long flags;
470
	unsigned long flags;
471
	int tries;
471
	int tries;
472
472
473
	spin_lock_irqsave(&port->lock, flags);
473
	uart_port_lock_irqsave(port, &flags);
474
474
475
	/* Drain the hot tub fully before we power it off for the winter. */
475
	/* Drain the hot tub fully before we power it off for the winter. */
476
	for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
476
	for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
Lines 546-552 static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
546
546
547
	uart_update_timeout(port, new->c_cflag, baud);
547
	uart_update_timeout(port, new->c_cflag, baud);
548
	bcm_uart_enable(port);
548
	bcm_uart_enable(port);
549
	spin_unlock_irqrestore(&port->lock, flags);
549
	uart_port_unlock_irqrestore(port, flags);
550
}
550
}
551
551
552
/*
552
/*
Lines 712-720 static void bcm_console_write(struct console *co, const char *s, Link Here
712
		/* bcm_uart_interrupt() already took the lock */
712
		/* bcm_uart_interrupt() already took the lock */
713
		locked = 0;
713
		locked = 0;
714
	} else if (oops_in_progress) {
714
	} else if (oops_in_progress) {
715
		locked = spin_trylock(&port->lock);
715
		locked = uart_port_trylock(port);
716
	} else {
716
	} else {
717
		spin_lock(&port->lock);
717
		uart_port_lock(port);
718
		locked = 1;
718
		locked = 1;
719
	}
719
	}
720
720
Lines 725-731 static void bcm_console_write(struct console *co, const char *s, Link Here
725
	wait_for_xmitr(port);
725
	wait_for_xmitr(port);
726
726
727
	if (locked)
727
	if (locked)
728
		spin_unlock(&port->lock);
728
		uart_port_unlock(port);
729
	local_irq_restore(flags);
729
	local_irq_restore(flags);
730
}
730
}
731
731
(-)a/drivers/tty/serial/cpm_uart.c (-4 / +4 lines)
Lines 569-575 static void cpm_uart_set_termios(struct uart_port *port, Link Here
569
	if ((termios->c_cflag & CREAD) == 0)
569
	if ((termios->c_cflag & CREAD) == 0)
570
		port->read_status_mask &= ~BD_SC_EMPTY;
570
		port->read_status_mask &= ~BD_SC_EMPTY;
571
571
572
	spin_lock_irqsave(&port->lock, flags);
572
	uart_port_lock_irqsave(port, &flags);
573
573
574
	if (IS_SMC(pinfo)) {
574
	if (IS_SMC(pinfo)) {
575
		unsigned int bits = tty_get_frame_size(termios->c_cflag);
575
		unsigned int bits = tty_get_frame_size(termios->c_cflag);
Lines 609-615 static void cpm_uart_set_termios(struct uart_port *port, Link Here
609
		clk_set_rate(pinfo->clk, baud);
609
		clk_set_rate(pinfo->clk, baud);
610
	else
610
	else
611
		cpm_setbrg(pinfo->brg - 1, baud);
611
		cpm_setbrg(pinfo->brg - 1, baud);
612
	spin_unlock_irqrestore(&port->lock, flags);
612
	uart_port_unlock_irqrestore(port, flags);
613
}
613
}
614
614
615
static const char *cpm_uart_type(struct uart_port *port)
615
static const char *cpm_uart_type(struct uart_port *port)
Lines 1386-1394 static void cpm_uart_console_write(struct console *co, const char *s, Link Here
1386
		cpm_uart_early_write(pinfo, s, count, true);
1386
		cpm_uart_early_write(pinfo, s, count, true);
1387
		local_irq_restore(flags);
1387
		local_irq_restore(flags);
1388
	} else {
1388
	} else {
1389
		spin_lock_irqsave(&pinfo->port.lock, flags);
1389
		uart_port_lock_irqsave(&pinfo->port, &flags);
1390
		cpm_uart_early_write(pinfo, s, count, true);
1390
		cpm_uart_early_write(pinfo, s, count, true);
1391
		spin_unlock_irqrestore(&pinfo->port.lock, flags);
1391
		uart_port_unlock_irqrestore(&pinfo->port, flags);
1392
	}
1392
	}
1393
}
1393
}
1394
1394
(-)a/drivers/tty/serial/digicolor-usart.c (-9 / +9 lines)
Lines 133-139 static void digicolor_uart_rx(struct uart_port *port) Link Here
133
{
133
{
134
	unsigned long flags;
134
	unsigned long flags;
135
135
136
	spin_lock_irqsave(&port->lock, flags);
136
	uart_port_lock_irqsave(port, &flags);
137
137
138
	while (1) {
138
	while (1) {
139
		u8 status, ch, ch_flag;
139
		u8 status, ch, ch_flag;
Lines 172-178 static void digicolor_uart_rx(struct uart_port *port) Link Here
172
				 ch_flag);
172
				 ch_flag);
173
	}
173
	}
174
174
175
	spin_unlock_irqrestore(&port->lock, flags);
175
	uart_port_unlock_irqrestore(port, flags);
176
176
177
	tty_flip_buffer_push(&port->state->port);
177
	tty_flip_buffer_push(&port->state->port);
178
}
178
}
Lines 185-191 static void digicolor_uart_tx(struct uart_port *port) Link Here
185
	if (digicolor_uart_tx_full(port))
185
	if (digicolor_uart_tx_full(port))
186
		return;
186
		return;
187
187
188
	spin_lock_irqsave(&port->lock, flags);
188
	uart_port_lock_irqsave(port, &flags);
189
189
190
	if (port->x_char) {
190
	if (port->x_char) {
191
		writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
191
		writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
Lines 211-217 static void digicolor_uart_tx(struct uart_port *port) Link Here
211
		uart_write_wakeup(port);
211
		uart_write_wakeup(port);
212
212
213
out:
213
out:
214
	spin_unlock_irqrestore(&port->lock, flags);
214
	uart_port_unlock_irqrestore(port, flags);
215
}
215
}
216
216
217
static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
217
static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
Lines 333-339 static void digicolor_uart_set_termios(struct uart_port *port, Link Here
333
		port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
333
		port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
334
			| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
334
			| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
335
335
336
	spin_lock_irqsave(&port->lock, flags);
336
	uart_port_lock_irqsave(port, &flags);
337
337
338
	uart_update_timeout(port, termios->c_cflag, baud);
338
	uart_update_timeout(port, termios->c_cflag, baud);
339
339
Lines 341-347 static void digicolor_uart_set_termios(struct uart_port *port, Link Here
341
	writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
341
	writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
342
	writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
342
	writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
343
343
344
	spin_unlock_irqrestore(&port->lock, flags);
344
	uart_port_unlock_irqrestore(port, flags);
345
}
345
}
346
346
347
static const char *digicolor_uart_type(struct uart_port *port)
347
static const char *digicolor_uart_type(struct uart_port *port)
Lines 398-411 static void digicolor_uart_console_write(struct console *co, const char *c, Link Here
398
	int locked = 1;
398
	int locked = 1;
399
399
400
	if (oops_in_progress)
400
	if (oops_in_progress)
401
		locked = spin_trylock_irqsave(&port->lock, flags);
401
		locked = uart_port_trylock_irqsave(port, &flags);
402
	else
402
	else
403
		spin_lock_irqsave(&port->lock, flags);
403
		uart_port_lock_irqsave(port, &flags);
404
404
405
	uart_console_write(port, c, n, digicolor_uart_console_putchar);
405
	uart_console_write(port, c, n, digicolor_uart_console_putchar);
406
406
407
	if (locked)
407
	if (locked)
408
		spin_unlock_irqrestore(&port->lock, flags);
408
		uart_port_unlock_irqrestore(port, flags);
409
409
410
	/* Wait for transmitter to become empty */
410
	/* Wait for transmitter to become empty */
411
	do {
411
	do {
(-)a/drivers/tty/serial/dz.c (-16 / +16 lines)
Lines 268-276 static inline void dz_transmit_chars(struct dz_mux *mux) Link Here
268
	}
268
	}
269
	/* If nothing to do or stopped or hardware stopped. */
269
	/* If nothing to do or stopped or hardware stopped. */
270
	if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
270
	if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
271
		spin_lock(&dport->port.lock);
271
		uart_port_lock(&dport->port);
272
		dz_stop_tx(&dport->port);
272
		dz_stop_tx(&dport->port);
273
		spin_unlock(&dport->port.lock);
273
		uart_port_unlock(&dport->port);
274
		return;
274
		return;
275
	}
275
	}
276
276
Lines 287-295 static inline void dz_transmit_chars(struct dz_mux *mux) Link Here
287
287
288
	/* Are we are done. */
288
	/* Are we are done. */
289
	if (uart_circ_empty(xmit)) {
289
	if (uart_circ_empty(xmit)) {
290
		spin_lock(&dport->port.lock);
290
		uart_port_lock(&dport->port);
291
		dz_stop_tx(&dport->port);
291
		dz_stop_tx(&dport->port);
292
		spin_unlock(&dport->port.lock);
292
		uart_port_unlock(&dport->port);
293
	}
293
	}
294
}
294
}
295
295
Lines 415-428 static int dz_startup(struct uart_port *uport) Link Here
415
		return ret;
415
		return ret;
416
	}
416
	}
417
417
418
	spin_lock_irqsave(&dport->port.lock, flags);
418
	uart_port_lock_irqsave(&dport->port, &flags);
419
419
420
	/* Enable interrupts.  */
420
	/* Enable interrupts.  */
421
	tmp = dz_in(dport, DZ_CSR);
421
	tmp = dz_in(dport, DZ_CSR);
422
	tmp |= DZ_RIE | DZ_TIE;
422
	tmp |= DZ_RIE | DZ_TIE;
423
	dz_out(dport, DZ_CSR, tmp);
423
	dz_out(dport, DZ_CSR, tmp);
424
424
425
	spin_unlock_irqrestore(&dport->port.lock, flags);
425
	uart_port_unlock_irqrestore(&dport->port, flags);
426
426
427
	return 0;
427
	return 0;
428
}
428
}
Lines 443-451 static void dz_shutdown(struct uart_port *uport) Link Here
443
	int irq_guard;
443
	int irq_guard;
444
	u16 tmp;
444
	u16 tmp;
445
445
446
	spin_lock_irqsave(&dport->port.lock, flags);
446
	uart_port_lock_irqsave(&dport->port, &flags);
447
	dz_stop_tx(&dport->port);
447
	dz_stop_tx(&dport->port);
448
	spin_unlock_irqrestore(&dport->port.lock, flags);
448
	uart_port_unlock_irqrestore(&dport->port, flags);
449
449
450
	irq_guard = atomic_add_return(-1, &mux->irq_guard);
450
	irq_guard = atomic_add_return(-1, &mux->irq_guard);
451
	if (!irq_guard) {
451
	if (!irq_guard) {
Lines 491-504 static void dz_break_ctl(struct uart_port *uport, int break_state) Link Here
491
	unsigned long flags;
491
	unsigned long flags;
492
	unsigned short tmp, mask = 1 << dport->port.line;
492
	unsigned short tmp, mask = 1 << dport->port.line;
493
493
494
	spin_lock_irqsave(&uport->lock, flags);
494
	uart_port_lock_irqsave(uport, &flags);
495
	tmp = dz_in(dport, DZ_TCR);
495
	tmp = dz_in(dport, DZ_TCR);
496
	if (break_state)
496
	if (break_state)
497
		tmp |= mask;
497
		tmp |= mask;
498
	else
498
	else
499
		tmp &= ~mask;
499
		tmp &= ~mask;
500
	dz_out(dport, DZ_TCR, tmp);
500
	dz_out(dport, DZ_TCR, tmp);
501
	spin_unlock_irqrestore(&uport->lock, flags);
501
	uart_port_unlock_irqrestore(uport, flags);
502
}
502
}
503
503
504
static int dz_encode_baud_rate(unsigned int baud)
504
static int dz_encode_baud_rate(unsigned int baud)
Lines 608-614 static void dz_set_termios(struct uart_port *uport, struct ktermios *termios, Link Here
608
	if (termios->c_cflag & CREAD)
608
	if (termios->c_cflag & CREAD)
609
		cflag |= DZ_RXENAB;
609
		cflag |= DZ_RXENAB;
610
610
611
	spin_lock_irqsave(&dport->port.lock, flags);
611
	uart_port_lock_irqsave(&dport->port, &flags);
612
612
613
	uart_update_timeout(uport, termios->c_cflag, baud);
613
	uart_update_timeout(uport, termios->c_cflag, baud);
614
614
Lines 631-637 static void dz_set_termios(struct uart_port *uport, struct ktermios *termios, Link Here
631
	if (termios->c_iflag & IGNBRK)
631
	if (termios->c_iflag & IGNBRK)
632
		dport->port.ignore_status_mask |= DZ_BREAK;
632
		dport->port.ignore_status_mask |= DZ_BREAK;
633
633
634
	spin_unlock_irqrestore(&dport->port.lock, flags);
634
	uart_port_unlock_irqrestore(&dport->port, flags);
635
}
635
}
636
636
637
/*
637
/*
Lines 645-656 static void dz_pm(struct uart_port *uport, unsigned int state, Link Here
645
	struct dz_port *dport = to_dport(uport);
645
	struct dz_port *dport = to_dport(uport);
646
	unsigned long flags;
646
	unsigned long flags;
647
647
648
	spin_lock_irqsave(&dport->port.lock, flags);
648
	uart_port_lock_irqsave(&dport->port, &flags);
649
	if (state < 3)
649
	if (state < 3)
650
		dz_start_tx(&dport->port);
650
		dz_start_tx(&dport->port);
651
	else
651
	else
652
		dz_stop_tx(&dport->port);
652
		dz_stop_tx(&dport->port);
653
	spin_unlock_irqrestore(&dport->port.lock, flags);
653
	uart_port_unlock_irqrestore(&dport->port, flags);
654
}
654
}
655
655
656
656
Lines 811-817 static void dz_console_putchar(struct uart_port *uport, unsigned char ch) Link Here
811
	unsigned short csr, tcr, trdy, mask;
811
	unsigned short csr, tcr, trdy, mask;
812
	int loops = 10000;
812
	int loops = 10000;
813
813
814
	spin_lock_irqsave(&dport->port.lock, flags);
814
	uart_port_lock_irqsave(&dport->port, &flags);
815
	csr = dz_in(dport, DZ_CSR);
815
	csr = dz_in(dport, DZ_CSR);
816
	dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
816
	dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
817
	tcr = dz_in(dport, DZ_TCR);
817
	tcr = dz_in(dport, DZ_TCR);
Lines 819-825 static void dz_console_putchar(struct uart_port *uport, unsigned char ch) Link Here
819
	mask = tcr;
819
	mask = tcr;
820
	dz_out(dport, DZ_TCR, mask);
820
	dz_out(dport, DZ_TCR, mask);
821
	iob();
821
	iob();
822
	spin_unlock_irqrestore(&dport->port.lock, flags);
822
	uart_port_unlock_irqrestore(&dport->port, flags);
823
823
824
	do {
824
	do {
825
		trdy = dz_in(dport, DZ_CSR);
825
		trdy = dz_in(dport, DZ_CSR);
(-)a/drivers/tty/serial/fsl_linflexuart.c (-13 / +13 lines)
Lines 203-209 static irqreturn_t linflex_txint(int irq, void *dev_id) Link Here
203
	struct circ_buf *xmit = &sport->state->xmit;
203
	struct circ_buf *xmit = &sport->state->xmit;
204
	unsigned long flags;
204
	unsigned long flags;
205
205
206
	spin_lock_irqsave(&sport->lock, flags);
206
	uart_port_lock_irqsave(sport, &flags);
207
207
208
	if (sport->x_char) {
208
	if (sport->x_char) {
209
		linflex_put_char(sport, sport->x_char);
209
		linflex_put_char(sport, sport->x_char);
Lines 217-223 static irqreturn_t linflex_txint(int irq, void *dev_id) Link Here
217
217
218
	linflex_transmit_buffer(sport);
218
	linflex_transmit_buffer(sport);
219
out:
219
out:
220
	spin_unlock_irqrestore(&sport->lock, flags);
220
	uart_port_unlock_irqrestore(sport, flags);
221
	return IRQ_HANDLED;
221
	return IRQ_HANDLED;
222
}
222
}
223
223
Lines 230-236 static irqreturn_t linflex_rxint(int irq, void *dev_id) Link Here
230
	unsigned char rx;
230
	unsigned char rx;
231
	bool brk;
231
	bool brk;
232
232
233
	spin_lock_irqsave(&sport->lock, flags);
233
	uart_port_lock_irqsave(sport, &flags);
234
234
235
	status = readl(sport->membase + UARTSR);
235
	status = readl(sport->membase + UARTSR);
236
	while (status & LINFLEXD_UARTSR_RMB) {
236
	while (status & LINFLEXD_UARTSR_RMB) {
Lines 266-272 static irqreturn_t linflex_rxint(int irq, void *dev_id) Link Here
266
		}
266
		}
267
	}
267
	}
268
268
269
	spin_unlock_irqrestore(&sport->lock, flags);
269
	uart_port_unlock_irqrestore(sport, flags);
270
270
271
	tty_flip_buffer_push(port);
271
	tty_flip_buffer_push(port);
272
272
Lines 369-379 static int linflex_startup(struct uart_port *port) Link Here
369
	int ret = 0;
369
	int ret = 0;
370
	unsigned long flags;
370
	unsigned long flags;
371
371
372
	spin_lock_irqsave(&port->lock, flags);
372
	uart_port_lock_irqsave(port, &flags);
373
373
374
	linflex_setup_watermark(port);
374
	linflex_setup_watermark(port);
375
375
376
	spin_unlock_irqrestore(&port->lock, flags);
376
	uart_port_unlock_irqrestore(port, flags);
377
377
378
	ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
378
	ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
379
			       DRIVER_NAME, port);
379
			       DRIVER_NAME, port);
Lines 386-399 static void linflex_shutdown(struct uart_port *port) Link Here
386
	unsigned long ier;
386
	unsigned long ier;
387
	unsigned long flags;
387
	unsigned long flags;
388
388
389
	spin_lock_irqsave(&port->lock, flags);
389
	uart_port_lock_irqsave(port, &flags);
390
390
391
	/* disable interrupts */
391
	/* disable interrupts */
392
	ier = readl(port->membase + LINIER);
392
	ier = readl(port->membase + LINIER);
393
	ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
393
	ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
394
	writel(ier, port->membase + LINIER);
394
	writel(ier, port->membase + LINIER);
395
395
396
	spin_unlock_irqrestore(&port->lock, flags);
396
	uart_port_unlock_irqrestore(port, flags);
397
397
398
	devm_free_irq(port->dev, port->irq, port);
398
	devm_free_irq(port->dev, port->irq, port);
399
}
399
}
Lines 474-480 linflex_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
474
		cr &= ~LINFLEXD_UARTCR_PCE;
474
		cr &= ~LINFLEXD_UARTCR_PCE;
475
	}
475
	}
476
476
477
	spin_lock_irqsave(&port->lock, flags);
477
	uart_port_lock_irqsave(port, &flags);
478
478
479
	port->read_status_mask = 0;
479
	port->read_status_mask = 0;
480
480
Lines 507-513 linflex_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
507
507
508
	writel(cr1, port->membase + LINCR1);
508
	writel(cr1, port->membase + LINCR1);
509
509
510
	spin_unlock_irqrestore(&port->lock, flags);
510
	uart_port_unlock_irqrestore(port, flags);
511
}
511
}
512
512
513
static const char *linflex_type(struct uart_port *port)
513
static const char *linflex_type(struct uart_port *port)
Lines 646-659 linflex_console_write(struct console *co, const char *s, unsigned int count) Link Here
646
	if (sport->sysrq)
646
	if (sport->sysrq)
647
		locked = 0;
647
		locked = 0;
648
	else if (oops_in_progress)
648
	else if (oops_in_progress)
649
		locked = spin_trylock_irqsave(&sport->lock, flags);
649
		locked = uart_port_trylock_irqsave(sport, &flags);
650
	else
650
	else
651
		spin_lock_irqsave(&sport->lock, flags);
651
		uart_port_lock_irqsave(sport, &flags);
652
652
653
	linflex_string_write(sport, s, count);
653
	linflex_string_write(sport, s, count);
654
654
655
	if (locked)
655
	if (locked)
656
		spin_unlock_irqrestore(&sport->lock, flags);
656
		uart_port_unlock_irqrestore(sport, flags);
657
}
657
}
658
658
659
/*
659
/*
(-)a/drivers/tty/serial/fsl_lpuart.c (-44 / +44 lines)
Lines 532-540 static void lpuart_dma_tx_complete(void *arg) Link Here
532
	struct dma_chan *chan = sport->dma_tx_chan;
532
	struct dma_chan *chan = sport->dma_tx_chan;
533
	unsigned long flags;
533
	unsigned long flags;
534
534
535
	spin_lock_irqsave(&sport->port.lock, flags);
535
	uart_port_lock_irqsave(&sport->port, &flags);
536
	if (!sport->dma_tx_in_progress) {
536
	if (!sport->dma_tx_in_progress) {
537
		spin_unlock_irqrestore(&sport->port.lock, flags);
537
		uart_port_unlock_irqrestore(&sport->port, flags);
538
		return;
538
		return;
539
	}
539
	}
540
540
Lines 543-549 static void lpuart_dma_tx_complete(void *arg) Link Here
543
543
544
	uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
544
	uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
545
	sport->dma_tx_in_progress = false;
545
	sport->dma_tx_in_progress = false;
546
	spin_unlock_irqrestore(&sport->port.lock, flags);
546
	uart_port_unlock_irqrestore(&sport->port, flags);
547
547
548
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
548
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
549
		uart_write_wakeup(&sport->port);
549
		uart_write_wakeup(&sport->port);
Lines 553-564 static void lpuart_dma_tx_complete(void *arg) Link Here
553
		return;
553
		return;
554
	}
554
	}
555
555
556
	spin_lock_irqsave(&sport->port.lock, flags);
556
	uart_port_lock_irqsave(&sport->port, &flags);
557
557
558
	if (!lpuart_stopped_or_empty(&sport->port))
558
	if (!lpuart_stopped_or_empty(&sport->port))
559
		lpuart_dma_tx(sport);
559
		lpuart_dma_tx(sport);
560
560
561
	spin_unlock_irqrestore(&sport->port.lock, flags);
561
	uart_port_unlock_irqrestore(&sport->port, flags);
562
}
562
}
563
563
564
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
564
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
Lines 651-657 static int lpuart_poll_init(struct uart_port *port) Link Here
651
651
652
	sport->port.fifosize = 0;
652
	sport->port.fifosize = 0;
653
653
654
	spin_lock_irqsave(&sport->port.lock, flags);
654
	uart_port_lock_irqsave(&sport->port, &flags);
655
	/* Disable Rx & Tx */
655
	/* Disable Rx & Tx */
656
	writeb(0, sport->port.membase + UARTCR2);
656
	writeb(0, sport->port.membase + UARTCR2);
657
657
Lines 675-681 static int lpuart_poll_init(struct uart_port *port) Link Here
675
675
676
	/* Enable Rx and Tx */
676
	/* Enable Rx and Tx */
677
	writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
677
	writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
678
	spin_unlock_irqrestore(&sport->port.lock, flags);
678
	uart_port_unlock_irqrestore(&sport->port, flags);
679
679
680
	return 0;
680
	return 0;
681
}
681
}
Lines 703-709 static int lpuart32_poll_init(struct uart_port *port) Link Here
703
703
704
	sport->port.fifosize = 0;
704
	sport->port.fifosize = 0;
705
705
706
	spin_lock_irqsave(&sport->port.lock, flags);
706
	uart_port_lock_irqsave(&sport->port, &flags);
707
707
708
	/* Disable Rx & Tx */
708
	/* Disable Rx & Tx */
709
	lpuart32_write(&sport->port, 0, UARTCTRL);
709
	lpuart32_write(&sport->port, 0, UARTCTRL);
Lines 724-730 static int lpuart32_poll_init(struct uart_port *port) Link Here
724
724
725
	/* Enable Rx and Tx */
725
	/* Enable Rx and Tx */
726
	lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
726
	lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
727
	spin_unlock_irqrestore(&sport->port.lock, flags);
727
	uart_port_unlock_irqrestore(&sport->port, flags);
728
728
729
	return 0;
729
	return 0;
730
}
730
}
Lines 879-887 static unsigned int lpuart32_tx_empty(struct uart_port *port) Link Here
879
879
880
static void lpuart_txint(struct lpuart_port *sport)
880
static void lpuart_txint(struct lpuart_port *sport)
881
{
881
{
882
	spin_lock(&sport->port.lock);
882
	uart_port_lock(&sport->port);
883
	lpuart_transmit_buffer(sport);
883
	lpuart_transmit_buffer(sport);
884
	spin_unlock(&sport->port.lock);
884
	uart_port_unlock(&sport->port);
885
}
885
}
886
886
887
static void lpuart_rxint(struct lpuart_port *sport)
887
static void lpuart_rxint(struct lpuart_port *sport)
Lines 890-896 static void lpuart_rxint(struct lpuart_port *sport) Link Here
890
	struct tty_port *port = &sport->port.state->port;
890
	struct tty_port *port = &sport->port.state->port;
891
	unsigned char rx, sr;
891
	unsigned char rx, sr;
892
892
893
	spin_lock(&sport->port.lock);
893
	uart_port_lock(&sport->port);
894
894
895
	while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
895
	while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
896
		flg = TTY_NORMAL;
896
		flg = TTY_NORMAL;
Lines 956-964 static void lpuart_rxint(struct lpuart_port *sport) Link Here
956
956
957
static void lpuart32_txint(struct lpuart_port *sport)
957
static void lpuart32_txint(struct lpuart_port *sport)
958
{
958
{
959
	spin_lock(&sport->port.lock);
959
	uart_port_lock(&sport->port);
960
	lpuart32_transmit_buffer(sport);
960
	lpuart32_transmit_buffer(sport);
961
	spin_unlock(&sport->port.lock);
961
	uart_port_unlock(&sport->port);
962
}
962
}
963
963
964
static void lpuart32_rxint(struct lpuart_port *sport)
964
static void lpuart32_rxint(struct lpuart_port *sport)
Lines 968-974 static void lpuart32_rxint(struct lpuart_port *sport) Link Here
968
	unsigned long rx, sr;
968
	unsigned long rx, sr;
969
	bool is_break;
969
	bool is_break;
970
970
971
	spin_lock(&sport->port.lock);
971
	uart_port_lock(&sport->port);
972
972
973
	while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
973
	while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
974
		flg = TTY_NORMAL;
974
		flg = TTY_NORMAL;
Lines 1170-1181 static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) Link Here
1170
1170
1171
	async_tx_ack(sport->dma_rx_desc);
1171
	async_tx_ack(sport->dma_rx_desc);
1172
1172
1173
	spin_lock_irqsave(&sport->port.lock, flags);
1173
	uart_port_lock_irqsave(&sport->port, &flags);
1174
1174
1175
	dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
1175
	dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
1176
	if (dmastat == DMA_ERROR) {
1176
	if (dmastat == DMA_ERROR) {
1177
		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
1177
		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
1178
		spin_unlock_irqrestore(&sport->port.lock, flags);
1178
		uart_port_unlock_irqrestore(&sport->port, flags);
1179
		return;
1179
		return;
1180
	}
1180
	}
1181
1181
Lines 1244-1250 static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) Link Here
1244
	dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
1244
	dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
1245
			       DMA_FROM_DEVICE);
1245
			       DMA_FROM_DEVICE);
1246
1246
1247
	spin_unlock_irqrestore(&sport->port.lock, flags);
1247
	uart_port_unlock_irqrestore(&sport->port, flags);
1248
1248
1249
	tty_flip_buffer_push(port);
1249
	tty_flip_buffer_push(port);
1250
	if (!sport->dma_idle_int)
1250
	if (!sport->dma_idle_int)
Lines 1335-1343 static void lpuart_timer_func(struct timer_list *t) Link Here
1335
		mod_timer(&sport->lpuart_timer,
1335
		mod_timer(&sport->lpuart_timer,
1336
			  jiffies + sport->dma_rx_timeout);
1336
			  jiffies + sport->dma_rx_timeout);
1337
1337
1338
	if (spin_trylock_irqsave(&sport->port.lock, flags)) {
1338
	if (uart_port_trylock_irqsave(&sport->port, &flags)) {
1339
		sport->last_residue = state.residue;
1339
		sport->last_residue = state.residue;
1340
		spin_unlock_irqrestore(&sport->port.lock, flags);
1340
		uart_port_unlock_irqrestore(&sport->port, flags);
1341
	}
1341
	}
1342
}
1342
}
1343
1343
Lines 1802-1815 static void lpuart_hw_setup(struct lpuart_port *sport) Link Here
1802
{
1802
{
1803
	unsigned long flags;
1803
	unsigned long flags;
1804
1804
1805
	spin_lock_irqsave(&sport->port.lock, flags);
1805
	uart_port_lock_irqsave(&sport->port, &flags);
1806
1806
1807
	lpuart_setup_watermark_enable(sport);
1807
	lpuart_setup_watermark_enable(sport);
1808
1808
1809
	lpuart_rx_dma_startup(sport);
1809
	lpuart_rx_dma_startup(sport);
1810
	lpuart_tx_dma_startup(sport);
1810
	lpuart_tx_dma_startup(sport);
1811
1811
1812
	spin_unlock_irqrestore(&sport->port.lock, flags);
1812
	uart_port_unlock_irqrestore(&sport->port, flags);
1813
}
1813
}
1814
1814
1815
static int lpuart_startup(struct uart_port *port)
1815
static int lpuart_startup(struct uart_port *port)
Lines 1859-1865 static void lpuart32_hw_setup(struct lpuart_port *sport) Link Here
1859
{
1859
{
1860
	unsigned long flags;
1860
	unsigned long flags;
1861
1861
1862
	spin_lock_irqsave(&sport->port.lock, flags);
1862
	uart_port_lock_irqsave(&sport->port, &flags);
1863
1863
1864
	lpuart32_hw_disable(sport);
1864
	lpuart32_hw_disable(sport);
1865
1865
Lines 1869-1875 static void lpuart32_hw_setup(struct lpuart_port *sport) Link Here
1869
	lpuart32_setup_watermark_enable(sport);
1869
	lpuart32_setup_watermark_enable(sport);
1870
	lpuart32_configure(sport);
1870
	lpuart32_configure(sport);
1871
1871
1872
	spin_unlock_irqrestore(&sport->port.lock, flags);
1872
	uart_port_unlock_irqrestore(&sport->port, flags);
1873
}
1873
}
1874
1874
1875
static int lpuart32_startup(struct uart_port *port)
1875
static int lpuart32_startup(struct uart_port *port)
Lines 1932-1938 static void lpuart_shutdown(struct uart_port *port) Link Here
1932
	unsigned char temp;
1932
	unsigned char temp;
1933
	unsigned long flags;
1933
	unsigned long flags;
1934
1934
1935
	spin_lock_irqsave(&port->lock, flags);
1935
	uart_port_lock_irqsave(port, &flags);
1936
1936
1937
	/* disable Rx/Tx and interrupts */
1937
	/* disable Rx/Tx and interrupts */
1938
	temp = readb(port->membase + UARTCR2);
1938
	temp = readb(port->membase + UARTCR2);
Lines 1940-1946 static void lpuart_shutdown(struct uart_port *port) Link Here
1940
			UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
1940
			UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
1941
	writeb(temp, port->membase + UARTCR2);
1941
	writeb(temp, port->membase + UARTCR2);
1942
1942
1943
	spin_unlock_irqrestore(&port->lock, flags);
1943
	uart_port_unlock_irqrestore(port, flags);
1944
1944
1945
	lpuart_dma_shutdown(sport);
1945
	lpuart_dma_shutdown(sport);
1946
}
1946
}
Lines 1952-1958 static void lpuart32_shutdown(struct uart_port *port) Link Here
1952
	unsigned long temp;
1952
	unsigned long temp;
1953
	unsigned long flags;
1953
	unsigned long flags;
1954
1954
1955
	spin_lock_irqsave(&port->lock, flags);
1955
	uart_port_lock_irqsave(port, &flags);
1956
1956
1957
	/* clear status */
1957
	/* clear status */
1958
	temp = lpuart32_read(&sport->port, UARTSTAT);
1958
	temp = lpuart32_read(&sport->port, UARTSTAT);
Lines 1969-1975 static void lpuart32_shutdown(struct uart_port *port) Link Here
1969
			UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
1969
			UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
1970
	lpuart32_write(port, temp, UARTCTRL);
1970
	lpuart32_write(port, temp, UARTCTRL);
1971
1971
1972
	spin_unlock_irqrestore(&port->lock, flags);
1972
	uart_port_unlock_irqrestore(port, flags);
1973
1973
1974
	lpuart_dma_shutdown(sport);
1974
	lpuart_dma_shutdown(sport);
1975
}
1975
}
Lines 2069-2075 lpuart_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2069
	if (old && sport->lpuart_dma_rx_use)
2069
	if (old && sport->lpuart_dma_rx_use)
2070
		lpuart_dma_rx_free(&sport->port);
2070
		lpuart_dma_rx_free(&sport->port);
2071
2071
2072
	spin_lock_irqsave(&sport->port.lock, flags);
2072
	uart_port_lock_irqsave(&sport->port, &flags);
2073
2073
2074
	sport->port.read_status_mask = 0;
2074
	sport->port.read_status_mask = 0;
2075
	if (termios->c_iflag & INPCK)
2075
	if (termios->c_iflag & INPCK)
Lines 2124-2130 lpuart_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2124
			sport->lpuart_dma_rx_use = false;
2124
			sport->lpuart_dma_rx_use = false;
2125
	}
2125
	}
2126
2126
2127
	spin_unlock_irqrestore(&sport->port.lock, flags);
2127
	uart_port_unlock_irqrestore(&sport->port, flags);
2128
}
2128
}
2129
2129
2130
static void __lpuart32_serial_setbrg(struct uart_port *port,
2130
static void __lpuart32_serial_setbrg(struct uart_port *port,
Lines 2304-2310 lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2304
	if (old && sport->lpuart_dma_rx_use)
2304
	if (old && sport->lpuart_dma_rx_use)
2305
		lpuart_dma_rx_free(&sport->port);
2305
		lpuart_dma_rx_free(&sport->port);
2306
2306
2307
	spin_lock_irqsave(&sport->port.lock, flags);
2307
	uart_port_lock_irqsave(&sport->port, &flags);
2308
2308
2309
	sport->port.read_status_mask = 0;
2309
	sport->port.read_status_mask = 0;
2310
	if (termios->c_iflag & INPCK)
2310
	if (termios->c_iflag & INPCK)
Lines 2359-2365 lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2359
			sport->lpuart_dma_rx_use = false;
2359
			sport->lpuart_dma_rx_use = false;
2360
	}
2360
	}
2361
2361
2362
	spin_unlock_irqrestore(&sport->port.lock, flags);
2362
	uart_port_unlock_irqrestore(&sport->port, flags);
2363
}
2363
}
2364
2364
2365
static const char *lpuart_type(struct uart_port *port)
2365
static const char *lpuart_type(struct uart_port *port)
Lines 2477-2485 lpuart_console_write(struct console *co, const char *s, unsigned int count) Link Here
2477
	int locked = 1;
2477
	int locked = 1;
2478
2478
2479
	if (oops_in_progress)
2479
	if (oops_in_progress)
2480
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
2480
		locked = uart_port_trylock_irqsave(&sport->port, &flags);
2481
	else
2481
	else
2482
		spin_lock_irqsave(&sport->port.lock, flags);
2482
		uart_port_lock_irqsave(&sport->port, &flags);
2483
2483
2484
	/* first save CR2 and then disable interrupts */
2484
	/* first save CR2 and then disable interrupts */
2485
	cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
2485
	cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
Lines 2495-2501 lpuart_console_write(struct console *co, const char *s, unsigned int count) Link Here
2495
	writeb(old_cr2, sport->port.membase + UARTCR2);
2495
	writeb(old_cr2, sport->port.membase + UARTCR2);
2496
2496
2497
	if (locked)
2497
	if (locked)
2498
		spin_unlock_irqrestore(&sport->port.lock, flags);
2498
		uart_port_unlock_irqrestore(&sport->port, flags);
2499
}
2499
}
2500
2500
2501
static void
2501
static void
Lines 2507-2515 lpuart32_console_write(struct console *co, const char *s, unsigned int count) Link Here
2507
	int locked = 1;
2507
	int locked = 1;
2508
2508
2509
	if (oops_in_progress)
2509
	if (oops_in_progress)
2510
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
2510
		locked = uart_port_trylock_irqsave(&sport->port, &flags);
2511
	else
2511
	else
2512
		spin_lock_irqsave(&sport->port.lock, flags);
2512
		uart_port_lock_irqsave(&sport->port, &flags);
2513
2513
2514
	/* first save CR2 and then disable interrupts */
2514
	/* first save CR2 and then disable interrupts */
2515
	cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
2515
	cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
Lines 2525-2531 lpuart32_console_write(struct console *co, const char *s, unsigned int count) Link Here
2525
	lpuart32_write(&sport->port, old_cr, UARTCTRL);
2525
	lpuart32_write(&sport->port, old_cr, UARTCTRL);
2526
2526
2527
	if (locked)
2527
	if (locked)
2528
		spin_unlock_irqrestore(&sport->port.lock, flags);
2528
		uart_port_unlock_irqrestore(&sport->port, flags);
2529
}
2529
}
2530
2530
2531
/*
2531
/*
Lines 3089-3095 static int lpuart_suspend(struct device *dev) Link Here
3089
	uart_suspend_port(&lpuart_reg, &sport->port);
3089
	uart_suspend_port(&lpuart_reg, &sport->port);
3090
3090
3091
	if (lpuart_uport_is_active(sport)) {
3091
	if (lpuart_uport_is_active(sport)) {
3092
		spin_lock_irqsave(&sport->port.lock, flags);
3092
		uart_port_lock_irqsave(&sport->port, &flags);
3093
		if (lpuart_is_32(sport)) {
3093
		if (lpuart_is_32(sport)) {
3094
			/* disable Rx/Tx and interrupts */
3094
			/* disable Rx/Tx and interrupts */
3095
			temp = lpuart32_read(&sport->port, UARTCTRL);
3095
			temp = lpuart32_read(&sport->port, UARTCTRL);
Lines 3101-3107 static int lpuart_suspend(struct device *dev) Link Here
3101
			temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
3101
			temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
3102
			writeb(temp, sport->port.membase + UARTCR2);
3102
			writeb(temp, sport->port.membase + UARTCR2);
3103
		}
3103
		}
3104
		spin_unlock_irqrestore(&sport->port.lock, flags);
3104
		uart_port_unlock_irqrestore(&sport->port, flags);
3105
3105
3106
		if (sport->lpuart_dma_rx_use) {
3106
		if (sport->lpuart_dma_rx_use) {
3107
			/*
3107
			/*
Lines 3114-3120 static int lpuart_suspend(struct device *dev) Link Here
3114
			lpuart_dma_rx_free(&sport->port);
3114
			lpuart_dma_rx_free(&sport->port);
3115
3115
3116
			/* Disable Rx DMA to use UART port as wakeup source */
3116
			/* Disable Rx DMA to use UART port as wakeup source */
3117
			spin_lock_irqsave(&sport->port.lock, flags);
3117
			uart_port_lock_irqsave(&sport->port, &flags);
3118
			if (lpuart_is_32(sport)) {
3118
			if (lpuart_is_32(sport)) {
3119
				temp = lpuart32_read(&sport->port, UARTBAUD);
3119
				temp = lpuart32_read(&sport->port, UARTBAUD);
3120
				lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
3120
				lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
Lines 3123-3133 static int lpuart_suspend(struct device *dev) Link Here
3123
				writeb(readb(sport->port.membase + UARTCR5) &
3123
				writeb(readb(sport->port.membase + UARTCR5) &
3124
				       ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
3124
				       ~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
3125
			}
3125
			}
3126
			spin_unlock_irqrestore(&sport->port.lock, flags);
3126
			uart_port_unlock_irqrestore(&sport->port, flags);
3127
		}
3127
		}
3128
3128
3129
		if (sport->lpuart_dma_tx_use) {
3129
		if (sport->lpuart_dma_tx_use) {
3130
			spin_lock_irqsave(&sport->port.lock, flags);
3130
			uart_port_lock_irqsave(&sport->port, &flags);
3131
			if (lpuart_is_32(sport)) {
3131
			if (lpuart_is_32(sport)) {
3132
				temp = lpuart32_read(&sport->port, UARTBAUD);
3132
				temp = lpuart32_read(&sport->port, UARTBAUD);
3133
				temp &= ~UARTBAUD_TDMAE;
3133
				temp &= ~UARTBAUD_TDMAE;
Lines 3137-3143 static int lpuart_suspend(struct device *dev) Link Here
3137
				temp &= ~UARTCR5_TDMAS;
3137
				temp &= ~UARTCR5_TDMAS;
3138
				writeb(temp, sport->port.membase + UARTCR5);
3138
				writeb(temp, sport->port.membase + UARTCR5);
3139
			}
3139
			}
3140
			spin_unlock_irqrestore(&sport->port.lock, flags);
3140
			uart_port_unlock_irqrestore(&sport->port, flags);
3141
			sport->dma_tx_in_progress = false;
3141
			sport->dma_tx_in_progress = false;
3142
			dmaengine_terminate_sync(sport->dma_tx_chan);
3142
			dmaengine_terminate_sync(sport->dma_tx_chan);
3143
		}
3143
		}
(-)a/drivers/tty/serial/icom.c (-13 / +13 lines)
Lines 929-935 static inline void check_modem_status(struct icom_port *icom_port) Link Here
929
	char delta_status;
929
	char delta_status;
930
	unsigned char status;
930
	unsigned char status;
931
931
932
	spin_lock(&icom_port->uart_port.lock);
932
	uart_port_lock(&icom_port->uart_port);
933
933
934
	/*modem input register */
934
	/*modem input register */
935
	status = readb(&icom_port->dram->isr);
935
	status = readb(&icom_port->dram->isr);
Lines 951-957 static inline void check_modem_status(struct icom_port *icom_port) Link Here
951
				      port.delta_msr_wait);
951
				      port.delta_msr_wait);
952
		old_status = status;
952
		old_status = status;
953
	}
953
	}
954
	spin_unlock(&icom_port->uart_port.lock);
954
	uart_port_unlock(&icom_port->uart_port);
955
}
955
}
956
956
957
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
957
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
Lines 1093-1099 static void process_interrupt(u16 port_int_reg, Link Here
1093
			      struct icom_port *icom_port)
1093
			      struct icom_port *icom_port)
1094
{
1094
{
1095
1095
1096
	spin_lock(&icom_port->uart_port.lock);
1096
	uart_port_lock(&icom_port->uart_port);
1097
	trace(icom_port, "INTERRUPT", port_int_reg);
1097
	trace(icom_port, "INTERRUPT", port_int_reg);
1098
1098
1099
	if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
1099
	if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
Lines 1102-1108 static void process_interrupt(u16 port_int_reg, Link Here
1102
	if (port_int_reg & INT_RCV_COMPLETED)
1102
	if (port_int_reg & INT_RCV_COMPLETED)
1103
		recv_interrupt(port_int_reg, icom_port);
1103
		recv_interrupt(port_int_reg, icom_port);
1104
1104
1105
	spin_unlock(&icom_port->uart_port.lock);
1105
	uart_port_unlock(&icom_port->uart_port);
1106
}
1106
}
1107
1107
1108
static irqreturn_t icom_interrupt(int irq, void *dev_id)
1108
static irqreturn_t icom_interrupt(int irq, void *dev_id)
Lines 1186-1199 static unsigned int icom_tx_empty(struct uart_port *port) Link Here
1186
	int ret;
1186
	int ret;
1187
	unsigned long flags;
1187
	unsigned long flags;
1188
1188
1189
	spin_lock_irqsave(&port->lock, flags);
1189
	uart_port_lock_irqsave(port, &flags);
1190
	if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
1190
	if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
1191
	    SA_FLAGS_READY_TO_XMIT)
1191
	    SA_FLAGS_READY_TO_XMIT)
1192
		ret = TIOCSER_TEMT;
1192
		ret = TIOCSER_TEMT;
1193
	else
1193
	else
1194
		ret = 0;
1194
		ret = 0;
1195
1195
1196
	spin_unlock_irqrestore(&port->lock, flags);
1196
	uart_port_unlock_irqrestore(port, flags);
1197
	return ret;
1197
	return ret;
1198
}
1198
}
1199
1199
Lines 1276-1282 static void icom_send_xchar(struct uart_port *port, char ch) Link Here
1276
1276
1277
	/* wait .1 sec to send char */
1277
	/* wait .1 sec to send char */
1278
	for (index = 0; index < 10; index++) {
1278
	for (index = 0; index < 10; index++) {
1279
		spin_lock_irqsave(&port->lock, flags);
1279
		uart_port_lock_irqsave(port, &flags);
1280
		xdata = readb(&icom_port->dram->xchar);
1280
		xdata = readb(&icom_port->dram->xchar);
1281
		if (xdata == 0x00) {
1281
		if (xdata == 0x00) {
1282
			trace(icom_port, "QUICK_WRITE", 0);
1282
			trace(icom_port, "QUICK_WRITE", 0);
Lines 1284-1293 static void icom_send_xchar(struct uart_port *port, char ch) Link Here
1284
1284
1285
			/* flush write operation */
1285
			/* flush write operation */
1286
			xdata = readb(&icom_port->dram->xchar);
1286
			xdata = readb(&icom_port->dram->xchar);
1287
			spin_unlock_irqrestore(&port->lock, flags);
1287
			uart_port_unlock_irqrestore(port, flags);
1288
			break;
1288
			break;
1289
		}
1289
		}
1290
		spin_unlock_irqrestore(&port->lock, flags);
1290
		uart_port_unlock_irqrestore(port, flags);
1291
		msleep(10);
1291
		msleep(10);
1292
	}
1292
	}
1293
}
1293
}
Lines 1307-1313 static void icom_break(struct uart_port *port, int break_state) Link Here
1307
	unsigned char cmdReg;
1307
	unsigned char cmdReg;
1308
	unsigned long flags;
1308
	unsigned long flags;
1309
1309
1310
	spin_lock_irqsave(&port->lock, flags);
1310
	uart_port_lock_irqsave(port, &flags);
1311
	trace(icom_port, "BREAK", 0);
1311
	trace(icom_port, "BREAK", 0);
1312
	cmdReg = readb(&icom_port->dram->CmdReg);
1312
	cmdReg = readb(&icom_port->dram->CmdReg);
1313
	if (break_state == -1) {
1313
	if (break_state == -1) {
Lines 1315-1321 static void icom_break(struct uart_port *port, int break_state) Link Here
1315
	} else {
1315
	} else {
1316
		writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
1316
		writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
1317
	}
1317
	}
1318
	spin_unlock_irqrestore(&port->lock, flags);
1318
	uart_port_unlock_irqrestore(port, flags);
1319
}
1319
}
1320
1320
1321
static int icom_open(struct uart_port *port)
1321
static int icom_open(struct uart_port *port)
Lines 1365-1371 static void icom_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1365
	unsigned long offset;
1365
	unsigned long offset;
1366
	unsigned long flags;
1366
	unsigned long flags;
1367
1367
1368
	spin_lock_irqsave(&port->lock, flags);
1368
	uart_port_lock_irqsave(port, &flags);
1369
	trace(icom_port, "CHANGE_SPEED", 0);
1369
	trace(icom_port, "CHANGE_SPEED", 0);
1370
1370
1371
	cflag = termios->c_cflag;
1371
	cflag = termios->c_cflag;
Lines 1516-1522 static void icom_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1516
	trace(icom_port, "XR_ENAB", 0);
1516
	trace(icom_port, "XR_ENAB", 0);
1517
	writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
1517
	writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
1518
1518
1519
	spin_unlock_irqrestore(&port->lock, flags);
1519
	uart_port_unlock_irqrestore(port, flags);
1520
}
1520
}
1521
1521
1522
static const char *icom_type(struct uart_port *port)
1522
static const char *icom_type(struct uart_port *port)
(-)a/drivers/tty/serial/imx.c (-42 / +42 lines)
Lines 575-581 static void imx_uart_dma_tx_callback(void *data) Link Here
575
	unsigned long flags;
575
	unsigned long flags;
576
	u32 ucr1;
576
	u32 ucr1;
577
577
578
	spin_lock_irqsave(&sport->port.lock, flags);
578
	uart_port_lock_irqsave(&sport->port, &flags);
579
579
580
	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
580
	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
581
581
Lines 600-606 static void imx_uart_dma_tx_callback(void *data) Link Here
600
		imx_uart_writel(sport, ucr4, UCR4);
600
		imx_uart_writel(sport, ucr4, UCR4);
601
	}
601
	}
602
602
603
	spin_unlock_irqrestore(&sport->port.lock, flags);
603
	uart_port_unlock_irqrestore(&sport->port, flags);
604
}
604
}
605
605
606
/* called with port.lock taken and irqs off */
606
/* called with port.lock taken and irqs off */
Lines 766-776 static irqreturn_t imx_uart_rtsint(int irq, void *dev_id) Link Here
766
	struct imx_port *sport = dev_id;
766
	struct imx_port *sport = dev_id;
767
	irqreturn_t ret;
767
	irqreturn_t ret;
768
768
769
	spin_lock(&sport->port.lock);
769
	uart_port_lock(&sport->port);
770
770
771
	ret = __imx_uart_rtsint(irq, dev_id);
771
	ret = __imx_uart_rtsint(irq, dev_id);
772
772
773
	spin_unlock(&sport->port.lock);
773
	uart_port_unlock(&sport->port);
774
774
775
	return ret;
775
	return ret;
776
}
776
}
Lines 779-787 static irqreturn_t imx_uart_txint(int irq, void *dev_id) Link Here
779
{
779
{
780
	struct imx_port *sport = dev_id;
780
	struct imx_port *sport = dev_id;
781
781
782
	spin_lock(&sport->port.lock);
782
	uart_port_lock(&sport->port);
783
	imx_uart_transmit_buffer(sport);
783
	imx_uart_transmit_buffer(sport);
784
	spin_unlock(&sport->port.lock);
784
	uart_port_unlock(&sport->port);
785
	return IRQ_HANDLED;
785
	return IRQ_HANDLED;
786
}
786
}
787
787
Lines 895-905 static irqreturn_t imx_uart_rxint(int irq, void *dev_id) Link Here
895
	struct imx_port *sport = dev_id;
895
	struct imx_port *sport = dev_id;
896
	irqreturn_t ret;
896
	irqreturn_t ret;
897
897
898
	spin_lock(&sport->port.lock);
898
	uart_port_lock(&sport->port);
899
899
900
	ret = __imx_uart_rxint(irq, dev_id);
900
	ret = __imx_uart_rxint(irq, dev_id);
901
901
902
	spin_unlock(&sport->port.lock);
902
	uart_port_unlock(&sport->port);
903
903
904
	return ret;
904
	return ret;
905
}
905
}
Lines 962-968 static irqreturn_t imx_uart_int(int irq, void *dev_id) Link Here
962
	unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
962
	unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
963
	irqreturn_t ret = IRQ_NONE;
963
	irqreturn_t ret = IRQ_NONE;
964
964
965
	spin_lock(&sport->port.lock);
965
	uart_port_lock(&sport->port);
966
966
967
	usr1 = imx_uart_readl(sport, USR1);
967
	usr1 = imx_uart_readl(sport, USR1);
968
	usr2 = imx_uart_readl(sport, USR2);
968
	usr2 = imx_uart_readl(sport, USR2);
Lines 1032-1038 static irqreturn_t imx_uart_int(int irq, void *dev_id) Link Here
1032
		ret = IRQ_HANDLED;
1032
		ret = IRQ_HANDLED;
1033
	}
1033
	}
1034
1034
1035
	spin_unlock(&sport->port.lock);
1035
	uart_port_unlock(&sport->port);
1036
1036
1037
	return ret;
1037
	return ret;
1038
}
1038
}
Lines 1115-1121 static void imx_uart_break_ctl(struct uart_port *port, int break_state) Link Here
1115
	unsigned long flags;
1115
	unsigned long flags;
1116
	u32 ucr1;
1116
	u32 ucr1;
1117
1117
1118
	spin_lock_irqsave(&sport->port.lock, flags);
1118
	uart_port_lock_irqsave(&sport->port, &flags);
1119
1119
1120
	ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
1120
	ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
1121
1121
Lines 1124-1130 static void imx_uart_break_ctl(struct uart_port *port, int break_state) Link Here
1124
1124
1125
	imx_uart_writel(sport, ucr1, UCR1);
1125
	imx_uart_writel(sport, ucr1, UCR1);
1126
1126
1127
	spin_unlock_irqrestore(&sport->port.lock, flags);
1127
	uart_port_unlock_irqrestore(&sport->port, flags);
1128
}
1128
}
1129
1129
1130
/*
1130
/*
Lines 1137-1145 static void imx_uart_timeout(struct timer_list *t) Link Here
1137
	unsigned long flags;
1137
	unsigned long flags;
1138
1138
1139
	if (sport->port.state) {
1139
	if (sport->port.state) {
1140
		spin_lock_irqsave(&sport->port.lock, flags);
1140
		uart_port_lock_irqsave(&sport->port, &flags);
1141
		imx_uart_mctrl_check(sport);
1141
		imx_uart_mctrl_check(sport);
1142
		spin_unlock_irqrestore(&sport->port.lock, flags);
1142
		uart_port_unlock_irqrestore(&sport->port, flags);
1143
1143
1144
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
1144
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
1145
	}
1145
	}
Lines 1169-1177 static void imx_uart_dma_rx_callback(void *data) Link Here
1169
	status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
1169
	status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
1170
1170
1171
	if (status == DMA_ERROR) {
1171
	if (status == DMA_ERROR) {
1172
		spin_lock(&sport->port.lock);
1172
		uart_port_lock(&sport->port);
1173
		imx_uart_clear_rx_errors(sport);
1173
		imx_uart_clear_rx_errors(sport);
1174
		spin_unlock(&sport->port.lock);
1174
		uart_port_unlock(&sport->port);
1175
		return;
1175
		return;
1176
	}
1176
	}
1177
1177
Lines 1200-1208 static void imx_uart_dma_rx_callback(void *data) Link Here
1200
		r_bytes = rx_ring->head - rx_ring->tail;
1200
		r_bytes = rx_ring->head - rx_ring->tail;
1201
1201
1202
		/* If we received something, check for 0xff flood */
1202
		/* If we received something, check for 0xff flood */
1203
		spin_lock(&sport->port.lock);
1203
		uart_port_lock(&sport->port);
1204
		imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
1204
		imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
1205
		spin_unlock(&sport->port.lock);
1205
		uart_port_unlock(&sport->port);
1206
1206
1207
		if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
1207
		if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
1208
1208
Lines 1460-1466 static int imx_uart_startup(struct uart_port *port) Link Here
1460
	if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
1460
	if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
1461
		dma_is_inited = 1;
1461
		dma_is_inited = 1;
1462
1462
1463
	spin_lock_irqsave(&sport->port.lock, flags);
1463
	uart_port_lock_irqsave(&sport->port, &flags);
1464
1464
1465
	/* Reset fifo's and state machines */
1465
	/* Reset fifo's and state machines */
1466
	imx_uart_soft_reset(sport);
1466
	imx_uart_soft_reset(sport);
Lines 1533-1539 static int imx_uart_startup(struct uart_port *port) Link Here
1533
1533
1534
	imx_uart_disable_loopback_rs485(sport);
1534
	imx_uart_disable_loopback_rs485(sport);
1535
1535
1536
	spin_unlock_irqrestore(&sport->port.lock, flags);
1536
	uart_port_unlock_irqrestore(&sport->port, flags);
1537
1537
1538
	return 0;
1538
	return 0;
1539
}
1539
}
Lines 1558-1578 static void imx_uart_shutdown(struct uart_port *port) Link Here
1558
			sport->dma_is_rxing = 0;
1558
			sport->dma_is_rxing = 0;
1559
		}
1559
		}
1560
1560
1561
		spin_lock_irqsave(&sport->port.lock, flags);
1561
		uart_port_lock_irqsave(&sport->port, &flags);
1562
		imx_uart_stop_tx(port);
1562
		imx_uart_stop_tx(port);
1563
		imx_uart_stop_rx(port);
1563
		imx_uart_stop_rx(port);
1564
		imx_uart_disable_dma(sport);
1564
		imx_uart_disable_dma(sport);
1565
		spin_unlock_irqrestore(&sport->port.lock, flags);
1565
		uart_port_unlock_irqrestore(&sport->port, flags);
1566
		imx_uart_dma_exit(sport);
1566
		imx_uart_dma_exit(sport);
1567
	}
1567
	}
1568
1568
1569
	mctrl_gpio_disable_ms(sport->gpios);
1569
	mctrl_gpio_disable_ms(sport->gpios);
1570
1570
1571
	spin_lock_irqsave(&sport->port.lock, flags);
1571
	uart_port_lock_irqsave(&sport->port, &flags);
1572
	ucr2 = imx_uart_readl(sport, UCR2);
1572
	ucr2 = imx_uart_readl(sport, UCR2);
1573
	ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
1573
	ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
1574
	imx_uart_writel(sport, ucr2, UCR2);
1574
	imx_uart_writel(sport, ucr2, UCR2);
1575
	spin_unlock_irqrestore(&sport->port.lock, flags);
1575
	uart_port_unlock_irqrestore(&sport->port, flags);
1576
1576
1577
	/*
1577
	/*
1578
	 * Stop our timer.
1578
	 * Stop our timer.
Lines 1583-1589 static void imx_uart_shutdown(struct uart_port *port) Link Here
1583
	 * Disable all interrupts, port and break condition.
1583
	 * Disable all interrupts, port and break condition.
1584
	 */
1584
	 */
1585
1585
1586
	spin_lock_irqsave(&sport->port.lock, flags);
1586
	uart_port_lock_irqsave(&sport->port, &flags);
1587
1587
1588
	ucr1 = imx_uart_readl(sport, UCR1);
1588
	ucr1 = imx_uart_readl(sport, UCR1);
1589
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
1589
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
Lines 1605-1611 static void imx_uart_shutdown(struct uart_port *port) Link Here
1605
	ucr4 &= ~UCR4_TCEN;
1605
	ucr4 &= ~UCR4_TCEN;
1606
	imx_uart_writel(sport, ucr4, UCR4);
1606
	imx_uart_writel(sport, ucr4, UCR4);
1607
1607
1608
	spin_unlock_irqrestore(&sport->port.lock, flags);
1608
	uart_port_unlock_irqrestore(&sport->port, flags);
1609
1609
1610
	clk_disable_unprepare(sport->clk_per);
1610
	clk_disable_unprepare(sport->clk_per);
1611
	clk_disable_unprepare(sport->clk_ipg);
1611
	clk_disable_unprepare(sport->clk_ipg);
Lines 1668-1674 imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1668
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
1668
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
1669
	quot = uart_get_divisor(port, baud);
1669
	quot = uart_get_divisor(port, baud);
1670
1670
1671
	spin_lock_irqsave(&sport->port.lock, flags);
1671
	uart_port_lock_irqsave(&sport->port, &flags);
1672
1672
1673
	/*
1673
	/*
1674
	 * Read current UCR2 and save it for future use, then clear all the bits
1674
	 * Read current UCR2 and save it for future use, then clear all the bits
Lines 1796-1802 imx_uart_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1796
	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1796
	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1797
		imx_uart_enable_ms(&sport->port);
1797
		imx_uart_enable_ms(&sport->port);
1798
1798
1799
	spin_unlock_irqrestore(&sport->port.lock, flags);
1799
	uart_port_unlock_irqrestore(&sport->port, flags);
1800
}
1800
}
1801
1801
1802
static const char *imx_uart_type(struct uart_port *port)
1802
static const char *imx_uart_type(struct uart_port *port)
Lines 1858-1864 static int imx_uart_poll_init(struct uart_port *port) Link Here
1858
1858
1859
	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1859
	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1860
1860
1861
	spin_lock_irqsave(&sport->port.lock, flags);
1861
	uart_port_lock_irqsave(&sport->port, &flags);
1862
1862
1863
	/*
1863
	/*
1864
	 * Be careful about the order of enabling bits here. First enable the
1864
	 * Be careful about the order of enabling bits here. First enable the
Lines 1886-1892 static int imx_uart_poll_init(struct uart_port *port) Link Here
1886
	imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
1886
	imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
1887
	imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
1887
	imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
1888
1888
1889
	spin_unlock_irqrestore(&sport->port.lock, flags);
1889
	uart_port_unlock_irqrestore(&sport->port, flags);
1890
1890
1891
	return 0;
1891
	return 0;
1892
}
1892
}
Lines 2005-2013 imx_uart_console_write(struct console *co, const char *s, unsigned int count) Link Here
2005
	if (sport->port.sysrq)
2005
	if (sport->port.sysrq)
2006
		locked = 0;
2006
		locked = 0;
2007
	else if (oops_in_progress)
2007
	else if (oops_in_progress)
2008
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
2008
		locked = uart_port_trylock_irqsave(&sport->port, &flags);
2009
	else
2009
	else
2010
		spin_lock_irqsave(&sport->port.lock, flags);
2010
		uart_port_lock_irqsave(&sport->port, &flags);
2011
2011
2012
	/*
2012
	/*
2013
	 *	First, save UCR1/2/3 and then disable interrupts
2013
	 *	First, save UCR1/2/3 and then disable interrupts
Lines 2035-2041 imx_uart_console_write(struct console *co, const char *s, unsigned int count) Link Here
2035
	imx_uart_ucrs_restore(sport, &old_ucr);
2035
	imx_uart_ucrs_restore(sport, &old_ucr);
2036
2036
2037
	if (locked)
2037
	if (locked)
2038
		spin_unlock_irqrestore(&sport->port.lock, flags);
2038
		uart_port_unlock_irqrestore(&sport->port, flags);
2039
}
2039
}
2040
2040
2041
/*
2041
/*
Lines 2193-2202 static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t) Link Here
2193
	struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
2193
	struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
2194
	unsigned long flags;
2194
	unsigned long flags;
2195
2195
2196
	spin_lock_irqsave(&sport->port.lock, flags);
2196
	uart_port_lock_irqsave(&sport->port, &flags);
2197
	if (sport->tx_state == WAIT_AFTER_RTS)
2197
	if (sport->tx_state == WAIT_AFTER_RTS)
2198
		imx_uart_start_tx(&sport->port);
2198
		imx_uart_start_tx(&sport->port);
2199
	spin_unlock_irqrestore(&sport->port.lock, flags);
2199
	uart_port_unlock_irqrestore(&sport->port, flags);
2200
2200
2201
	return HRTIMER_NORESTART;
2201
	return HRTIMER_NORESTART;
2202
}
2202
}
Lines 2206-2215 static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t) Link Here
2206
	struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
2206
	struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
2207
	unsigned long flags;
2207
	unsigned long flags;
2208
2208
2209
	spin_lock_irqsave(&sport->port.lock, flags);
2209
	uart_port_lock_irqsave(&sport->port, &flags);
2210
	if (sport->tx_state == WAIT_AFTER_SEND)
2210
	if (sport->tx_state == WAIT_AFTER_SEND)
2211
		imx_uart_stop_tx(&sport->port);
2211
		imx_uart_stop_tx(&sport->port);
2212
	spin_unlock_irqrestore(&sport->port.lock, flags);
2212
	uart_port_unlock_irqrestore(&sport->port, flags);
2213
2213
2214
	return HRTIMER_NORESTART;
2214
	return HRTIMER_NORESTART;
2215
}
2215
}
Lines 2482-2490 static void imx_uart_restore_context(struct imx_port *sport) Link Here
2482
{
2482
{
2483
	unsigned long flags;
2483
	unsigned long flags;
2484
2484
2485
	spin_lock_irqsave(&sport->port.lock, flags);
2485
	uart_port_lock_irqsave(&sport->port, &flags);
2486
	if (!sport->context_saved) {
2486
	if (!sport->context_saved) {
2487
		spin_unlock_irqrestore(&sport->port.lock, flags);
2487
		uart_port_unlock_irqrestore(&sport->port, flags);
2488
		return;
2488
		return;
2489
	}
2489
	}
2490
2490
Lines 2499-2505 static void imx_uart_restore_context(struct imx_port *sport) Link Here
2499
	imx_uart_writel(sport, sport->saved_reg[2], UCR3);
2499
	imx_uart_writel(sport, sport->saved_reg[2], UCR3);
2500
	imx_uart_writel(sport, sport->saved_reg[3], UCR4);
2500
	imx_uart_writel(sport, sport->saved_reg[3], UCR4);
2501
	sport->context_saved = false;
2501
	sport->context_saved = false;
2502
	spin_unlock_irqrestore(&sport->port.lock, flags);
2502
	uart_port_unlock_irqrestore(&sport->port, flags);
2503
}
2503
}
2504
2504
2505
static void imx_uart_save_context(struct imx_port *sport)
2505
static void imx_uart_save_context(struct imx_port *sport)
Lines 2507-2513 static void imx_uart_save_context(struct imx_port *sport) Link Here
2507
	unsigned long flags;
2507
	unsigned long flags;
2508
2508
2509
	/* Save necessary regs */
2509
	/* Save necessary regs */
2510
	spin_lock_irqsave(&sport->port.lock, flags);
2510
	uart_port_lock_irqsave(&sport->port, &flags);
2511
	sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
2511
	sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
2512
	sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
2512
	sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
2513
	sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
2513
	sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
Lines 2519-2525 static void imx_uart_save_context(struct imx_port *sport) Link Here
2519
	sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
2519
	sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
2520
	sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
2520
	sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
2521
	sport->context_saved = true;
2521
	sport->context_saved = true;
2522
	spin_unlock_irqrestore(&sport->port.lock, flags);
2522
	uart_port_unlock_irqrestore(&sport->port, flags);
2523
}
2523
}
2524
2524
2525
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
2525
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
(-)a/drivers/tty/serial/ip22zilog.c (-18 / +18 lines)
Lines 432-438 static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id) Link Here
432
		unsigned char r3;
432
		unsigned char r3;
433
		bool push = false;
433
		bool push = false;
434
434
435
		spin_lock(&up->port.lock);
435
		uart_port_lock(&up->port);
436
		r3 = read_zsreg(channel, R3);
436
		r3 = read_zsreg(channel, R3);
437
437
438
		/* Channel A */
438
		/* Channel A */
Lines 448-454 static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id) Link Here
448
			if (r3 & CHATxIP)
448
			if (r3 & CHATxIP)
449
				ip22zilog_transmit_chars(up, channel);
449
				ip22zilog_transmit_chars(up, channel);
450
		}
450
		}
451
		spin_unlock(&up->port.lock);
451
		uart_port_unlock(&up->port);
452
452
453
		if (push)
453
		if (push)
454
			tty_flip_buffer_push(&up->port.state->port);
454
			tty_flip_buffer_push(&up->port.state->port);
Lines 458-464 static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id) Link Here
458
		channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
458
		channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
459
		push = false;
459
		push = false;
460
460
461
		spin_lock(&up->port.lock);
461
		uart_port_lock(&up->port);
462
		if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
462
		if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
463
			writeb(RES_H_IUS, &channel->control);
463
			writeb(RES_H_IUS, &channel->control);
464
			ZSDELAY();
464
			ZSDELAY();
Lines 471-477 static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id) Link Here
471
			if (r3 & CHBTxIP)
471
			if (r3 & CHBTxIP)
472
				ip22zilog_transmit_chars(up, channel);
472
				ip22zilog_transmit_chars(up, channel);
473
		}
473
		}
474
		spin_unlock(&up->port.lock);
474
		uart_port_unlock(&up->port);
475
475
476
		if (push)
476
		if (push)
477
			tty_flip_buffer_push(&up->port.state->port);
477
			tty_flip_buffer_push(&up->port.state->port);
Lines 504-514 static unsigned int ip22zilog_tx_empty(struct uart_port *port) Link Here
504
	unsigned char status;
504
	unsigned char status;
505
	unsigned int ret;
505
	unsigned int ret;
506
506
507
	spin_lock_irqsave(&port->lock, flags);
507
	uart_port_lock_irqsave(port, &flags);
508
508
509
	status = ip22zilog_read_channel_status(port);
509
	status = ip22zilog_read_channel_status(port);
510
510
511
	spin_unlock_irqrestore(&port->lock, flags);
511
	uart_port_unlock_irqrestore(port, flags);
512
512
513
	if (status & Tx_BUF_EMP)
513
	if (status & Tx_BUF_EMP)
514
		ret = TIOCSER_TEMT;
514
		ret = TIOCSER_TEMT;
Lines 664-670 static void ip22zilog_break_ctl(struct uart_port *port, int break_state) Link Here
664
	else
664
	else
665
		clear_bits |= SND_BRK;
665
		clear_bits |= SND_BRK;
666
666
667
	spin_lock_irqsave(&port->lock, flags);
667
	uart_port_lock_irqsave(port, &flags);
668
668
669
	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
669
	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
670
	if (new_reg != up->curregs[R5]) {
670
	if (new_reg != up->curregs[R5]) {
Lines 674-680 static void ip22zilog_break_ctl(struct uart_port *port, int break_state) Link Here
674
		write_zsreg(channel, R5, up->curregs[R5]);
674
		write_zsreg(channel, R5, up->curregs[R5]);
675
	}
675
	}
676
676
677
	spin_unlock_irqrestore(&port->lock, flags);
677
	uart_port_unlock_irqrestore(port, flags);
678
}
678
}
679
679
680
static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
680
static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
Lines 735-743 static int ip22zilog_startup(struct uart_port *port) Link Here
735
	if (ZS_IS_CONS(up))
735
	if (ZS_IS_CONS(up))
736
		return 0;
736
		return 0;
737
737
738
	spin_lock_irqsave(&port->lock, flags);
738
	uart_port_lock_irqsave(port, &flags);
739
	__ip22zilog_startup(up);
739
	__ip22zilog_startup(up);
740
	spin_unlock_irqrestore(&port->lock, flags);
740
	uart_port_unlock_irqrestore(port, flags);
741
	return 0;
741
	return 0;
742
}
742
}
743
743
Lines 775-781 static void ip22zilog_shutdown(struct uart_port *port) Link Here
775
	if (ZS_IS_CONS(up))
775
	if (ZS_IS_CONS(up))
776
		return;
776
		return;
777
777
778
	spin_lock_irqsave(&port->lock, flags);
778
	uart_port_lock_irqsave(port, &flags);
779
779
780
	channel = ZILOG_CHANNEL_FROM_PORT(port);
780
	channel = ZILOG_CHANNEL_FROM_PORT(port);
781
781
Lines 788-794 static void ip22zilog_shutdown(struct uart_port *port) Link Here
788
	up->curregs[R5] &= ~SND_BRK;
788
	up->curregs[R5] &= ~SND_BRK;
789
	ip22zilog_maybe_update_regs(up, channel);
789
	ip22zilog_maybe_update_regs(up, channel);
790
790
791
	spin_unlock_irqrestore(&port->lock, flags);
791
	uart_port_unlock_irqrestore(port, flags);
792
}
792
}
793
793
794
/* Shared by TTY driver and serial console setup.  The port lock is held
794
/* Shared by TTY driver and serial console setup.  The port lock is held
Lines 880-886 ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
880
880
881
	baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
881
	baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
882
882
883
	spin_lock_irqsave(&up->port.lock, flags);
883
	uart_port_lock_irqsave(&up->port, &flags);
884
884
885
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
885
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
886
886
Lines 894-900 ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
894
	ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
894
	ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
895
	uart_update_timeout(port, termios->c_cflag, baud);
895
	uart_update_timeout(port, termios->c_cflag, baud);
896
896
897
	spin_unlock_irqrestore(&up->port.lock, flags);
897
	uart_port_unlock_irqrestore(&up->port, flags);
898
}
898
}
899
899
900
static const char *ip22zilog_type(struct uart_port *port)
900
static const char *ip22zilog_type(struct uart_port *port)
Lines 1016-1025 ip22zilog_console_write(struct console *con, const char *s, unsigned int count) Link Here
1016
	struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
1016
	struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
1017
	unsigned long flags;
1017
	unsigned long flags;
1018
1018
1019
	spin_lock_irqsave(&up->port.lock, flags);
1019
	uart_port_lock_irqsave(&up->port, &flags);
1020
	uart_console_write(&up->port, s, count, ip22zilog_put_char);
1020
	uart_console_write(&up->port, s, count, ip22zilog_put_char);
1021
	udelay(2);
1021
	udelay(2);
1022
	spin_unlock_irqrestore(&up->port.lock, flags);
1022
	uart_port_unlock_irqrestore(&up->port, flags);
1023
}
1023
}
1024
1024
1025
static int __init ip22zilog_console_setup(struct console *con, char *options)
1025
static int __init ip22zilog_console_setup(struct console *con, char *options)
Lines 1034-1046 static int __init ip22zilog_console_setup(struct console *con, char *options) Link Here
1034
1034
1035
	printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
1035
	printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
1036
1036
1037
	spin_lock_irqsave(&up->port.lock, flags);
1037
	uart_port_lock_irqsave(&up->port, &flags);
1038
1038
1039
	up->curregs[R15] |= BRKIE;
1039
	up->curregs[R15] |= BRKIE;
1040
1040
1041
	__ip22zilog_startup(up);
1041
	__ip22zilog_startup(up);
1042
1042
1043
	spin_unlock_irqrestore(&up->port.lock, flags);
1043
	uart_port_unlock_irqrestore(&up->port, flags);
1044
1044
1045
	if (options)
1045
	if (options)
1046
		uart_parse_options(options, &baud, &parity, &bits, &flow);
1046
		uart_parse_options(options, &baud, &parity, &bits, &flow);
(-)a/drivers/tty/serial/jsm/jsm_neo.c (-2 / +2 lines)
Lines 816-824 static void neo_parse_isr(struct jsm_board *brd, u32 port) Link Here
816
		/* Parse any modem signal changes */
816
		/* Parse any modem signal changes */
817
		jsm_dbg(INTR, &ch->ch_bd->pci_dev,
817
		jsm_dbg(INTR, &ch->ch_bd->pci_dev,
818
			"MOD_STAT: sending to parse_modem_sigs\n");
818
			"MOD_STAT: sending to parse_modem_sigs\n");
819
		spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
819
		uart_port_lock_irqsave(&ch->uart_port, &lock_flags);
820
		neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
820
		neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
821
		spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
821
		uart_port_unlock_irqrestore(&ch->uart_port, lock_flags);
822
	}
822
	}
823
}
823
}
824
824
(-)a/drivers/tty/serial/jsm/jsm_tty.c (-8 / +8 lines)
Lines 152-165 static void jsm_tty_send_xchar(struct uart_port *port, char ch) Link Here
152
		container_of(port, struct jsm_channel, uart_port);
152
		container_of(port, struct jsm_channel, uart_port);
153
	struct ktermios *termios;
153
	struct ktermios *termios;
154
154
155
	spin_lock_irqsave(&port->lock, lock_flags);
155
	uart_port_lock_irqsave(port, &lock_flags);
156
	termios = &port->state->port.tty->termios;
156
	termios = &port->state->port.tty->termios;
157
	if (ch == termios->c_cc[VSTART])
157
	if (ch == termios->c_cc[VSTART])
158
		channel->ch_bd->bd_ops->send_start_character(channel);
158
		channel->ch_bd->bd_ops->send_start_character(channel);
159
159
160
	if (ch == termios->c_cc[VSTOP])
160
	if (ch == termios->c_cc[VSTOP])
161
		channel->ch_bd->bd_ops->send_stop_character(channel);
161
		channel->ch_bd->bd_ops->send_stop_character(channel);
162
	spin_unlock_irqrestore(&port->lock, lock_flags);
162
	uart_port_unlock_irqrestore(port, lock_flags);
163
}
163
}
164
164
165
static void jsm_tty_stop_rx(struct uart_port *port)
165
static void jsm_tty_stop_rx(struct uart_port *port)
Lines 176-188 static void jsm_tty_break(struct uart_port *port, int break_state) Link Here
176
	struct jsm_channel *channel =
176
	struct jsm_channel *channel =
177
		container_of(port, struct jsm_channel, uart_port);
177
		container_of(port, struct jsm_channel, uart_port);
178
178
179
	spin_lock_irqsave(&port->lock, lock_flags);
179
	uart_port_lock_irqsave(port, &lock_flags);
180
	if (break_state == -1)
180
	if (break_state == -1)
181
		channel->ch_bd->bd_ops->send_break(channel);
181
		channel->ch_bd->bd_ops->send_break(channel);
182
	else
182
	else
183
		channel->ch_bd->bd_ops->clear_break(channel);
183
		channel->ch_bd->bd_ops->clear_break(channel);
184
184
185
	spin_unlock_irqrestore(&port->lock, lock_flags);
185
	uart_port_unlock_irqrestore(port, lock_flags);
186
}
186
}
187
187
188
static int jsm_tty_open(struct uart_port *port)
188
static int jsm_tty_open(struct uart_port *port)
Lines 241-247 static int jsm_tty_open(struct uart_port *port) Link Here
241
	channel->ch_cached_lsr = 0;
241
	channel->ch_cached_lsr = 0;
242
	channel->ch_stops_sent = 0;
242
	channel->ch_stops_sent = 0;
243
243
244
	spin_lock_irqsave(&port->lock, lock_flags);
244
	uart_port_lock_irqsave(port, &lock_flags);
245
	termios = &port->state->port.tty->termios;
245
	termios = &port->state->port.tty->termios;
246
	channel->ch_c_cflag	= termios->c_cflag;
246
	channel->ch_c_cflag	= termios->c_cflag;
247
	channel->ch_c_iflag	= termios->c_iflag;
247
	channel->ch_c_iflag	= termios->c_iflag;
Lines 261-267 static int jsm_tty_open(struct uart_port *port) Link Here
261
	jsm_carrier(channel);
261
	jsm_carrier(channel);
262
262
263
	channel->ch_open_count++;
263
	channel->ch_open_count++;
264
	spin_unlock_irqrestore(&port->lock, lock_flags);
264
	uart_port_unlock_irqrestore(port, lock_flags);
265
265
266
	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
266
	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
267
	return 0;
267
	return 0;
Lines 307-313 static void jsm_tty_set_termios(struct uart_port *port, Link Here
307
	struct jsm_channel *channel =
307
	struct jsm_channel *channel =
308
		container_of(port, struct jsm_channel, uart_port);
308
		container_of(port, struct jsm_channel, uart_port);
309
309
310
	spin_lock_irqsave(&port->lock, lock_flags);
310
	uart_port_lock_irqsave(port, &lock_flags);
311
	channel->ch_c_cflag	= termios->c_cflag;
311
	channel->ch_c_cflag	= termios->c_cflag;
312
	channel->ch_c_iflag	= termios->c_iflag;
312
	channel->ch_c_iflag	= termios->c_iflag;
313
	channel->ch_c_oflag	= termios->c_oflag;
313
	channel->ch_c_oflag	= termios->c_oflag;
Lines 317-323 static void jsm_tty_set_termios(struct uart_port *port, Link Here
317
317
318
	channel->ch_bd->bd_ops->param(channel);
318
	channel->ch_bd->bd_ops->param(channel);
319
	jsm_carrier(channel);
319
	jsm_carrier(channel);
320
	spin_unlock_irqrestore(&port->lock, lock_flags);
320
	uart_port_unlock_irqrestore(port, lock_flags);
321
}
321
}
322
322
323
static const char *jsm_tty_type(struct uart_port *port)
323
static const char *jsm_tty_type(struct uart_port *port)
(-)a/drivers/tty/serial/liteuart.c (-10 / +10 lines)
Lines 139-151 static irqreturn_t liteuart_interrupt(int irq, void *data) Link Here
139
	 * if polling, the context would be "in_serving_softirq", so use
139
	 * if polling, the context would be "in_serving_softirq", so use
140
	 * irq[save|restore] spin_lock variants to cover all possibilities
140
	 * irq[save|restore] spin_lock variants to cover all possibilities
141
	 */
141
	 */
142
	spin_lock_irqsave(&port->lock, flags);
142
	uart_port_lock_irqsave(port, &flags);
143
	isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
143
	isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
144
	if (isr & EV_RX)
144
	if (isr & EV_RX)
145
		liteuart_rx_chars(port);
145
		liteuart_rx_chars(port);
146
	if (isr & EV_TX)
146
	if (isr & EV_TX)
147
		liteuart_tx_chars(port);
147
		liteuart_tx_chars(port);
148
	spin_unlock_irqrestore(&port->lock, flags);
148
	uart_port_unlock_irqrestore(port, flags);
149
149
150
	return IRQ_RETVAL(isr);
150
	return IRQ_RETVAL(isr);
151
}
151
}
Lines 195-204 static int liteuart_startup(struct uart_port *port) Link Here
195
		}
195
		}
196
	}
196
	}
197
197
198
	spin_lock_irqsave(&port->lock, flags);
198
	uart_port_lock_irqsave(port, &flags);
199
	/* only enabling rx irqs during startup */
199
	/* only enabling rx irqs during startup */
200
	liteuart_update_irq_reg(port, true, EV_RX);
200
	liteuart_update_irq_reg(port, true, EV_RX);
201
	spin_unlock_irqrestore(&port->lock, flags);
201
	uart_port_unlock_irqrestore(port, flags);
202
202
203
	if (!port->irq) {
203
	if (!port->irq) {
204
		timer_setup(&uart->timer, liteuart_timer, 0);
204
		timer_setup(&uart->timer, liteuart_timer, 0);
Lines 213-221 static void liteuart_shutdown(struct uart_port *port) Link Here
213
	struct liteuart_port *uart = to_liteuart_port(port);
213
	struct liteuart_port *uart = to_liteuart_port(port);
214
	unsigned long flags;
214
	unsigned long flags;
215
215
216
	spin_lock_irqsave(&port->lock, flags);
216
	uart_port_lock_irqsave(port, &flags);
217
	liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
217
	liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
218
	spin_unlock_irqrestore(&port->lock, flags);
218
	uart_port_unlock_irqrestore(port, flags);
219
219
220
	if (port->irq)
220
	if (port->irq)
221
		free_irq(port->irq, port);
221
		free_irq(port->irq, port);
Lines 229-241 static void liteuart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
229
	unsigned int baud;
229
	unsigned int baud;
230
	unsigned long flags;
230
	unsigned long flags;
231
231
232
	spin_lock_irqsave(&port->lock, flags);
232
	uart_port_lock_irqsave(port, &flags);
233
233
234
	/* update baudrate */
234
	/* update baudrate */
235
	baud = uart_get_baud_rate(port, new, old, 0, 460800);
235
	baud = uart_get_baud_rate(port, new, old, 0, 460800);
236
	uart_update_timeout(port, new->c_cflag, baud);
236
	uart_update_timeout(port, new->c_cflag, baud);
237
237
238
	spin_unlock_irqrestore(&port->lock, flags);
238
	uart_port_unlock_irqrestore(port, flags);
239
}
239
}
240
240
241
static const char *liteuart_type(struct uart_port *port)
241
static const char *liteuart_type(struct uart_port *port)
Lines 382-390 static void liteuart_console_write(struct console *co, const char *s, Link Here
382
	uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
382
	uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
383
	port = &uart->port;
383
	port = &uart->port;
384
384
385
	spin_lock_irqsave(&port->lock, flags);
385
	uart_port_lock_irqsave(port, &flags);
386
	uart_console_write(port, s, count, liteuart_putchar);
386
	uart_console_write(port, s, count, liteuart_putchar);
387
	spin_unlock_irqrestore(&port->lock, flags);
387
	uart_port_unlock_irqrestore(port, flags);
388
}
388
}
389
389
390
static int liteuart_console_setup(struct console *co, char *options)
390
static int liteuart_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/lpc32xx_hs.c (-13 / +13 lines)
Lines 140-154 static void lpc32xx_hsuart_console_write(struct console *co, const char *s, Link Here
140
	if (up->port.sysrq)
140
	if (up->port.sysrq)
141
		locked = 0;
141
		locked = 0;
142
	else if (oops_in_progress)
142
	else if (oops_in_progress)
143
		locked = spin_trylock(&up->port.lock);
143
		locked = uart_port_trylock(&up->port);
144
	else
144
	else
145
		spin_lock(&up->port.lock);
145
		uart_port_lock(&up->port);
146
146
147
	uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
147
	uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
148
	wait_for_xmit_empty(&up->port);
148
	wait_for_xmit_empty(&up->port);
149
149
150
	if (locked)
150
	if (locked)
151
		spin_unlock(&up->port.lock);
151
		uart_port_unlock(&up->port);
152
	local_irq_restore(flags);
152
	local_irq_restore(flags);
153
}
153
}
154
154
Lines 298-304 static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id) Link Here
298
	struct tty_port *tport = &port->state->port;
298
	struct tty_port *tport = &port->state->port;
299
	u32 status;
299
	u32 status;
300
300
301
	spin_lock(&port->lock);
301
	uart_port_lock(port);
302
302
303
	/* Read UART status and clear latched interrupts */
303
	/* Read UART status and clear latched interrupts */
304
	status = readl(LPC32XX_HSUART_IIR(port->membase));
304
	status = readl(LPC32XX_HSUART_IIR(port->membase));
Lines 333-339 static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id) Link Here
333
		__serial_lpc32xx_tx(port);
333
		__serial_lpc32xx_tx(port);
334
	}
334
	}
335
335
336
	spin_unlock(&port->lock);
336
	uart_port_unlock(port);
337
337
338
	return IRQ_HANDLED;
338
	return IRQ_HANDLED;
339
}
339
}
Lines 404-417 static void serial_lpc32xx_break_ctl(struct uart_port *port, Link Here
404
	unsigned long flags;
404
	unsigned long flags;
405
	u32 tmp;
405
	u32 tmp;
406
406
407
	spin_lock_irqsave(&port->lock, flags);
407
	uart_port_lock_irqsave(port, &flags);
408
	tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
408
	tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
409
	if (break_state != 0)
409
	if (break_state != 0)
410
		tmp |= LPC32XX_HSU_BREAK;
410
		tmp |= LPC32XX_HSU_BREAK;
411
	else
411
	else
412
		tmp &= ~LPC32XX_HSU_BREAK;
412
		tmp &= ~LPC32XX_HSU_BREAK;
413
	writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
413
	writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
414
	spin_unlock_irqrestore(&port->lock, flags);
414
	uart_port_unlock_irqrestore(port, flags);
415
}
415
}
416
416
417
/* port->lock is not held.  */
417
/* port->lock is not held.  */
Lines 421-427 static int serial_lpc32xx_startup(struct uart_port *port) Link Here
421
	unsigned long flags;
421
	unsigned long flags;
422
	u32 tmp;
422
	u32 tmp;
423
423
424
	spin_lock_irqsave(&port->lock, flags);
424
	uart_port_lock_irqsave(port, &flags);
425
425
426
	__serial_uart_flush(port);
426
	__serial_uart_flush(port);
427
427
Lines 441-447 static int serial_lpc32xx_startup(struct uart_port *port) Link Here
441
441
442
	lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
442
	lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
443
443
444
	spin_unlock_irqrestore(&port->lock, flags);
444
	uart_port_unlock_irqrestore(port, flags);
445
445
446
	retval = request_irq(port->irq, serial_lpc32xx_interrupt,
446
	retval = request_irq(port->irq, serial_lpc32xx_interrupt,
447
			     0, MODNAME, port);
447
			     0, MODNAME, port);
Lines 458-464 static void serial_lpc32xx_shutdown(struct uart_port *port) Link Here
458
	u32 tmp;
458
	u32 tmp;
459
	unsigned long flags;
459
	unsigned long flags;
460
460
461
	spin_lock_irqsave(&port->lock, flags);
461
	uart_port_lock_irqsave(port, &flags);
462
462
463
	tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
463
	tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
464
		LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
464
		LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
Lines 466-472 static void serial_lpc32xx_shutdown(struct uart_port *port) Link Here
466
466
467
	lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */
467
	lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */
468
468
469
	spin_unlock_irqrestore(&port->lock, flags);
469
	uart_port_unlock_irqrestore(port, flags);
470
470
471
	free_irq(port->irq, port);
471
	free_irq(port->irq, port);
472
}
472
}
Lines 491-497 static void serial_lpc32xx_set_termios(struct uart_port *port, Link Here
491
491
492
	quot = __serial_get_clock_div(port->uartclk, baud);
492
	quot = __serial_get_clock_div(port->uartclk, baud);
493
493
494
	spin_lock_irqsave(&port->lock, flags);
494
	uart_port_lock_irqsave(port, &flags);
495
495
496
	/* Ignore characters? */
496
	/* Ignore characters? */
497
	tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
497
	tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
Lines 505-511 static void serial_lpc32xx_set_termios(struct uart_port *port, Link Here
505
505
506
	uart_update_timeout(port, termios->c_cflag, baud);
506
	uart_update_timeout(port, termios->c_cflag, baud);
507
507
508
	spin_unlock_irqrestore(&port->lock, flags);
508
	uart_port_unlock_irqrestore(port, flags);
509
509
510
	/* Don't rewrite B0 */
510
	/* Don't rewrite B0 */
511
	if (tty_termios_baud_rate(termios))
511
	if (tty_termios_baud_rate(termios))
(-)a/drivers/tty/serial/ma35d1_serial.c (-11 / +11 lines)
Lines 269-284 static void receive_chars(struct uart_ma35d1_port *up) Link Here
269
		if (uart_handle_sysrq_char(&up->port, ch))
269
		if (uart_handle_sysrq_char(&up->port, ch))
270
			continue;
270
			continue;
271
271
272
		spin_lock(&up->port.lock);
272
		uart_port_lock(&up->port);
273
		uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
273
		uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
274
		spin_unlock(&up->port.lock);
274
		uart_port_unlock(&up->port);
275
275
276
		fsr = serial_in(up, MA35_FSR_REG);
276
		fsr = serial_in(up, MA35_FSR_REG);
277
	} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
277
	} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
278
278
279
	spin_lock(&up->port.lock);
279
	uart_port_lock(&up->port);
280
	tty_flip_buffer_push(&up->port.state->port);
280
	tty_flip_buffer_push(&up->port.state->port);
281
	spin_unlock(&up->port.lock);
281
	uart_port_unlock(&up->port);
282
}
282
}
283
283
284
static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
284
static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
Lines 364-377 static void ma35d1serial_break_ctl(struct uart_port *port, int break_state) Link Here
364
	unsigned long flags;
364
	unsigned long flags;
365
	u32 lcr;
365
	u32 lcr;
366
366
367
	spin_lock_irqsave(&up->port.lock, flags);
367
	uart_port_lock_irqsave(&up->port, &flags);
368
	lcr = serial_in(up, MA35_LCR_REG);
368
	lcr = serial_in(up, MA35_LCR_REG);
369
	if (break_state != 0)
369
	if (break_state != 0)
370
		lcr |= MA35_LCR_BREAK;
370
		lcr |= MA35_LCR_BREAK;
371
	else
371
	else
372
		lcr &= ~MA35_LCR_BREAK;
372
		lcr &= ~MA35_LCR_BREAK;
373
	serial_out(up, MA35_LCR_REG, lcr);
373
	serial_out(up, MA35_LCR_REG, lcr);
374
	spin_unlock_irqrestore(&up->port.lock, flags);
374
	uart_port_unlock_irqrestore(&up->port, flags);
375
}
375
}
376
376
377
static int ma35d1serial_startup(struct uart_port *port)
377
static int ma35d1serial_startup(struct uart_port *port)
Lines 441-447 static void ma35d1serial_set_termios(struct uart_port *port, Link Here
441
	 * Ok, we're now changing the port state.  Do it with
441
	 * Ok, we're now changing the port state.  Do it with
442
	 * interrupts disabled.
442
	 * interrupts disabled.
443
	 */
443
	 */
444
	spin_lock_irqsave(&up->port.lock, flags);
444
	uart_port_lock_irqsave(&up->port, &flags);
445
445
446
	up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
446
	up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
447
	if (termios->c_iflag & INPCK)
447
	if (termios->c_iflag & INPCK)
Lines 475-481 static void ma35d1serial_set_termios(struct uart_port *port, Link Here
475
475
476
	serial_out(up, MA35_LCR_REG, lcr);
476
	serial_out(up, MA35_LCR_REG, lcr);
477
477
478
	spin_unlock_irqrestore(&up->port.lock, flags);
478
	uart_port_unlock_irqrestore(&up->port, flags);
479
}
479
}
480
480
481
static const char *ma35d1serial_type(struct uart_port *port)
481
static const char *ma35d1serial_type(struct uart_port *port)
Lines 560-568 static void ma35d1serial_console_write(struct console *co, const char *s, u32 co Link Here
560
	if (up->port.sysrq)
560
	if (up->port.sysrq)
561
		locked = 0;
561
		locked = 0;
562
	else if (oops_in_progress)
562
	else if (oops_in_progress)
563
		locked = spin_trylock_irqsave(&up->port.lock, flags);
563
		locked = uart_port_trylock_irqsave(&up->port, &flags);
564
	else
564
	else
565
		spin_lock_irqsave(&up->port.lock, flags);
565
		uart_port_lock_irqsave(&up->port, &flags);
566
566
567
	/*
567
	/*
568
	 *  First save the IER then disable the interrupts
568
	 *  First save the IER then disable the interrupts
Lines 576-582 static void ma35d1serial_console_write(struct console *co, const char *s, u32 co Link Here
576
	serial_out(up, MA35_IER_REG, ier);
576
	serial_out(up, MA35_IER_REG, ier);
577
577
578
	if (locked)
578
	if (locked)
579
		spin_unlock_irqrestore(&up->port.lock, flags);
579
		uart_port_unlock_irqrestore(&up->port, flags);
580
}
580
}
581
581
582
static int __init ma35d1serial_console_setup(struct console *co, char *options)
582
static int __init ma35d1serial_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/mcf.c (-10 / +10 lines)
Lines 135-146 static void mcf_break_ctl(struct uart_port *port, int break_state) Link Here
135
{
135
{
136
	unsigned long flags;
136
	unsigned long flags;
137
137
138
	spin_lock_irqsave(&port->lock, flags);
138
	uart_port_lock_irqsave(port, &flags);
139
	if (break_state == -1)
139
	if (break_state == -1)
140
		writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
140
		writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
141
	else
141
	else
142
		writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
142
		writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
143
	spin_unlock_irqrestore(&port->lock, flags);
143
	uart_port_unlock_irqrestore(port, flags);
144
}
144
}
145
145
146
/****************************************************************************/
146
/****************************************************************************/
Lines 150-156 static int mcf_startup(struct uart_port *port) Link Here
150
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
150
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
151
	unsigned long flags;
151
	unsigned long flags;
152
152
153
	spin_lock_irqsave(&port->lock, flags);
153
	uart_port_lock_irqsave(port, &flags);
154
154
155
	/* Reset UART, get it into known state... */
155
	/* Reset UART, get it into known state... */
156
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
156
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
Lines 164-170 static int mcf_startup(struct uart_port *port) Link Here
164
	pp->imr = MCFUART_UIR_RXREADY;
164
	pp->imr = MCFUART_UIR_RXREADY;
165
	writeb(pp->imr, port->membase + MCFUART_UIMR);
165
	writeb(pp->imr, port->membase + MCFUART_UIMR);
166
166
167
	spin_unlock_irqrestore(&port->lock, flags);
167
	uart_port_unlock_irqrestore(port, flags);
168
168
169
	return 0;
169
	return 0;
170
}
170
}
Lines 176-182 static void mcf_shutdown(struct uart_port *port) Link Here
176
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
176
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
177
	unsigned long flags;
177
	unsigned long flags;
178
178
179
	spin_lock_irqsave(&port->lock, flags);
179
	uart_port_lock_irqsave(port, &flags);
180
180
181
	/* Disable all interrupts now */
181
	/* Disable all interrupts now */
182
	pp->imr = 0;
182
	pp->imr = 0;
Lines 186-192 static void mcf_shutdown(struct uart_port *port) Link Here
186
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
186
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
187
	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
187
	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
188
188
189
	spin_unlock_irqrestore(&port->lock, flags);
189
	uart_port_unlock_irqrestore(port, flags);
190
}
190
}
191
191
192
/****************************************************************************/
192
/****************************************************************************/
Lines 252-258 static void mcf_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
252
		mr2 |= MCFUART_MR2_TXCTS;
252
		mr2 |= MCFUART_MR2_TXCTS;
253
	}
253
	}
254
254
255
	spin_lock_irqsave(&port->lock, flags);
255
	uart_port_lock_irqsave(port, &flags);
256
	if (port->rs485.flags & SER_RS485_ENABLED) {
256
	if (port->rs485.flags & SER_RS485_ENABLED) {
257
		dev_dbg(port->dev, "Setting UART to RS485\n");
257
		dev_dbg(port->dev, "Setting UART to RS485\n");
258
		mr2 |= MCFUART_MR2_TXRTS;
258
		mr2 |= MCFUART_MR2_TXRTS;
Lines 273-279 static void mcf_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
273
		port->membase + MCFUART_UCSR);
273
		port->membase + MCFUART_UCSR);
274
	writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
274
	writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
275
		port->membase + MCFUART_UCR);
275
		port->membase + MCFUART_UCR);
276
	spin_unlock_irqrestore(&port->lock, flags);
276
	uart_port_unlock_irqrestore(port, flags);
277
}
277
}
278
278
279
/****************************************************************************/
279
/****************************************************************************/
Lines 350-356 static irqreturn_t mcf_interrupt(int irq, void *data) Link Here
350
350
351
	isr = readb(port->membase + MCFUART_UISR) & pp->imr;
351
	isr = readb(port->membase + MCFUART_UISR) & pp->imr;
352
352
353
	spin_lock(&port->lock);
353
	uart_port_lock(port);
354
	if (isr & MCFUART_UIR_RXREADY) {
354
	if (isr & MCFUART_UIR_RXREADY) {
355
		mcf_rx_chars(pp);
355
		mcf_rx_chars(pp);
356
		ret = IRQ_HANDLED;
356
		ret = IRQ_HANDLED;
Lines 359-365 static irqreturn_t mcf_interrupt(int irq, void *data) Link Here
359
		mcf_tx_chars(pp);
359
		mcf_tx_chars(pp);
360
		ret = IRQ_HANDLED;
360
		ret = IRQ_HANDLED;
361
	}
361
	}
362
	spin_unlock(&port->lock);
362
	uart_port_unlock(port);
363
363
364
	return ret;
364
	return ret;
365
}
365
}
(-)a/drivers/tty/serial/men_z135_uart.c (-4 / +4 lines)
Lines 392-398 static irqreturn_t men_z135_intr(int irq, void *data) Link Here
392
	if (!irq_id)
392
	if (!irq_id)
393
		goto out;
393
		goto out;
394
394
395
	spin_lock(&port->lock);
395
	uart_port_lock(port);
396
	/* It's save to write to IIR[7:6] RXC[9:8] */
396
	/* It's save to write to IIR[7:6] RXC[9:8] */
397
	iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
397
	iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
398
398
Lines 418-424 static irqreturn_t men_z135_intr(int irq, void *data) Link Here
418
		handled = true;
418
		handled = true;
419
	}
419
	}
420
420
421
	spin_unlock(&port->lock);
421
	uart_port_unlock(port);
422
out:
422
out:
423
	return IRQ_RETVAL(handled);
423
	return IRQ_RETVAL(handled);
424
}
424
}
Lines 708-714 static void men_z135_set_termios(struct uart_port *port, Link Here
708
708
709
	baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
709
	baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
710
710
711
	spin_lock_irq(&port->lock);
711
	uart_port_lock_irq(port);
712
	if (tty_termios_baud_rate(termios))
712
	if (tty_termios_baud_rate(termios))
713
		tty_termios_encode_baud_rate(termios, baud, baud);
713
		tty_termios_encode_baud_rate(termios, baud, baud);
714
714
Lines 716-722 static void men_z135_set_termios(struct uart_port *port, Link Here
716
	iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
716
	iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
717
717
718
	uart_update_timeout(port, termios->c_cflag, baud);
718
	uart_update_timeout(port, termios->c_cflag, baud);
719
	spin_unlock_irq(&port->lock);
719
	uart_port_unlock_irq(port);
720
}
720
}
721
721
722
static const char *men_z135_type(struct uart_port *port)
722
static const char *men_z135_type(struct uart_port *port)
(-)a/drivers/tty/serial/meson_uart.c (-15 / +15 lines)
Lines 129-142 static void meson_uart_shutdown(struct uart_port *port) Link Here
129
129
130
	free_irq(port->irq, port);
130
	free_irq(port->irq, port);
131
131
132
	spin_lock_irqsave(&port->lock, flags);
132
	uart_port_lock_irqsave(port, &flags);
133
133
134
	val = readl(port->membase + AML_UART_CONTROL);
134
	val = readl(port->membase + AML_UART_CONTROL);
135
	val &= ~AML_UART_RX_EN;
135
	val &= ~AML_UART_RX_EN;
136
	val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
136
	val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
137
	writel(val, port->membase + AML_UART_CONTROL);
137
	writel(val, port->membase + AML_UART_CONTROL);
138
138
139
	spin_unlock_irqrestore(&port->lock, flags);
139
	uart_port_unlock_irqrestore(port, flags);
140
}
140
}
141
141
142
static void meson_uart_start_tx(struct uart_port *port)
142
static void meson_uart_start_tx(struct uart_port *port)
Lines 238-244 static irqreturn_t meson_uart_interrupt(int irq, void *dev_id) Link Here
238
{
238
{
239
	struct uart_port *port = (struct uart_port *)dev_id;
239
	struct uart_port *port = (struct uart_port *)dev_id;
240
240
241
	spin_lock(&port->lock);
241
	uart_port_lock(port);
242
242
243
	if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
243
	if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
244
		meson_receive_chars(port);
244
		meson_receive_chars(port);
Lines 248-254 static irqreturn_t meson_uart_interrupt(int irq, void *dev_id) Link Here
248
			meson_uart_start_tx(port);
248
			meson_uart_start_tx(port);
249
	}
249
	}
250
250
251
	spin_unlock(&port->lock);
251
	uart_port_unlock(port);
252
252
253
	return IRQ_HANDLED;
253
	return IRQ_HANDLED;
254
}
254
}
Lines 284-290 static int meson_uart_startup(struct uart_port *port) Link Here
284
	u32 val;
284
	u32 val;
285
	int ret = 0;
285
	int ret = 0;
286
286
287
	spin_lock_irqsave(&port->lock, flags);
287
	uart_port_lock_irqsave(port, &flags);
288
288
289
	val = readl(port->membase + AML_UART_CONTROL);
289
	val = readl(port->membase + AML_UART_CONTROL);
290
	val |= AML_UART_CLEAR_ERR;
290
	val |= AML_UART_CLEAR_ERR;
Lines 301-307 static int meson_uart_startup(struct uart_port *port) Link Here
301
	val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
301
	val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
302
	writel(val, port->membase + AML_UART_MISC);
302
	writel(val, port->membase + AML_UART_MISC);
303
303
304
	spin_unlock_irqrestore(&port->lock, flags);
304
	uart_port_unlock_irqrestore(port, flags);
305
305
306
	ret = request_irq(port->irq, meson_uart_interrupt, 0,
306
	ret = request_irq(port->irq, meson_uart_interrupt, 0,
307
			  port->name, port);
307
			  port->name, port);
Lines 341-347 static void meson_uart_set_termios(struct uart_port *port, Link Here
341
	unsigned long flags;
341
	unsigned long flags;
342
	u32 val;
342
	u32 val;
343
343
344
	spin_lock_irqsave(&port->lock, flags);
344
	uart_port_lock_irqsave(port, &flags);
345
345
346
	cflags = termios->c_cflag;
346
	cflags = termios->c_cflag;
347
	iflags = termios->c_iflag;
347
	iflags = termios->c_iflag;
Lines 401-407 static void meson_uart_set_termios(struct uart_port *port, Link Here
401
					    AML_UART_FRAME_ERR;
401
					    AML_UART_FRAME_ERR;
402
402
403
	uart_update_timeout(port, termios->c_cflag, baud);
403
	uart_update_timeout(port, termios->c_cflag, baud);
404
	spin_unlock_irqrestore(&port->lock, flags);
404
	uart_port_unlock_irqrestore(port, flags);
405
}
405
}
406
406
407
static int meson_uart_verify_port(struct uart_port *port,
407
static int meson_uart_verify_port(struct uart_port *port,
Lines 460-473 static int meson_uart_poll_get_char(struct uart_port *port) Link Here
460
	u32 c;
460
	u32 c;
461
	unsigned long flags;
461
	unsigned long flags;
462
462
463
	spin_lock_irqsave(&port->lock, flags);
463
	uart_port_lock_irqsave(port, &flags);
464
464
465
	if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
465
	if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
466
		c = NO_POLL_CHAR;
466
		c = NO_POLL_CHAR;
467
	else
467
	else
468
		c = readl(port->membase + AML_UART_RFIFO);
468
		c = readl(port->membase + AML_UART_RFIFO);
469
469
470
	spin_unlock_irqrestore(&port->lock, flags);
470
	uart_port_unlock_irqrestore(port, flags);
471
471
472
	return c;
472
	return c;
473
}
473
}
Lines 478-484 static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c) Link Here
478
	u32 reg;
478
	u32 reg;
479
	int ret;
479
	int ret;
480
480
481
	spin_lock_irqsave(&port->lock, flags);
481
	uart_port_lock_irqsave(port, &flags);
482
482
483
	/* Wait until FIFO is empty or timeout */
483
	/* Wait until FIFO is empty or timeout */
484
	ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
484
	ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
Lines 502-508 static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c) Link Here
502
		dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
502
		dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
503
503
504
out:
504
out:
505
	spin_unlock_irqrestore(&port->lock, flags);
505
	uart_port_unlock_irqrestore(port, flags);
506
}
506
}
507
507
508
#endif /* CONFIG_CONSOLE_POLL */
508
#endif /* CONFIG_CONSOLE_POLL */
Lines 559-567 static void meson_serial_port_write(struct uart_port *port, const char *s, Link Here
559
	if (port->sysrq) {
559
	if (port->sysrq) {
560
		locked = 0;
560
		locked = 0;
561
	} else if (oops_in_progress) {
561
	} else if (oops_in_progress) {
562
		locked = spin_trylock(&port->lock);
562
		locked = uart_port_trylock(port);
563
	} else {
563
	} else {
564
		spin_lock(&port->lock);
564
		uart_port_lock(port);
565
		locked = 1;
565
		locked = 1;
566
	}
566
	}
567
567
Lines 573-579 static void meson_serial_port_write(struct uart_port *port, const char *s, Link Here
573
	writel(val, port->membase + AML_UART_CONTROL);
573
	writel(val, port->membase + AML_UART_CONTROL);
574
574
575
	if (locked)
575
	if (locked)
576
		spin_unlock(&port->lock);
576
		uart_port_unlock(port);
577
	local_irq_restore(flags);
577
	local_irq_restore(flags);
578
}
578
}
579
579
(-)a/drivers/tty/serial/milbeaut_usio.c (-8 / +8 lines)
Lines 207-215 static irqreturn_t mlb_usio_rx_irq(int irq, void *dev_id) Link Here
207
{
207
{
208
	struct uart_port *port = dev_id;
208
	struct uart_port *port = dev_id;
209
209
210
	spin_lock(&port->lock);
210
	uart_port_lock(port);
211
	mlb_usio_rx_chars(port);
211
	mlb_usio_rx_chars(port);
212
	spin_unlock(&port->lock);
212
	uart_port_unlock(port);
213
213
214
	return IRQ_HANDLED;
214
	return IRQ_HANDLED;
215
}
215
}
Lines 218-227 static irqreturn_t mlb_usio_tx_irq(int irq, void *dev_id) Link Here
218
{
218
{
219
	struct uart_port *port = dev_id;
219
	struct uart_port *port = dev_id;
220
220
221
	spin_lock(&port->lock);
221
	uart_port_lock(port);
222
	if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
222
	if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
223
		mlb_usio_tx_chars(port);
223
		mlb_usio_tx_chars(port);
224
	spin_unlock(&port->lock);
224
	uart_port_unlock(port);
225
225
226
	return IRQ_HANDLED;
226
	return IRQ_HANDLED;
227
}
227
}
Lines 267-273 static int mlb_usio_startup(struct uart_port *port) Link Here
267
	escr = readb(port->membase + MLB_USIO_REG_ESCR);
267
	escr = readb(port->membase + MLB_USIO_REG_ESCR);
268
	if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
268
	if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
269
		escr |= MLB_USIO_ESCR_FLWEN;
269
		escr |= MLB_USIO_ESCR_FLWEN;
270
	spin_lock_irqsave(&port->lock, flags);
270
	uart_port_lock_irqsave(port, &flags);
271
	writeb(0, port->membase + MLB_USIO_REG_SCR);
271
	writeb(0, port->membase + MLB_USIO_REG_SCR);
272
	writeb(escr, port->membase + MLB_USIO_REG_ESCR);
272
	writeb(escr, port->membase + MLB_USIO_REG_ESCR);
273
	writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
273
	writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
Lines 282-288 static int mlb_usio_startup(struct uart_port *port) Link Here
282
282
283
	writeb(MLB_USIO_SCR_TXE  | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
283
	writeb(MLB_USIO_SCR_TXE  | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
284
	       MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
284
	       MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
285
	spin_unlock_irqrestore(&port->lock, flags);
285
	uart_port_unlock_irqrestore(port, flags);
286
286
287
	return 0;
287
	return 0;
288
}
288
}
Lines 337-343 static void mlb_usio_set_termios(struct uart_port *port, Link Here
337
	else
337
	else
338
		quot = 0;
338
		quot = 0;
339
339
340
	spin_lock_irqsave(&port->lock, flags);
340
	uart_port_lock_irqsave(port, &flags);
341
	uart_update_timeout(port, termios->c_cflag, baud);
341
	uart_update_timeout(port, termios->c_cflag, baud);
342
	port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
342
	port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
343
				 MLB_USIO_SSR_TDRE;
343
				 MLB_USIO_SSR_TDRE;
Lines 367-373 static void mlb_usio_set_termios(struct uart_port *port, Link Here
367
	writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
367
	writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
368
	writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
368
	writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
369
	       MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
369
	       MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
370
	spin_unlock_irqrestore(&port->lock, flags);
370
	uart_port_unlock_irqrestore(port, flags);
371
}
371
}
372
372
373
static const char *mlb_usio_type(struct uart_port *port)
373
static const char *mlb_usio_type(struct uart_port *port)
(-)a/drivers/tty/serial/mpc52xx_uart.c (-6 / +6 lines)
Lines 1096-1109 static void Link Here
1096
mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
1096
mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
1097
{
1097
{
1098
	unsigned long flags;
1098
	unsigned long flags;
1099
	spin_lock_irqsave(&port->lock, flags);
1099
	uart_port_lock_irqsave(port, &flags);
1100
1100
1101
	if (ctl == -1)
1101
	if (ctl == -1)
1102
		psc_ops->command(port, MPC52xx_PSC_START_BRK);
1102
		psc_ops->command(port, MPC52xx_PSC_START_BRK);
1103
	else
1103
	else
1104
		psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
1104
		psc_ops->command(port, MPC52xx_PSC_STOP_BRK);
1105
1105
1106
	spin_unlock_irqrestore(&port->lock, flags);
1106
	uart_port_unlock_irqrestore(port, flags);
1107
}
1107
}
1108
1108
1109
static int
1109
static int
Lines 1214-1220 mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
1214
	}
1214
	}
1215
1215
1216
	/* Get the lock */
1216
	/* Get the lock */
1217
	spin_lock_irqsave(&port->lock, flags);
1217
	uart_port_lock_irqsave(port, &flags);
1218
1218
1219
	/* Do our best to flush TX & RX, so we don't lose anything */
1219
	/* Do our best to flush TX & RX, so we don't lose anything */
1220
	/* But we don't wait indefinitely ! */
1220
	/* But we don't wait indefinitely ! */
Lines 1250-1256 mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
1250
	psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
1250
	psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);
1251
1251
1252
	/* We're all set, release the lock */
1252
	/* We're all set, release the lock */
1253
	spin_unlock_irqrestore(&port->lock, flags);
1253
	uart_port_unlock_irqrestore(port, flags);
1254
}
1254
}
1255
1255
1256
static const char *
1256
static const char *
Lines 1477-1487 mpc52xx_uart_int(int irq, void *dev_id) Link Here
1477
	struct uart_port *port = dev_id;
1477
	struct uart_port *port = dev_id;
1478
	irqreturn_t ret;
1478
	irqreturn_t ret;
1479
1479
1480
	spin_lock(&port->lock);
1480
	uart_port_lock(port);
1481
1481
1482
	ret = psc_ops->handle_irq(port);
1482
	ret = psc_ops->handle_irq(port);
1483
1483
1484
	spin_unlock(&port->lock);
1484
	uart_port_unlock(port);
1485
1485
1486
	return ret;
1486
	return ret;
1487
}
1487
}
(-)a/drivers/tty/serial/mps2-uart.c (-8 / +8 lines)
Lines 188-199 static irqreturn_t mps2_uart_rxirq(int irq, void *data) Link Here
188
	if (unlikely(!(irqflag & UARTn_INT_RX)))
188
	if (unlikely(!(irqflag & UARTn_INT_RX)))
189
		return IRQ_NONE;
189
		return IRQ_NONE;
190
190
191
	spin_lock(&port->lock);
191
	uart_port_lock(port);
192
192
193
	mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
193
	mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
194
	mps2_uart_rx_chars(port);
194
	mps2_uart_rx_chars(port);
195
195
196
	spin_unlock(&port->lock);
196
	uart_port_unlock(port);
197
197
198
	return IRQ_HANDLED;
198
	return IRQ_HANDLED;
199
}
199
}
Lines 206-217 static irqreturn_t mps2_uart_txirq(int irq, void *data) Link Here
206
	if (unlikely(!(irqflag & UARTn_INT_TX)))
206
	if (unlikely(!(irqflag & UARTn_INT_TX)))
207
		return IRQ_NONE;
207
		return IRQ_NONE;
208
208
209
	spin_lock(&port->lock);
209
	uart_port_lock(port);
210
210
211
	mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
211
	mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
212
	mps2_uart_tx_chars(port);
212
	mps2_uart_tx_chars(port);
213
213
214
	spin_unlock(&port->lock);
214
	uart_port_unlock(port);
215
215
216
	return IRQ_HANDLED;
216
	return IRQ_HANDLED;
217
}
217
}
Lines 222-228 static irqreturn_t mps2_uart_oerrirq(int irq, void *data) Link Here
222
	struct uart_port *port = data;
222
	struct uart_port *port = data;
223
	u8 irqflag = mps2_uart_read8(port, UARTn_INT);
223
	u8 irqflag = mps2_uart_read8(port, UARTn_INT);
224
224
225
	spin_lock(&port->lock);
225
	uart_port_lock(port);
226
226
227
	if (irqflag & UARTn_INT_RX_OVERRUN) {
227
	if (irqflag & UARTn_INT_RX_OVERRUN) {
228
		struct tty_port *tport = &port->state->port;
228
		struct tty_port *tport = &port->state->port;
Lines 244-250 static irqreturn_t mps2_uart_oerrirq(int irq, void *data) Link Here
244
		handled = IRQ_HANDLED;
244
		handled = IRQ_HANDLED;
245
	}
245
	}
246
246
247
	spin_unlock(&port->lock);
247
	uart_port_unlock(port);
248
248
249
	return handled;
249
	return handled;
250
}
250
}
Lines 356-367 mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
356
356
357
	bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
357
	bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
358
358
359
	spin_lock_irqsave(&port->lock, flags);
359
	uart_port_lock_irqsave(port, &flags);
360
360
361
	uart_update_timeout(port, termios->c_cflag, baud);
361
	uart_update_timeout(port, termios->c_cflag, baud);
362
	mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
362
	mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
363
363
364
	spin_unlock_irqrestore(&port->lock, flags);
364
	uart_port_unlock_irqrestore(port, flags);
365
365
366
	if (tty_termios_baud_rate(termios))
366
	if (tty_termios_baud_rate(termios))
367
		tty_termios_encode_baud_rate(termios, baud, baud);
367
		tty_termios_encode_baud_rate(termios, baud, baud);
(-)a/drivers/tty/serial/msm_serial.c (-19 / +19 lines)
Lines 444-450 static void msm_complete_tx_dma(void *args) Link Here
444
	unsigned int count;
444
	unsigned int count;
445
	u32 val;
445
	u32 val;
446
446
447
	spin_lock_irqsave(&port->lock, flags);
447
	uart_port_lock_irqsave(port, &flags);
448
448
449
	/* Already stopped */
449
	/* Already stopped */
450
	if (!dma->count)
450
	if (!dma->count)
Lines 476-482 static void msm_complete_tx_dma(void *args) Link Here
476
476
477
	msm_handle_tx(port);
477
	msm_handle_tx(port);
478
done:
478
done:
479
	spin_unlock_irqrestore(&port->lock, flags);
479
	uart_port_unlock_irqrestore(port, flags);
480
}
480
}
481
481
482
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
482
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
Lines 549-555 static void msm_complete_rx_dma(void *args) Link Here
549
	unsigned long flags;
549
	unsigned long flags;
550
	u32 val;
550
	u32 val;
551
551
552
	spin_lock_irqsave(&port->lock, flags);
552
	uart_port_lock_irqsave(port, &flags);
553
553
554
	/* Already stopped */
554
	/* Already stopped */
555
	if (!dma->count)
555
	if (!dma->count)
Lines 587-602 static void msm_complete_rx_dma(void *args) Link Here
587
		if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
587
		if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
588
			flag = TTY_NORMAL;
588
			flag = TTY_NORMAL;
589
589
590
		spin_unlock_irqrestore(&port->lock, flags);
590
		uart_port_unlock_irqrestore(port, flags);
591
		sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
591
		sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
592
		spin_lock_irqsave(&port->lock, flags);
592
		uart_port_lock_irqsave(port, &flags);
593
		if (!sysrq)
593
		if (!sysrq)
594
			tty_insert_flip_char(tport, dma->virt[i], flag);
594
			tty_insert_flip_char(tport, dma->virt[i], flag);
595
	}
595
	}
596
596
597
	msm_start_rx_dma(msm_port);
597
	msm_start_rx_dma(msm_port);
598
done:
598
done:
599
	spin_unlock_irqrestore(&port->lock, flags);
599
	uart_port_unlock_irqrestore(port, flags);
600
600
601
	if (count)
601
	if (count)
602
		tty_flip_buffer_push(tport);
602
		tty_flip_buffer_push(tport);
Lines 762-770 static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr) Link Here
762
			if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
762
			if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
763
				flag = TTY_NORMAL;
763
				flag = TTY_NORMAL;
764
764
765
			spin_unlock(&port->lock);
765
			uart_port_unlock(port);
766
			sysrq = uart_handle_sysrq_char(port, buf[i]);
766
			sysrq = uart_handle_sysrq_char(port, buf[i]);
767
			spin_lock(&port->lock);
767
			uart_port_lock(port);
768
			if (!sysrq)
768
			if (!sysrq)
769
				tty_insert_flip_char(tport, buf[i], flag);
769
				tty_insert_flip_char(tport, buf[i], flag);
770
		}
770
		}
Lines 824-832 static void msm_handle_rx(struct uart_port *port) Link Here
824
		else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
824
		else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
825
			flag = TTY_FRAME;
825
			flag = TTY_FRAME;
826
826
827
		spin_unlock(&port->lock);
827
		uart_port_unlock(port);
828
		sysrq = uart_handle_sysrq_char(port, c);
828
		sysrq = uart_handle_sysrq_char(port, c);
829
		spin_lock(&port->lock);
829
		uart_port_lock(port);
830
		if (!sysrq)
830
		if (!sysrq)
831
			tty_insert_flip_char(tport, c, flag);
831
			tty_insert_flip_char(tport, c, flag);
832
	}
832
	}
Lines 951-957 static irqreturn_t msm_uart_irq(int irq, void *dev_id) Link Here
951
	unsigned int misr;
951
	unsigned int misr;
952
	u32 val;
952
	u32 val;
953
953
954
	spin_lock_irqsave(&port->lock, flags);
954
	uart_port_lock_irqsave(port, &flags);
955
	misr = msm_read(port, MSM_UART_MISR);
955
	misr = msm_read(port, MSM_UART_MISR);
956
	msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
956
	msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
957
957
Lines 983-989 static irqreturn_t msm_uart_irq(int irq, void *dev_id) Link Here
983
		msm_handle_delta_cts(port);
983
		msm_handle_delta_cts(port);
984
984
985
	msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
985
	msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
986
	spin_unlock_irqrestore(&port->lock, flags);
986
	uart_port_unlock_irqrestore(port, flags);
987
987
988
	return IRQ_HANDLED;
988
	return IRQ_HANDLED;
989
}
989
}
Lines 1128-1140 static int msm_set_baud_rate(struct uart_port *port, unsigned int baud, Link Here
1128
	unsigned long flags, rate;
1128
	unsigned long flags, rate;
1129
1129
1130
	flags = *saved_flags;
1130
	flags = *saved_flags;
1131
	spin_unlock_irqrestore(&port->lock, flags);
1131
	uart_port_unlock_irqrestore(port, flags);
1132
1132
1133
	entry = msm_find_best_baud(port, baud, &rate);
1133
	entry = msm_find_best_baud(port, baud, &rate);
1134
	clk_set_rate(msm_port->clk, rate);
1134
	clk_set_rate(msm_port->clk, rate);
1135
	baud = rate / 16 / entry->divisor;
1135
	baud = rate / 16 / entry->divisor;
1136
1136
1137
	spin_lock_irqsave(&port->lock, flags);
1137
	uart_port_lock_irqsave(port, &flags);
1138
	*saved_flags = flags;
1138
	*saved_flags = flags;
1139
	port->uartclk = rate;
1139
	port->uartclk = rate;
1140
1140
Lines 1266-1272 static void msm_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1266
	unsigned long flags;
1266
	unsigned long flags;
1267
	unsigned int baud, mr;
1267
	unsigned int baud, mr;
1268
1268
1269
	spin_lock_irqsave(&port->lock, flags);
1269
	uart_port_lock_irqsave(port, &flags);
1270
1270
1271
	if (dma->chan) /* Terminate if any */
1271
	if (dma->chan) /* Terminate if any */
1272
		msm_stop_dma(port, dma);
1272
		msm_stop_dma(port, dma);
Lines 1338-1344 static void msm_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1338
	/* Try to use DMA */
1338
	/* Try to use DMA */
1339
	msm_start_rx_dma(msm_port);
1339
	msm_start_rx_dma(msm_port);
1340
1340
1341
	spin_unlock_irqrestore(&port->lock, flags);
1341
	uart_port_unlock_irqrestore(port, flags);
1342
}
1342
}
1343
1343
1344
static const char *msm_type(struct uart_port *port)
1344
static const char *msm_type(struct uart_port *port)
Lines 1620-1628 static void __msm_console_write(struct uart_port *port, const char *s, Link Here
1620
	if (port->sysrq)
1620
	if (port->sysrq)
1621
		locked = 0;
1621
		locked = 0;
1622
	else if (oops_in_progress)
1622
	else if (oops_in_progress)
1623
		locked = spin_trylock(&port->lock);
1623
		locked = uart_port_trylock(port);
1624
	else
1624
	else
1625
		spin_lock(&port->lock);
1625
		uart_port_lock(port);
1626
1626
1627
	if (is_uartdm)
1627
	if (is_uartdm)
1628
		msm_reset_dm_count(port, count);
1628
		msm_reset_dm_count(port, count);
Lines 1661-1667 static void __msm_console_write(struct uart_port *port, const char *s, Link Here
1661
	}
1661
	}
1662
1662
1663
	if (locked)
1663
	if (locked)
1664
		spin_unlock(&port->lock);
1664
		uart_port_unlock(port);
1665
1665
1666
	local_irq_restore(flags);
1666
	local_irq_restore(flags);
1667
}
1667
}
(-)a/drivers/tty/serial/mvebu-uart.c (-9 / +9 lines)
Lines 187-195 static unsigned int mvebu_uart_tx_empty(struct uart_port *port) Link Here
187
	unsigned long flags;
187
	unsigned long flags;
188
	unsigned int st;
188
	unsigned int st;
189
189
190
	spin_lock_irqsave(&port->lock, flags);
190
	uart_port_lock_irqsave(port, &flags);
191
	st = readl(port->membase + UART_STAT);
191
	st = readl(port->membase + UART_STAT);
192
	spin_unlock_irqrestore(&port->lock, flags);
192
	uart_port_unlock_irqrestore(port, flags);
193
193
194
	return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
194
	return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
195
}
195
}
Lines 249-262 static void mvebu_uart_break_ctl(struct uart_port *port, int brk) Link Here
249
	unsigned int ctl;
249
	unsigned int ctl;
250
	unsigned long flags;
250
	unsigned long flags;
251
251
252
	spin_lock_irqsave(&port->lock, flags);
252
	uart_port_lock_irqsave(port, &flags);
253
	ctl = readl(port->membase + UART_CTRL(port));
253
	ctl = readl(port->membase + UART_CTRL(port));
254
	if (brk == -1)
254
	if (brk == -1)
255
		ctl |= CTRL_SND_BRK_SEQ;
255
		ctl |= CTRL_SND_BRK_SEQ;
256
	else
256
	else
257
		ctl &= ~CTRL_SND_BRK_SEQ;
257
		ctl &= ~CTRL_SND_BRK_SEQ;
258
	writel(ctl, port->membase + UART_CTRL(port));
258
	writel(ctl, port->membase + UART_CTRL(port));
259
	spin_unlock_irqrestore(&port->lock, flags);
259
	uart_port_unlock_irqrestore(port, flags);
260
}
260
}
261
261
262
static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
262
static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
Lines 540-546 static void mvebu_uart_set_termios(struct uart_port *port, Link Here
540
	unsigned long flags;
540
	unsigned long flags;
541
	unsigned int baud, min_baud, max_baud;
541
	unsigned int baud, min_baud, max_baud;
542
542
543
	spin_lock_irqsave(&port->lock, flags);
543
	uart_port_lock_irqsave(port, &flags);
544
544
545
	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
545
	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
546
		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
546
		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
Lines 589-595 static void mvebu_uart_set_termios(struct uart_port *port, Link Here
589
		uart_update_timeout(port, termios->c_cflag, baud);
589
		uart_update_timeout(port, termios->c_cflag, baud);
590
	}
590
	}
591
591
592
	spin_unlock_irqrestore(&port->lock, flags);
592
	uart_port_unlock_irqrestore(port, flags);
593
}
593
}
594
594
595
static const char *mvebu_uart_type(struct uart_port *port)
595
static const char *mvebu_uart_type(struct uart_port *port)
Lines 735-743 static void mvebu_uart_console_write(struct console *co, const char *s, Link Here
735
	int locked = 1;
735
	int locked = 1;
736
736
737
	if (oops_in_progress)
737
	if (oops_in_progress)
738
		locked = spin_trylock_irqsave(&port->lock, flags);
738
		locked = uart_port_trylock_irqsave(port, &flags);
739
	else
739
	else
740
		spin_lock_irqsave(&port->lock, flags);
740
		uart_port_lock_irqsave(port, &flags);
741
741
742
	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
742
	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
743
	intr = readl(port->membase + UART_INTR(port)) &
743
	intr = readl(port->membase + UART_INTR(port)) &
Lines 758-764 static void mvebu_uart_console_write(struct console *co, const char *s, Link Here
758
	}
758
	}
759
759
760
	if (locked)
760
	if (locked)
761
		spin_unlock_irqrestore(&port->lock, flags);
761
		uart_port_unlock_irqrestore(port, flags);
762
}
762
}
763
763
764
static int mvebu_uart_console_setup(struct console *co, char *options)
764
static int mvebu_uart_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/omap-serial.c (-24 / +20 lines)
Lines 390-399 static void serial_omap_throttle(struct uart_port *port) Link Here
390
	struct uart_omap_port *up = to_uart_omap_port(port);
390
	struct uart_omap_port *up = to_uart_omap_port(port);
391
	unsigned long flags;
391
	unsigned long flags;
392
392
393
	spin_lock_irqsave(&up->port.lock, flags);
393
	uart_port_lock_irqsave(&up->port, &flags);
394
	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
394
	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
395
	serial_out(up, UART_IER, up->ier);
395
	serial_out(up, UART_IER, up->ier);
396
	spin_unlock_irqrestore(&up->port.lock, flags);
396
	uart_port_unlock_irqrestore(&up->port, flags);
397
}
397
}
398
398
399
static void serial_omap_unthrottle(struct uart_port *port)
399
static void serial_omap_unthrottle(struct uart_port *port)
Lines 401-410 static void serial_omap_unthrottle(struct uart_port *port) Link Here
401
	struct uart_omap_port *up = to_uart_omap_port(port);
401
	struct uart_omap_port *up = to_uart_omap_port(port);
402
	unsigned long flags;
402
	unsigned long flags;
403
403
404
	spin_lock_irqsave(&up->port.lock, flags);
404
	uart_port_lock_irqsave(&up->port, &flags);
405
	up->ier |= UART_IER_RLSI | UART_IER_RDI;
405
	up->ier |= UART_IER_RLSI | UART_IER_RDI;
406
	serial_out(up, UART_IER, up->ier);
406
	serial_out(up, UART_IER, up->ier);
407
	spin_unlock_irqrestore(&up->port.lock, flags);
407
	uart_port_unlock_irqrestore(&up->port, flags);
408
}
408
}
409
409
410
static unsigned int check_modem_status(struct uart_omap_port *up)
410
static unsigned int check_modem_status(struct uart_omap_port *up)
Lines 527-533 static irqreturn_t serial_omap_irq(int irq, void *dev_id) Link Here
527
	irqreturn_t ret = IRQ_NONE;
527
	irqreturn_t ret = IRQ_NONE;
528
	int max_count = 256;
528
	int max_count = 256;
529
529
530
	spin_lock(&up->port.lock);
530
	uart_port_lock(&up->port);
531
531
532
	do {
532
	do {
533
		iir = serial_in(up, UART_IIR);
533
		iir = serial_in(up, UART_IIR);
Lines 563-569 static irqreturn_t serial_omap_irq(int irq, void *dev_id) Link Here
563
		}
563
		}
564
	} while (max_count--);
564
	} while (max_count--);
565
565
566
	spin_unlock(&up->port.lock);
566
	uart_port_unlock(&up->port);
567
567
568
	tty_flip_buffer_push(&up->port.state->port);
568
	tty_flip_buffer_push(&up->port.state->port);
569
569
Lines 579-587 static unsigned int serial_omap_tx_empty(struct uart_port *port) Link Here
579
	unsigned int ret = 0;
579
	unsigned int ret = 0;
580
580
581
	dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
581
	dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
582
	spin_lock_irqsave(&up->port.lock, flags);
582
	uart_port_lock_irqsave(&up->port, &flags);
583
	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
583
	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
584
	spin_unlock_irqrestore(&up->port.lock, flags);
584
	uart_port_unlock_irqrestore(&up->port, flags);
585
585
586
	return ret;
586
	return ret;
587
}
587
}
Lines 647-659 static void serial_omap_break_ctl(struct uart_port *port, int break_state) Link Here
647
	unsigned long flags;
647
	unsigned long flags;
648
648
649
	dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
649
	dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
650
	spin_lock_irqsave(&up->port.lock, flags);
650
	uart_port_lock_irqsave(&up->port, &flags);
651
	if (break_state == -1)
651
	if (break_state == -1)
652
		up->lcr |= UART_LCR_SBC;
652
		up->lcr |= UART_LCR_SBC;
653
	else
653
	else
654
		up->lcr &= ~UART_LCR_SBC;
654
		up->lcr &= ~UART_LCR_SBC;
655
	serial_out(up, UART_LCR, up->lcr);
655
	serial_out(up, UART_LCR, up->lcr);
656
	spin_unlock_irqrestore(&up->port.lock, flags);
656
	uart_port_unlock_irqrestore(&up->port, flags);
657
}
657
}
658
658
659
static int serial_omap_startup(struct uart_port *port)
659
static int serial_omap_startup(struct uart_port *port)
Lines 701-713 static int serial_omap_startup(struct uart_port *port) Link Here
701
	 * Now, initialize the UART
701
	 * Now, initialize the UART
702
	 */
702
	 */
703
	serial_out(up, UART_LCR, UART_LCR_WLEN8);
703
	serial_out(up, UART_LCR, UART_LCR_WLEN8);
704
	spin_lock_irqsave(&up->port.lock, flags);
704
	uart_port_lock_irqsave(&up->port, &flags);
705
	/*
705
	/*
706
	 * Most PC uarts need OUT2 raised to enable interrupts.
706
	 * Most PC uarts need OUT2 raised to enable interrupts.
707
	 */
707
	 */
708
	up->port.mctrl |= TIOCM_OUT2;
708
	up->port.mctrl |= TIOCM_OUT2;
709
	serial_omap_set_mctrl(&up->port, up->port.mctrl);
709
	serial_omap_set_mctrl(&up->port, up->port.mctrl);
710
	spin_unlock_irqrestore(&up->port.lock, flags);
710
	uart_port_unlock_irqrestore(&up->port, flags);
711
711
712
	up->msr_saved_flags = 0;
712
	up->msr_saved_flags = 0;
713
	/*
713
	/*
Lines 742-751 static void serial_omap_shutdown(struct uart_port *port) Link Here
742
	up->ier = 0;
742
	up->ier = 0;
743
	serial_out(up, UART_IER, 0);
743
	serial_out(up, UART_IER, 0);
744
744
745
	spin_lock_irqsave(&up->port.lock, flags);
745
	uart_port_lock_irqsave(&up->port, &flags);
746
	up->port.mctrl &= ~TIOCM_OUT2;
746
	up->port.mctrl &= ~TIOCM_OUT2;
747
	serial_omap_set_mctrl(&up->port, up->port.mctrl);
747
	serial_omap_set_mctrl(&up->port, up->port.mctrl);
748
	spin_unlock_irqrestore(&up->port.lock, flags);
748
	uart_port_unlock_irqrestore(&up->port, flags);
749
749
750
	/*
750
	/*
751
	 * Disable break condition and FIFOs
751
	 * Disable break condition and FIFOs
Lines 815-821 serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
815
	 * Ok, we're now changing the port state. Do it with
815
	 * Ok, we're now changing the port state. Do it with
816
	 * interrupts disabled.
816
	 * interrupts disabled.
817
	 */
817
	 */
818
	spin_lock_irqsave(&up->port.lock, flags);
818
	uart_port_lock_irqsave(&up->port, &flags);
819
819
820
	/*
820
	/*
821
	 * Update the per-port timeout.
821
	 * Update the per-port timeout.
Lines 1013-1019 serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1013
1013
1014
	serial_omap_set_mctrl(&up->port, up->port.mctrl);
1014
	serial_omap_set_mctrl(&up->port, up->port.mctrl);
1015
1015
1016
	spin_unlock_irqrestore(&up->port.lock, flags);
1016
	uart_port_unlock_irqrestore(&up->port, flags);
1017
	dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
1017
	dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
1018
}
1018
}
1019
1019
Lines 1212-1224 serial_omap_console_write(struct console *co, const char *s, Link Here
1212
	unsigned int ier;
1212
	unsigned int ier;
1213
	int locked = 1;
1213
	int locked = 1;
1214
1214
1215
	local_irq_save(flags);
1215
	if (up->port.sysrq || oops_in_progress)
1216
	if (up->port.sysrq)
1216
		locked = uart_port_trylock_irqsave(&up->port, &flags);
1217
		locked = 0;
1218
	else if (oops_in_progress)
1219
		locked = spin_trylock(&up->port.lock);
1220
	else
1217
	else
1221
		spin_lock(&up->port.lock);
1218
		uart_port_lock_irqsave(&up->port, &flags);
1222
1219
1223
	/*
1220
	/*
1224
	 * First save the IER then disable the interrupts
1221
	 * First save the IER then disable the interrupts
Lines 1245-1252 serial_omap_console_write(struct console *co, const char *s, Link Here
1245
		check_modem_status(up);
1242
		check_modem_status(up);
1246
1243
1247
	if (locked)
1244
	if (locked)
1248
		spin_unlock(&up->port.lock);
1245
		uart_port_unlock_irqrestore(&up->port, flags);
1249
	local_irq_restore(flags);
1250
}
1246
}
1251
1247
1252
static int __init
1248
static int __init
(-)a/drivers/tty/serial/owl-uart.c (-13 / +13 lines)
Lines 125-136 static unsigned int owl_uart_tx_empty(struct uart_port *port) Link Here
125
	u32 val;
125
	u32 val;
126
	unsigned int ret;
126
	unsigned int ret;
127
127
128
	spin_lock_irqsave(&port->lock, flags);
128
	uart_port_lock_irqsave(port, &flags);
129
129
130
	val = owl_uart_read(port, OWL_UART_STAT);
130
	val = owl_uart_read(port, OWL_UART_STAT);
131
	ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;
131
	ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;
132
132
133
	spin_unlock_irqrestore(&port->lock, flags);
133
	uart_port_unlock_irqrestore(port, flags);
134
134
135
	return ret;
135
	return ret;
136
}
136
}
Lines 232-238 static irqreturn_t owl_uart_irq(int irq, void *dev_id) Link Here
232
	unsigned long flags;
232
	unsigned long flags;
233
	u32 stat;
233
	u32 stat;
234
234
235
	spin_lock_irqsave(&port->lock, flags);
235
	uart_port_lock_irqsave(port, &flags);
236
236
237
	stat = owl_uart_read(port, OWL_UART_STAT);
237
	stat = owl_uart_read(port, OWL_UART_STAT);
238
238
Lines 246-252 static irqreturn_t owl_uart_irq(int irq, void *dev_id) Link Here
246
	stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
246
	stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
247
	owl_uart_write(port, stat, OWL_UART_STAT);
247
	owl_uart_write(port, stat, OWL_UART_STAT);
248
248
249
	spin_unlock_irqrestore(&port->lock, flags);
249
	uart_port_unlock_irqrestore(port, flags);
250
250
251
	return IRQ_HANDLED;
251
	return IRQ_HANDLED;
252
}
252
}
Lines 256-269 static void owl_uart_shutdown(struct uart_port *port) Link Here
256
	u32 val;
256
	u32 val;
257
	unsigned long flags;
257
	unsigned long flags;
258
258
259
	spin_lock_irqsave(&port->lock, flags);
259
	uart_port_lock_irqsave(port, &flags);
260
260
261
	val = owl_uart_read(port, OWL_UART_CTL);
261
	val = owl_uart_read(port, OWL_UART_CTL);
262
	val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
262
	val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
263
		| OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
263
		| OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
264
	owl_uart_write(port, val, OWL_UART_CTL);
264
	owl_uart_write(port, val, OWL_UART_CTL);
265
265
266
	spin_unlock_irqrestore(&port->lock, flags);
266
	uart_port_unlock_irqrestore(port, flags);
267
267
268
	free_irq(port->irq, port);
268
	free_irq(port->irq, port);
269
}
269
}
Lines 279-285 static int owl_uart_startup(struct uart_port *port) Link Here
279
	if (ret)
279
	if (ret)
280
		return ret;
280
		return ret;
281
281
282
	spin_lock_irqsave(&port->lock, flags);
282
	uart_port_lock_irqsave(port, &flags);
283
283
284
	val = owl_uart_read(port, OWL_UART_STAT);
284
	val = owl_uart_read(port, OWL_UART_STAT);
285
	val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
285
	val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
Lines 291-297 static int owl_uart_startup(struct uart_port *port) Link Here
291
	val |= OWL_UART_CTL_EN;
291
	val |= OWL_UART_CTL_EN;
292
	owl_uart_write(port, val, OWL_UART_CTL);
292
	owl_uart_write(port, val, OWL_UART_CTL);
293
293
294
	spin_unlock_irqrestore(&port->lock, flags);
294
	uart_port_unlock_irqrestore(port, flags);
295
295
296
	return 0;
296
	return 0;
297
}
297
}
Lines 311-317 static void owl_uart_set_termios(struct uart_port *port, Link Here
311
	u32 ctl;
311
	u32 ctl;
312
	unsigned long flags;
312
	unsigned long flags;
313
313
314
	spin_lock_irqsave(&port->lock, flags);
314
	uart_port_lock_irqsave(port, &flags);
315
315
316
	ctl = owl_uart_read(port, OWL_UART_CTL);
316
	ctl = owl_uart_read(port, OWL_UART_CTL);
317
317
Lines 371-377 static void owl_uart_set_termios(struct uart_port *port, Link Here
371
371
372
	uart_update_timeout(port, termios->c_cflag, baud);
372
	uart_update_timeout(port, termios->c_cflag, baud);
373
373
374
	spin_unlock_irqrestore(&port->lock, flags);
374
	uart_port_unlock_irqrestore(port, flags);
375
}
375
}
376
376
377
static void owl_uart_release_port(struct uart_port *port)
377
static void owl_uart_release_port(struct uart_port *port)
Lines 515-523 static void owl_uart_port_write(struct uart_port *port, const char *s, Link Here
515
	if (port->sysrq)
515
	if (port->sysrq)
516
		locked = 0;
516
		locked = 0;
517
	else if (oops_in_progress)
517
	else if (oops_in_progress)
518
		locked = spin_trylock(&port->lock);
518
		locked = uart_port_trylock(port);
519
	else {
519
	else {
520
		spin_lock(&port->lock);
520
		uart_port_lock(port);
521
		locked = 1;
521
		locked = 1;
522
	}
522
	}
523
523
Lines 541-547 static void owl_uart_port_write(struct uart_port *port, const char *s, Link Here
541
	owl_uart_write(port, old_ctl, OWL_UART_CTL);
541
	owl_uart_write(port, old_ctl, OWL_UART_CTL);
542
542
543
	if (locked)
543
	if (locked)
544
		spin_unlock(&port->lock);
544
		uart_port_unlock(port);
545
545
546
	local_irq_restore(flags);
546
	local_irq_restore(flags);
547
}
547
}
(-)a/drivers/tty/serial/pch_uart.c (-5 / +5 lines)
Lines 1347-1353 static void pch_uart_set_termios(struct uart_port *port, Link Here
1347
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
1347
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
1348
1348
1349
	spin_lock_irqsave(&priv->lock, flags);
1349
	spin_lock_irqsave(&priv->lock, flags);
1350
	spin_lock(&port->lock);
1350
	uart_port_lock(port);
1351
1351
1352
	uart_update_timeout(port, termios->c_cflag, baud);
1352
	uart_update_timeout(port, termios->c_cflag, baud);
1353
	rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
1353
	rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
Lines 1360-1366 static void pch_uart_set_termios(struct uart_port *port, Link Here
1360
		tty_termios_encode_baud_rate(termios, baud, baud);
1360
		tty_termios_encode_baud_rate(termios, baud, baud);
1361
1361
1362
out:
1362
out:
1363
	spin_unlock(&port->lock);
1363
	uart_port_unlock(port);
1364
	spin_unlock_irqrestore(&priv->lock, flags);
1364
	spin_unlock_irqrestore(&priv->lock, flags);
1365
}
1365
}
1366
1366
Lines 1581-1590 pch_console_write(struct console *co, const char *s, unsigned int count) Link Here
1581
		port_locked = 0;
1581
		port_locked = 0;
1582
	} else if (oops_in_progress) {
1582
	} else if (oops_in_progress) {
1583
		priv_locked = spin_trylock(&priv->lock);
1583
		priv_locked = spin_trylock(&priv->lock);
1584
		port_locked = spin_trylock(&priv->port.lock);
1584
		port_locked = uart_port_trylock(&priv->port);
1585
	} else {
1585
	} else {
1586
		spin_lock(&priv->lock);
1586
		spin_lock(&priv->lock);
1587
		spin_lock(&priv->port.lock);
1587
		uart_port_lock(&priv->port);
1588
	}
1588
	}
1589
1589
1590
	/*
1590
	/*
Lines 1604-1610 pch_console_write(struct console *co, const char *s, unsigned int count) Link Here
1604
	iowrite8(ier, priv->membase + UART_IER);
1604
	iowrite8(ier, priv->membase + UART_IER);
1605
1605
1606
	if (port_locked)
1606
	if (port_locked)
1607
		spin_unlock(&priv->port.lock);
1607
		uart_port_unlock(&priv->port);
1608
	if (priv_locked)
1608
	if (priv_locked)
1609
		spin_unlock(&priv->lock);
1609
		spin_unlock(&priv->lock);
1610
	local_irq_restore(flags);
1610
	local_irq_restore(flags);
(-)a/drivers/tty/serial/pic32_uart.c (-10 / +10 lines)
Lines 243-249 static void pic32_uart_break_ctl(struct uart_port *port, int ctl) Link Here
243
	struct pic32_sport *sport = to_pic32_sport(port);
243
	struct pic32_sport *sport = to_pic32_sport(port);
244
	unsigned long flags;
244
	unsigned long flags;
245
245
246
	spin_lock_irqsave(&port->lock, flags);
246
	uart_port_lock_irqsave(port, &flags);
247
247
248
	if (ctl)
248
	if (ctl)
249
		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
249
		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
Lines 252-258 static void pic32_uart_break_ctl(struct uart_port *port, int ctl) Link Here
252
		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
252
		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
253
					PIC32_UART_STA_UTXBRK);
253
					PIC32_UART_STA_UTXBRK);
254
254
255
	spin_unlock_irqrestore(&port->lock, flags);
255
	uart_port_unlock_irqrestore(port, flags);
256
}
256
}
257
257
258
/* get port type in string format */
258
/* get port type in string format */
Lines 274-280 static void pic32_uart_do_rx(struct uart_port *port) Link Here
274
	 */
274
	 */
275
	max_count = PIC32_UART_RX_FIFO_DEPTH;
275
	max_count = PIC32_UART_RX_FIFO_DEPTH;
276
276
277
	spin_lock(&port->lock);
277
	uart_port_lock(port);
278
278
279
	tty = &port->state->port;
279
	tty = &port->state->port;
280
280
Lines 331-337 static void pic32_uart_do_rx(struct uart_port *port) Link Here
331
331
332
	} while (--max_count);
332
	} while (--max_count);
333
333
334
	spin_unlock(&port->lock);
334
	uart_port_unlock(port);
335
335
336
	tty_flip_buffer_push(tty);
336
	tty_flip_buffer_push(tty);
337
}
337
}
Lines 410-418 static irqreturn_t pic32_uart_tx_interrupt(int irq, void *dev_id) Link Here
410
	struct uart_port *port = dev_id;
410
	struct uart_port *port = dev_id;
411
	unsigned long flags;
411
	unsigned long flags;
412
412
413
	spin_lock_irqsave(&port->lock, flags);
413
	uart_port_lock_irqsave(port, &flags);
414
	pic32_uart_do_tx(port);
414
	pic32_uart_do_tx(port);
415
	spin_unlock_irqrestore(&port->lock, flags);
415
	uart_port_unlock_irqrestore(port, flags);
416
416
417
	return IRQ_HANDLED;
417
	return IRQ_HANDLED;
418
}
418
}
Lines 580-588 static void pic32_uart_shutdown(struct uart_port *port) Link Here
580
	unsigned long flags;
580
	unsigned long flags;
581
581
582
	/* disable uart */
582
	/* disable uart */
583
	spin_lock_irqsave(&port->lock, flags);
583
	uart_port_lock_irqsave(port, &flags);
584
	pic32_uart_dsbl_and_mask(port);
584
	pic32_uart_dsbl_and_mask(port);
585
	spin_unlock_irqrestore(&port->lock, flags);
585
	uart_port_unlock_irqrestore(port, flags);
586
	clk_disable_unprepare(sport->clk);
586
	clk_disable_unprepare(sport->clk);
587
587
588
	/* free all 3 interrupts for this UART */
588
	/* free all 3 interrupts for this UART */
Lines 604-610 static void pic32_uart_set_termios(struct uart_port *port, Link Here
604
	unsigned int quot;
604
	unsigned int quot;
605
	unsigned long flags;
605
	unsigned long flags;
606
606
607
	spin_lock_irqsave(&port->lock, flags);
607
	uart_port_lock_irqsave(port, &flags);
608
608
609
	/* disable uart and mask all interrupts while changing speed */
609
	/* disable uart and mask all interrupts while changing speed */
610
	pic32_uart_dsbl_and_mask(port);
610
	pic32_uart_dsbl_and_mask(port);
Lines 672-678 static void pic32_uart_set_termios(struct uart_port *port, Link Here
672
	/* enable uart */
672
	/* enable uart */
673
	pic32_uart_en_and_unmask(port);
673
	pic32_uart_en_and_unmask(port);
674
674
675
	spin_unlock_irqrestore(&port->lock, flags);
675
	uart_port_unlock_irqrestore(port, flags);
676
}
676
}
677
677
678
/* serial core request to claim uart iomem */
678
/* serial core request to claim uart iomem */
(-)a/drivers/tty/serial/pmac_zilog.c (-26 / +26 lines)
Lines 246-254 static bool pmz_receive_chars(struct uart_pmac_port *uap) Link Here
246
#endif /* USE_CTRL_O_SYSRQ */
246
#endif /* USE_CTRL_O_SYSRQ */
247
		if (uap->port.sysrq) {
247
		if (uap->port.sysrq) {
248
			int swallow;
248
			int swallow;
249
			spin_unlock(&uap->port.lock);
249
			uart_port_unlock(&uap->port);
250
			swallow = uart_handle_sysrq_char(&uap->port, ch);
250
			swallow = uart_handle_sysrq_char(&uap->port, ch);
251
			spin_lock(&uap->port.lock);
251
			uart_port_lock(&uap->port);
252
			if (swallow)
252
			if (swallow)
253
				goto next_char;
253
				goto next_char;
254
		}
254
		}
Lines 435-441 static irqreturn_t pmz_interrupt(int irq, void *dev_id) Link Here
435
	uap_a = pmz_get_port_A(uap);
435
	uap_a = pmz_get_port_A(uap);
436
	uap_b = uap_a->mate;
436
	uap_b = uap_a->mate;
437
437
438
	spin_lock(&uap_a->port.lock);
438
	uart_port_lock(&uap_a->port);
439
	r3 = read_zsreg(uap_a, R3);
439
	r3 = read_zsreg(uap_a, R3);
440
440
441
	/* Channel A */
441
	/* Channel A */
Lines 456-469 static irqreturn_t pmz_interrupt(int irq, void *dev_id) Link Here
456
		rc = IRQ_HANDLED;
456
		rc = IRQ_HANDLED;
457
	}
457
	}
458
 skip_a:
458
 skip_a:
459
	spin_unlock(&uap_a->port.lock);
459
	uart_port_unlock(&uap_a->port);
460
	if (push)
460
	if (push)
461
		tty_flip_buffer_push(&uap->port.state->port);
461
		tty_flip_buffer_push(&uap->port.state->port);
462
462
463
	if (!uap_b)
463
	if (!uap_b)
464
		goto out;
464
		goto out;
465
465
466
	spin_lock(&uap_b->port.lock);
466
	uart_port_lock(&uap_b->port);
467
	push = false;
467
	push = false;
468
	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
468
	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
469
		if (!ZS_IS_OPEN(uap_b)) {
469
		if (!ZS_IS_OPEN(uap_b)) {
Lines 481-487 static irqreturn_t pmz_interrupt(int irq, void *dev_id) Link Here
481
		rc = IRQ_HANDLED;
481
		rc = IRQ_HANDLED;
482
	}
482
	}
483
 skip_b:
483
 skip_b:
484
	spin_unlock(&uap_b->port.lock);
484
	uart_port_unlock(&uap_b->port);
485
	if (push)
485
	if (push)
486
		tty_flip_buffer_push(&uap->port.state->port);
486
		tty_flip_buffer_push(&uap->port.state->port);
487
487
Lines 497-505 static inline u8 pmz_peek_status(struct uart_pmac_port *uap) Link Here
497
	unsigned long flags;
497
	unsigned long flags;
498
	u8 status;
498
	u8 status;
499
	
499
	
500
	spin_lock_irqsave(&uap->port.lock, flags);
500
	uart_port_lock_irqsave(&uap->port, &flags);
501
	status = read_zsreg(uap, R0);
501
	status = read_zsreg(uap, R0);
502
	spin_unlock_irqrestore(&uap->port.lock, flags);
502
	uart_port_unlock_irqrestore(&uap->port, flags);
503
503
504
	return status;
504
	return status;
505
}
505
}
Lines 685-691 static void pmz_break_ctl(struct uart_port *port, int break_state) Link Here
685
	else
685
	else
686
		clear_bits |= SND_BRK;
686
		clear_bits |= SND_BRK;
687
687
688
	spin_lock_irqsave(&port->lock, flags);
688
	uart_port_lock_irqsave(port, &flags);
689
689
690
	new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
690
	new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
691
	if (new_reg != uap->curregs[R5]) {
691
	if (new_reg != uap->curregs[R5]) {
Lines 693-699 static void pmz_break_ctl(struct uart_port *port, int break_state) Link Here
693
		write_zsreg(uap, R5, uap->curregs[R5]);
693
		write_zsreg(uap, R5, uap->curregs[R5]);
694
	}
694
	}
695
695
696
	spin_unlock_irqrestore(&port->lock, flags);
696
	uart_port_unlock_irqrestore(port, flags);
697
}
697
}
698
698
699
#ifdef CONFIG_PPC_PMAC
699
#ifdef CONFIG_PPC_PMAC
Lines 865-882 static void pmz_irda_reset(struct uart_pmac_port *uap) Link Here
865
{
865
{
866
	unsigned long flags;
866
	unsigned long flags;
867
867
868
	spin_lock_irqsave(&uap->port.lock, flags);
868
	uart_port_lock_irqsave(&uap->port, &flags);
869
	uap->curregs[R5] |= DTR;
869
	uap->curregs[R5] |= DTR;
870
	write_zsreg(uap, R5, uap->curregs[R5]);
870
	write_zsreg(uap, R5, uap->curregs[R5]);
871
	zssync(uap);
871
	zssync(uap);
872
	spin_unlock_irqrestore(&uap->port.lock, flags);
872
	uart_port_unlock_irqrestore(&uap->port, flags);
873
	msleep(110);
873
	msleep(110);
874
874
875
	spin_lock_irqsave(&uap->port.lock, flags);
875
	uart_port_lock_irqsave(&uap->port, &flags);
876
	uap->curregs[R5] &= ~DTR;
876
	uap->curregs[R5] &= ~DTR;
877
	write_zsreg(uap, R5, uap->curregs[R5]);
877
	write_zsreg(uap, R5, uap->curregs[R5]);
878
	zssync(uap);
878
	zssync(uap);
879
	spin_unlock_irqrestore(&uap->port.lock, flags);
879
	uart_port_unlock_irqrestore(&uap->port, flags);
880
	msleep(10);
880
	msleep(10);
881
}
881
}
882
882
Lines 896-904 static int pmz_startup(struct uart_port *port) Link Here
896
	 * initialize the chip
896
	 * initialize the chip
897
	 */
897
	 */
898
	if (!ZS_IS_CONS(uap)) {
898
	if (!ZS_IS_CONS(uap)) {
899
		spin_lock_irqsave(&port->lock, flags);
899
		uart_port_lock_irqsave(port, &flags);
900
		pwr_delay = __pmz_startup(uap);
900
		pwr_delay = __pmz_startup(uap);
901
		spin_unlock_irqrestore(&port->lock, flags);
901
		uart_port_unlock_irqrestore(port, flags);
902
	}	
902
	}	
903
	sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
903
	sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
904
	if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
904
	if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
Lines 921-929 static int pmz_startup(struct uart_port *port) Link Here
921
		pmz_irda_reset(uap);
921
		pmz_irda_reset(uap);
922
922
923
	/* Enable interrupt requests for the channel */
923
	/* Enable interrupt requests for the channel */
924
	spin_lock_irqsave(&port->lock, flags);
924
	uart_port_lock_irqsave(port, &flags);
925
	pmz_interrupt_control(uap, 1);
925
	pmz_interrupt_control(uap, 1);
926
	spin_unlock_irqrestore(&port->lock, flags);
926
	uart_port_unlock_irqrestore(port, flags);
927
927
928
	return 0;
928
	return 0;
929
}
929
}
Lines 933-939 static void pmz_shutdown(struct uart_port *port) Link Here
933
	struct uart_pmac_port *uap = to_pmz(port);
933
	struct uart_pmac_port *uap = to_pmz(port);
934
	unsigned long flags;
934
	unsigned long flags;
935
935
936
	spin_lock_irqsave(&port->lock, flags);
936
	uart_port_lock_irqsave(port, &flags);
937
937
938
	/* Disable interrupt requests for the channel */
938
	/* Disable interrupt requests for the channel */
939
	pmz_interrupt_control(uap, 0);
939
	pmz_interrupt_control(uap, 0);
Lines 948-966 static void pmz_shutdown(struct uart_port *port) Link Here
948
		pmz_maybe_update_regs(uap);
948
		pmz_maybe_update_regs(uap);
949
	}
949
	}
950
950
951
	spin_unlock_irqrestore(&port->lock, flags);
951
	uart_port_unlock_irqrestore(port, flags);
952
952
953
	/* Release interrupt handler */
953
	/* Release interrupt handler */
954
	free_irq(uap->port.irq, uap);
954
	free_irq(uap->port.irq, uap);
955
955
956
	spin_lock_irqsave(&port->lock, flags);
956
	uart_port_lock_irqsave(port, &flags);
957
957
958
	uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
958
	uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
959
959
960
	if (!ZS_IS_CONS(uap))
960
	if (!ZS_IS_CONS(uap))
961
		pmz_set_scc_power(uap, 0);	/* Shut the chip down */
961
		pmz_set_scc_power(uap, 0);	/* Shut the chip down */
962
962
963
	spin_unlock_irqrestore(&port->lock, flags);
963
	uart_port_unlock_irqrestore(port, flags);
964
}
964
}
965
965
966
/* Shared by TTY driver and serial console setup.  The port lock is held
966
/* Shared by TTY driver and serial console setup.  The port lock is held
Lines 1247-1253 static void pmz_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1247
	struct uart_pmac_port *uap = to_pmz(port);
1247
	struct uart_pmac_port *uap = to_pmz(port);
1248
	unsigned long flags;
1248
	unsigned long flags;
1249
1249
1250
	spin_lock_irqsave(&port->lock, flags);	
1250
	uart_port_lock_irqsave(port, &flags);	
1251
1251
1252
	/* Disable IRQs on the port */
1252
	/* Disable IRQs on the port */
1253
	pmz_interrupt_control(uap, 0);
1253
	pmz_interrupt_control(uap, 0);
Lines 1259-1265 static void pmz_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
1259
	if (ZS_IS_OPEN(uap))
1259
	if (ZS_IS_OPEN(uap))
1260
		pmz_interrupt_control(uap, 1);
1260
		pmz_interrupt_control(uap, 1);
1261
1261
1262
	spin_unlock_irqrestore(&port->lock, flags);
1262
	uart_port_unlock_irqrestore(port, flags);
1263
}
1263
}
1264
1264
1265
static const char *pmz_type(struct uart_port *port)
1265
static const char *pmz_type(struct uart_port *port)
Lines 1896-1902 static void pmz_console_write(struct console *con, const char *s, unsigned int c Link Here
1896
	struct uart_pmac_port *uap = &pmz_ports[con->index];
1896
	struct uart_pmac_port *uap = &pmz_ports[con->index];
1897
	unsigned long flags;
1897
	unsigned long flags;
1898
1898
1899
	spin_lock_irqsave(&uap->port.lock, flags);
1899
	uart_port_lock_irqsave(&uap->port, &flags);
1900
1900
1901
	/* Turn of interrupts and enable the transmitter. */
1901
	/* Turn of interrupts and enable the transmitter. */
1902
	write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
1902
	write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
Lines 1908-1914 static void pmz_console_write(struct console *con, const char *s, unsigned int c Link Here
1908
	write_zsreg(uap, R1, uap->curregs[1]);
1908
	write_zsreg(uap, R1, uap->curregs[1]);
1909
	/* Don't disable the transmitter. */
1909
	/* Don't disable the transmitter. */
1910
1910
1911
	spin_unlock_irqrestore(&uap->port.lock, flags);
1911
	uart_port_unlock_irqrestore(&uap->port, flags);
1912
}
1912
}
1913
1913
1914
/*
1914
/*
(-)a/drivers/tty/serial/pxa.c (-15 / +15 lines)
Lines 225-238 static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id) Link Here
225
	iir = serial_in(up, UART_IIR);
225
	iir = serial_in(up, UART_IIR);
226
	if (iir & UART_IIR_NO_INT)
226
	if (iir & UART_IIR_NO_INT)
227
		return IRQ_NONE;
227
		return IRQ_NONE;
228
	spin_lock(&up->port.lock);
228
	uart_port_lock(&up->port);
229
	lsr = serial_in(up, UART_LSR);
229
	lsr = serial_in(up, UART_LSR);
230
	if (lsr & UART_LSR_DR)
230
	if (lsr & UART_LSR_DR)
231
		receive_chars(up, &lsr);
231
		receive_chars(up, &lsr);
232
	check_modem_status(up);
232
	check_modem_status(up);
233
	if (lsr & UART_LSR_THRE)
233
	if (lsr & UART_LSR_THRE)
234
		transmit_chars(up);
234
		transmit_chars(up);
235
	spin_unlock(&up->port.lock);
235
	uart_port_unlock(&up->port);
236
	return IRQ_HANDLED;
236
	return IRQ_HANDLED;
237
}
237
}
238
238
Lines 242-250 static unsigned int serial_pxa_tx_empty(struct uart_port *port) Link Here
242
	unsigned long flags;
242
	unsigned long flags;
243
	unsigned int ret;
243
	unsigned int ret;
244
244
245
	spin_lock_irqsave(&up->port.lock, flags);
245
	uart_port_lock_irqsave(&up->port, &flags);
246
	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
246
	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
247
	spin_unlock_irqrestore(&up->port.lock, flags);
247
	uart_port_unlock_irqrestore(&up->port, flags);
248
248
249
	return ret;
249
	return ret;
250
}
250
}
Lines 295-307 static void serial_pxa_break_ctl(struct uart_port *port, int break_state) Link Here
295
	struct uart_pxa_port *up = (struct uart_pxa_port *)port;
295
	struct uart_pxa_port *up = (struct uart_pxa_port *)port;
296
	unsigned long flags;
296
	unsigned long flags;
297
297
298
	spin_lock_irqsave(&up->port.lock, flags);
298
	uart_port_lock_irqsave(&up->port, &flags);
299
	if (break_state == -1)
299
	if (break_state == -1)
300
		up->lcr |= UART_LCR_SBC;
300
		up->lcr |= UART_LCR_SBC;
301
	else
301
	else
302
		up->lcr &= ~UART_LCR_SBC;
302
		up->lcr &= ~UART_LCR_SBC;
303
	serial_out(up, UART_LCR, up->lcr);
303
	serial_out(up, UART_LCR, up->lcr);
304
	spin_unlock_irqrestore(&up->port.lock, flags);
304
	uart_port_unlock_irqrestore(&up->port, flags);
305
}
305
}
306
306
307
static int serial_pxa_startup(struct uart_port *port)
307
static int serial_pxa_startup(struct uart_port *port)
Lines 346-355 static int serial_pxa_startup(struct uart_port *port) Link Here
346
	 */
346
	 */
347
	serial_out(up, UART_LCR, UART_LCR_WLEN8);
347
	serial_out(up, UART_LCR, UART_LCR_WLEN8);
348
348
349
	spin_lock_irqsave(&up->port.lock, flags);
349
	uart_port_lock_irqsave(&up->port, &flags);
350
	up->port.mctrl |= TIOCM_OUT2;
350
	up->port.mctrl |= TIOCM_OUT2;
351
	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
351
	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
352
	spin_unlock_irqrestore(&up->port.lock, flags);
352
	uart_port_unlock_irqrestore(&up->port, flags);
353
353
354
	/*
354
	/*
355
	 * Finally, enable interrupts.  Note: Modem status interrupts
355
	 * Finally, enable interrupts.  Note: Modem status interrupts
Lines 383-392 static void serial_pxa_shutdown(struct uart_port *port) Link Here
383
	up->ier = 0;
383
	up->ier = 0;
384
	serial_out(up, UART_IER, 0);
384
	serial_out(up, UART_IER, 0);
385
385
386
	spin_lock_irqsave(&up->port.lock, flags);
386
	uart_port_lock_irqsave(&up->port, &flags);
387
	up->port.mctrl &= ~TIOCM_OUT2;
387
	up->port.mctrl &= ~TIOCM_OUT2;
388
	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
388
	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
389
	spin_unlock_irqrestore(&up->port.lock, flags);
389
	uart_port_unlock_irqrestore(&up->port, flags);
390
390
391
	/*
391
	/*
392
	 * Disable break condition and FIFOs
392
	 * Disable break condition and FIFOs
Lines 434-440 serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
434
	 * Ok, we're now changing the port state.  Do it with
434
	 * Ok, we're now changing the port state.  Do it with
435
	 * interrupts disabled.
435
	 * interrupts disabled.
436
	 */
436
	 */
437
	spin_lock_irqsave(&up->port.lock, flags);
437
	uart_port_lock_irqsave(&up->port, &flags);
438
438
439
	/*
439
	/*
440
	 * Ensure the port will be enabled.
440
	 * Ensure the port will be enabled.
Lines 504-510 serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
504
	up->lcr = cval;					/* Save LCR */
504
	up->lcr = cval;					/* Save LCR */
505
	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
505
	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
506
	serial_out(up, UART_FCR, fcr);
506
	serial_out(up, UART_FCR, fcr);
507
	spin_unlock_irqrestore(&up->port.lock, flags);
507
	uart_port_unlock_irqrestore(&up->port, flags);
508
}
508
}
509
509
510
static void
510
static void
Lines 608-616 serial_pxa_console_write(struct console *co, const char *s, unsigned int count) Link Here
608
	if (up->port.sysrq)
608
	if (up->port.sysrq)
609
		locked = 0;
609
		locked = 0;
610
	else if (oops_in_progress)
610
	else if (oops_in_progress)
611
		locked = spin_trylock(&up->port.lock);
611
		locked = uart_port_trylock(&up->port);
612
	else
612
	else
613
		spin_lock(&up->port.lock);
613
		uart_port_lock(&up->port);
614
614
615
	/*
615
	/*
616
	 *	First save the IER then disable the interrupts
616
	 *	First save the IER then disable the interrupts
Lines 628-634 serial_pxa_console_write(struct console *co, const char *s, unsigned int count) Link Here
628
	serial_out(up, UART_IER, ier);
628
	serial_out(up, UART_IER, ier);
629
629
630
	if (locked)
630
	if (locked)
631
		spin_unlock(&up->port.lock);
631
		uart_port_unlock(&up->port);
632
	local_irq_restore(flags);
632
	local_irq_restore(flags);
633
	clk_disable(up->clk);
633
	clk_disable(up->clk);
634
634
(-)a/drivers/tty/serial/qcom_geni_serial.c (-4 / +4 lines)
Lines 482-490 static void qcom_geni_serial_console_write(struct console *co, const char *s, Link Here
482
482
483
	uport = &port->uport;
483
	uport = &port->uport;
484
	if (oops_in_progress)
484
	if (oops_in_progress)
485
		locked = spin_trylock_irqsave(&uport->lock, flags);
485
		locked = uart_port_trylock_irqsave(uport, &flags);
486
	else
486
	else
487
		spin_lock_irqsave(&uport->lock, flags);
487
		uart_port_lock_irqsave(uport, &flags);
488
488
489
	geni_status = readl(uport->membase + SE_GENI_STATUS);
489
	geni_status = readl(uport->membase + SE_GENI_STATUS);
490
490
Lines 520-526 static void qcom_geni_serial_console_write(struct console *co, const char *s, Link Here
520
		qcom_geni_serial_setup_tx(uport, port->tx_remaining);
520
		qcom_geni_serial_setup_tx(uport, port->tx_remaining);
521
521
522
	if (locked)
522
	if (locked)
523
		spin_unlock_irqrestore(&uport->lock, flags);
523
		uart_port_unlock_irqrestore(uport, flags);
524
}
524
}
525
525
526
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
526
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
Lines 970-976 static irqreturn_t qcom_geni_serial_isr(int isr, void *dev) Link Here
970
	if (uport->suspended)
970
	if (uport->suspended)
971
		return IRQ_NONE;
971
		return IRQ_NONE;
972
972
973
	spin_lock(&uport->lock);
973
	uart_port_lock(uport);
974
974
975
	m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
975
	m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
976
	s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
976
	s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
(-)a/drivers/tty/serial/rda-uart.c (-17 / +17 lines)
Lines 139-150 static unsigned int rda_uart_tx_empty(struct uart_port *port) Link Here
139
	unsigned int ret;
139
	unsigned int ret;
140
	u32 val;
140
	u32 val;
141
141
142
	spin_lock_irqsave(&port->lock, flags);
142
	uart_port_lock_irqsave(port, &flags);
143
143
144
	val = rda_uart_read(port, RDA_UART_STATUS);
144
	val = rda_uart_read(port, RDA_UART_STATUS);
145
	ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
145
	ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
146
146
147
	spin_unlock_irqrestore(&port->lock, flags);
147
	uart_port_unlock_irqrestore(port, flags);
148
148
149
	return ret;
149
	return ret;
150
}
150
}
Lines 246-252 static void rda_uart_set_termios(struct uart_port *port, Link Here
246
	unsigned int baud;
246
	unsigned int baud;
247
	u32 irq_mask;
247
	u32 irq_mask;
248
248
249
	spin_lock_irqsave(&port->lock, flags);
249
	uart_port_lock_irqsave(port, &flags);
250
250
251
	baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
251
	baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
252
	rda_uart_change_baudrate(rda_port, baud);
252
	rda_uart_change_baudrate(rda_port, baud);
Lines 325-331 static void rda_uart_set_termios(struct uart_port *port, Link Here
325
	/* update the per-port timeout */
325
	/* update the per-port timeout */
326
	uart_update_timeout(port, termios->c_cflag, baud);
326
	uart_update_timeout(port, termios->c_cflag, baud);
327
327
328
	spin_unlock_irqrestore(&port->lock, flags);
328
	uart_port_unlock_irqrestore(port, flags);
329
}
329
}
330
330
331
static void rda_uart_send_chars(struct uart_port *port)
331
static void rda_uart_send_chars(struct uart_port *port)
Lines 408-414 static irqreturn_t rda_interrupt(int irq, void *dev_id) Link Here
408
	unsigned long flags;
408
	unsigned long flags;
409
	u32 val, irq_mask;
409
	u32 val, irq_mask;
410
410
411
	spin_lock_irqsave(&port->lock, flags);
411
	uart_port_lock_irqsave(port, &flags);
412
412
413
	/* Clear IRQ cause */
413
	/* Clear IRQ cause */
414
	val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
414
	val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
Lines 425-431 static irqreturn_t rda_interrupt(int irq, void *dev_id) Link Here
425
		rda_uart_send_chars(port);
425
		rda_uart_send_chars(port);
426
	}
426
	}
427
427
428
	spin_unlock_irqrestore(&port->lock, flags);
428
	uart_port_unlock_irqrestore(port, flags);
429
429
430
	return IRQ_HANDLED;
430
	return IRQ_HANDLED;
431
}
431
}
Lines 436-451 static int rda_uart_startup(struct uart_port *port) Link Here
436
	int ret;
436
	int ret;
437
	u32 val;
437
	u32 val;
438
438
439
	spin_lock_irqsave(&port->lock, flags);
439
	uart_port_lock_irqsave(port, &flags);
440
	rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
440
	rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
441
	spin_unlock_irqrestore(&port->lock, flags);
441
	uart_port_unlock_irqrestore(port, flags);
442
442
443
	ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
443
	ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
444
			  "rda-uart", port);
444
			  "rda-uart", port);
445
	if (ret)
445
	if (ret)
446
		return ret;
446
		return ret;
447
447
448
	spin_lock_irqsave(&port->lock, flags);
448
	uart_port_lock_irqsave(port, &flags);
449
449
450
	val = rda_uart_read(port, RDA_UART_CTRL);
450
	val = rda_uart_read(port, RDA_UART_CTRL);
451
	val |= RDA_UART_ENABLE;
451
	val |= RDA_UART_ENABLE;
Lines 456-462 static int rda_uart_startup(struct uart_port *port) Link Here
456
	val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
456
	val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
457
	rda_uart_write(port, val, RDA_UART_IRQ_MASK);
457
	rda_uart_write(port, val, RDA_UART_IRQ_MASK);
458
458
459
	spin_unlock_irqrestore(&port->lock, flags);
459
	uart_port_unlock_irqrestore(port, flags);
460
460
461
	return 0;
461
	return 0;
462
}
462
}
Lines 466-472 static void rda_uart_shutdown(struct uart_port *port) Link Here
466
	unsigned long flags;
466
	unsigned long flags;
467
	u32 val;
467
	u32 val;
468
468
469
	spin_lock_irqsave(&port->lock, flags);
469
	uart_port_lock_irqsave(port, &flags);
470
470
471
	rda_uart_stop_tx(port);
471
	rda_uart_stop_tx(port);
472
	rda_uart_stop_rx(port);
472
	rda_uart_stop_rx(port);
Lines 475-481 static void rda_uart_shutdown(struct uart_port *port) Link Here
475
	val &= ~RDA_UART_ENABLE;
475
	val &= ~RDA_UART_ENABLE;
476
	rda_uart_write(port, val, RDA_UART_CTRL);
476
	rda_uart_write(port, val, RDA_UART_CTRL);
477
477
478
	spin_unlock_irqrestore(&port->lock, flags);
478
	uart_port_unlock_irqrestore(port, flags);
479
}
479
}
480
480
481
static const char *rda_uart_type(struct uart_port *port)
481
static const char *rda_uart_type(struct uart_port *port)
Lines 515-521 static void rda_uart_config_port(struct uart_port *port, int flags) Link Here
515
		rda_uart_request_port(port);
515
		rda_uart_request_port(port);
516
	}
516
	}
517
517
518
	spin_lock_irqsave(&port->lock, irq_flags);
518
	uart_port_lock_irqsave(port, &irq_flags);
519
519
520
	/* Clear mask, so no surprise interrupts. */
520
	/* Clear mask, so no surprise interrupts. */
521
	rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
521
	rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
Lines 523-529 static void rda_uart_config_port(struct uart_port *port, int flags) Link Here
523
	/* Clear status register */
523
	/* Clear status register */
524
	rda_uart_write(port, 0, RDA_UART_STATUS);
524
	rda_uart_write(port, 0, RDA_UART_STATUS);
525
525
526
	spin_unlock_irqrestore(&port->lock, irq_flags);
526
	uart_port_unlock_irqrestore(port, irq_flags);
527
}
527
}
528
528
529
static void rda_uart_release_port(struct uart_port *port)
529
static void rda_uart_release_port(struct uart_port *port)
Lines 597-605 static void rda_uart_port_write(struct uart_port *port, const char *s, Link Here
597
	if (port->sysrq) {
597
	if (port->sysrq) {
598
		locked = 0;
598
		locked = 0;
599
	} else if (oops_in_progress) {
599
	} else if (oops_in_progress) {
600
		locked = spin_trylock(&port->lock);
600
		locked = uart_port_trylock(port);
601
	} else {
601
	} else {
602
		spin_lock(&port->lock);
602
		uart_port_lock(port);
603
		locked = 1;
603
		locked = 1;
604
	}
604
	}
605
605
Lines 615-621 static void rda_uart_port_write(struct uart_port *port, const char *s, Link Here
615
	rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
615
	rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
616
616
617
	if (locked)
617
	if (locked)
618
		spin_unlock(&port->lock);
618
		uart_port_unlock(port);
619
619
620
	local_irq_restore(flags);
620
	local_irq_restore(flags);
621
}
621
}
(-)a/drivers/tty/serial/rp2.c (-10 / +10 lines)
Lines 276-284 static unsigned int rp2_uart_tx_empty(struct uart_port *port) Link Here
276
	 * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
276
	 * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
277
	 * enabled.
277
	 * enabled.
278
	 */
278
	 */
279
	spin_lock_irqsave(&up->port.lock, flags);
279
	uart_port_lock_irqsave(&up->port, &flags);
280
	tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
280
	tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
281
	spin_unlock_irqrestore(&up->port.lock, flags);
281
	uart_port_unlock_irqrestore(&up->port, flags);
282
282
283
	return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
283
	return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
284
}
284
}
Lines 323-332 static void rp2_uart_break_ctl(struct uart_port *port, int break_state) Link Here
323
{
323
{
324
	unsigned long flags;
324
	unsigned long flags;
325
325
326
	spin_lock_irqsave(&port->lock, flags);
326
	uart_port_lock_irqsave(port, &flags);
327
	rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
327
	rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
328
		break_state ? RP2_TXRX_CTL_BREAK_m : 0);
328
		break_state ? RP2_TXRX_CTL_BREAK_m : 0);
329
	spin_unlock_irqrestore(&port->lock, flags);
329
	uart_port_unlock_irqrestore(port, flags);
330
}
330
}
331
331
332
static void rp2_uart_enable_ms(struct uart_port *port)
332
static void rp2_uart_enable_ms(struct uart_port *port)
Lines 383-389 static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
383
	if (tty_termios_baud_rate(new))
383
	if (tty_termios_baud_rate(new))
384
		tty_termios_encode_baud_rate(new, baud, baud);
384
		tty_termios_encode_baud_rate(new, baud, baud);
385
385
386
	spin_lock_irqsave(&port->lock, flags);
386
	uart_port_lock_irqsave(port, &flags);
387
387
388
	/* ignore all characters if CREAD is not set */
388
	/* ignore all characters if CREAD is not set */
389
	port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
389
	port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
Lines 391-397 static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new, Link Here
391
	__rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
391
	__rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
392
	uart_update_timeout(port, new->c_cflag, baud);
392
	uart_update_timeout(port, new->c_cflag, baud);
393
393
394
	spin_unlock_irqrestore(&port->lock, flags);
394
	uart_port_unlock_irqrestore(port, flags);
395
}
395
}
396
396
397
static void rp2_rx_chars(struct rp2_uart_port *up)
397
static void rp2_rx_chars(struct rp2_uart_port *up)
Lines 440-446 static void rp2_ch_interrupt(struct rp2_uart_port *up) Link Here
440
{
440
{
441
	u32 status;
441
	u32 status;
442
442
443
	spin_lock(&up->port.lock);
443
	uart_port_lock(&up->port);
444
444
445
	/*
445
	/*
446
	 * The IRQ status bits are clear-on-write.  Other status bits in
446
	 * The IRQ status bits are clear-on-write.  Other status bits in
Lines 456-462 static void rp2_ch_interrupt(struct rp2_uart_port *up) Link Here
456
	if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
456
	if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
457
		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
457
		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
458
458
459
	spin_unlock(&up->port.lock);
459
	uart_port_unlock(&up->port);
460
}
460
}
461
461
462
static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
462
static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
Lines 516-525 static void rp2_uart_shutdown(struct uart_port *port) Link Here
516
516
517
	rp2_uart_break_ctl(port, 0);
517
	rp2_uart_break_ctl(port, 0);
518
518
519
	spin_lock_irqsave(&port->lock, flags);
519
	uart_port_lock_irqsave(port, &flags);
520
	rp2_mask_ch_irq(up, up->idx, 0);
520
	rp2_mask_ch_irq(up, up->idx, 0);
521
	rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
521
	rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
522
	spin_unlock_irqrestore(&port->lock, flags);
522
	uart_port_unlock_irqrestore(port, flags);
523
}
523
}
524
524
525
static const char *rp2_uart_type(struct uart_port *port)
525
static const char *rp2_uart_type(struct uart_port *port)
(-)a/drivers/tty/serial/sa1100.c (-10 / +10 lines)
Lines 115-123 static void sa1100_timeout(struct timer_list *t) Link Here
115
	unsigned long flags;
115
	unsigned long flags;
116
116
117
	if (sport->port.state) {
117
	if (sport->port.state) {
118
		spin_lock_irqsave(&sport->port.lock, flags);
118
		uart_port_lock_irqsave(&sport->port, &flags);
119
		sa1100_mctrl_check(sport);
119
		sa1100_mctrl_check(sport);
120
		spin_unlock_irqrestore(&sport->port.lock, flags);
120
		uart_port_unlock_irqrestore(&sport->port, flags);
121
121
122
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
122
		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
123
	}
123
	}
Lines 247-253 static irqreturn_t sa1100_int(int irq, void *dev_id) Link Here
247
	struct sa1100_port *sport = dev_id;
247
	struct sa1100_port *sport = dev_id;
248
	unsigned int status, pass_counter = 0;
248
	unsigned int status, pass_counter = 0;
249
249
250
	spin_lock(&sport->port.lock);
250
	uart_port_lock(&sport->port);
251
	status = UART_GET_UTSR0(sport);
251
	status = UART_GET_UTSR0(sport);
252
	status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
252
	status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
253
	do {
253
	do {
Lines 276-282 static irqreturn_t sa1100_int(int irq, void *dev_id) Link Here
276
		status &= SM_TO_UTSR0(sport->port.read_status_mask) |
276
		status &= SM_TO_UTSR0(sport->port.read_status_mask) |
277
			  ~UTSR0_TFS;
277
			  ~UTSR0_TFS;
278
	} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
278
	} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
279
	spin_unlock(&sport->port.lock);
279
	uart_port_unlock(&sport->port);
280
280
281
	return IRQ_HANDLED;
281
	return IRQ_HANDLED;
282
}
282
}
Lines 321-334 static void sa1100_break_ctl(struct uart_port *port, int break_state) Link Here
321
	unsigned long flags;
321
	unsigned long flags;
322
	unsigned int utcr3;
322
	unsigned int utcr3;
323
323
324
	spin_lock_irqsave(&sport->port.lock, flags);
324
	uart_port_lock_irqsave(&sport->port, &flags);
325
	utcr3 = UART_GET_UTCR3(sport);
325
	utcr3 = UART_GET_UTCR3(sport);
326
	if (break_state == -1)
326
	if (break_state == -1)
327
		utcr3 |= UTCR3_BRK;
327
		utcr3 |= UTCR3_BRK;
328
	else
328
	else
329
		utcr3 &= ~UTCR3_BRK;
329
		utcr3 &= ~UTCR3_BRK;
330
	UART_PUT_UTCR3(sport, utcr3);
330
	UART_PUT_UTCR3(sport, utcr3);
331
	spin_unlock_irqrestore(&sport->port.lock, flags);
331
	uart_port_unlock_irqrestore(&sport->port, flags);
332
}
332
}
333
333
334
static int sa1100_startup(struct uart_port *port)
334
static int sa1100_startup(struct uart_port *port)
Lines 354-362 static int sa1100_startup(struct uart_port *port) Link Here
354
	/*
354
	/*
355
	 * Enable modem status interrupts
355
	 * Enable modem status interrupts
356
	 */
356
	 */
357
	spin_lock_irq(&sport->port.lock);
357
	uart_port_lock_irq(&sport->port);
358
	sa1100_enable_ms(&sport->port);
358
	sa1100_enable_ms(&sport->port);
359
	spin_unlock_irq(&sport->port.lock);
359
	uart_port_unlock_irq(&sport->port);
360
360
361
	return 0;
361
	return 0;
362
}
362
}
Lines 423-429 sa1100_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
423
423
424
	del_timer_sync(&sport->timer);
424
	del_timer_sync(&sport->timer);
425
425
426
	spin_lock_irqsave(&sport->port.lock, flags);
426
	uart_port_lock_irqsave(&sport->port, &flags);
427
427
428
	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
428
	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
429
	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
429
	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
Lines 485-491 sa1100_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
485
	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
485
	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
486
		sa1100_enable_ms(&sport->port);
486
		sa1100_enable_ms(&sport->port);
487
487
488
	spin_unlock_irqrestore(&sport->port.lock, flags);
488
	uart_port_unlock_irqrestore(&sport->port, flags);
489
}
489
}
490
490
491
static const char *sa1100_type(struct uart_port *port)
491
static const char *sa1100_type(struct uart_port *port)
(-)a/drivers/tty/serial/samsung_tty.c (-25 / +25 lines)
Lines 248-254 static void s3c24xx_serial_rx_enable(struct uart_port *port) Link Here
248
	unsigned int ucon, ufcon;
248
	unsigned int ucon, ufcon;
249
	int count = 10000;
249
	int count = 10000;
250
250
251
	spin_lock_irqsave(&port->lock, flags);
251
	uart_port_lock_irqsave(port, &flags);
252
252
253
	while (--count && !s3c24xx_serial_txempty_nofifo(port))
253
	while (--count && !s3c24xx_serial_txempty_nofifo(port))
254
		udelay(100);
254
		udelay(100);
Lines 262-268 static void s3c24xx_serial_rx_enable(struct uart_port *port) Link Here
262
	wr_regl(port, S3C2410_UCON, ucon);
262
	wr_regl(port, S3C2410_UCON, ucon);
263
263
264
	ourport->rx_enabled = 1;
264
	ourport->rx_enabled = 1;
265
	spin_unlock_irqrestore(&port->lock, flags);
265
	uart_port_unlock_irqrestore(port, flags);
266
}
266
}
267
267
268
static void s3c24xx_serial_rx_disable(struct uart_port *port)
268
static void s3c24xx_serial_rx_disable(struct uart_port *port)
Lines 271-284 static void s3c24xx_serial_rx_disable(struct uart_port *port) Link Here
271
	unsigned long flags;
271
	unsigned long flags;
272
	unsigned int ucon;
272
	unsigned int ucon;
273
273
274
	spin_lock_irqsave(&port->lock, flags);
274
	uart_port_lock_irqsave(port, &flags);
275
275
276
	ucon = rd_regl(port, S3C2410_UCON);
276
	ucon = rd_regl(port, S3C2410_UCON);
277
	ucon &= ~S3C2410_UCON_RXIRQMODE;
277
	ucon &= ~S3C2410_UCON_RXIRQMODE;
278
	wr_regl(port, S3C2410_UCON, ucon);
278
	wr_regl(port, S3C2410_UCON, ucon);
279
279
280
	ourport->rx_enabled = 0;
280
	ourport->rx_enabled = 0;
281
	spin_unlock_irqrestore(&port->lock, flags);
281
	uart_port_unlock_irqrestore(port, flags);
282
}
282
}
283
283
284
static void s3c24xx_serial_stop_tx(struct uart_port *port)
284
static void s3c24xx_serial_stop_tx(struct uart_port *port)
Lines 344-350 static void s3c24xx_serial_tx_dma_complete(void *args) Link Here
344
				dma->tx_transfer_addr, dma->tx_size,
344
				dma->tx_transfer_addr, dma->tx_size,
345
				DMA_TO_DEVICE);
345
				DMA_TO_DEVICE);
346
346
347
	spin_lock_irqsave(&port->lock, flags);
347
	uart_port_lock_irqsave(port, &flags);
348
348
349
	uart_xmit_advance(port, count);
349
	uart_xmit_advance(port, count);
350
	ourport->tx_in_progress = 0;
350
	ourport->tx_in_progress = 0;
Lines 353-359 static void s3c24xx_serial_tx_dma_complete(void *args) Link Here
353
		uart_write_wakeup(port);
353
		uart_write_wakeup(port);
354
354
355
	s3c24xx_serial_start_next_tx(ourport);
355
	s3c24xx_serial_start_next_tx(ourport);
356
	spin_unlock_irqrestore(&port->lock, flags);
356
	uart_port_unlock_irqrestore(port, flags);
357
}
357
}
358
358
359
static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
359
static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
Lines 619-625 static void s3c24xx_serial_rx_dma_complete(void *args) Link Here
619
	received  = dma->rx_bytes_requested - state.residue;
619
	received  = dma->rx_bytes_requested - state.residue;
620
	async_tx_ack(dma->rx_desc);
620
	async_tx_ack(dma->rx_desc);
621
621
622
	spin_lock_irqsave(&port->lock, flags);
622
	uart_port_lock_irqsave(port, &flags);
623
623
624
	if (received)
624
	if (received)
625
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
625
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
Lines 631-637 static void s3c24xx_serial_rx_dma_complete(void *args) Link Here
631
631
632
	s3c64xx_start_rx_dma(ourport);
632
	s3c64xx_start_rx_dma(ourport);
633
633
634
	spin_unlock_irqrestore(&port->lock, flags);
634
	uart_port_unlock_irqrestore(port, flags);
635
}
635
}
636
636
637
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
637
static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
Lines 722-728 static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id) Link Here
722
	utrstat = rd_regl(port, S3C2410_UTRSTAT);
722
	utrstat = rd_regl(port, S3C2410_UTRSTAT);
723
	rd_regl(port, S3C2410_UFSTAT);
723
	rd_regl(port, S3C2410_UFSTAT);
724
724
725
	spin_lock(&port->lock);
725
	uart_port_lock(port);
726
726
727
	if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
727
	if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
728
		s3c64xx_start_rx_dma(ourport);
728
		s3c64xx_start_rx_dma(ourport);
Lines 751-757 static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id) Link Here
751
	wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
751
	wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
752
752
753
finish:
753
finish:
754
	spin_unlock(&port->lock);
754
	uart_port_unlock(port);
755
755
756
	return IRQ_HANDLED;
756
	return IRQ_HANDLED;
757
}
757
}
Lines 849-857 static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id) Link Here
849
	struct s3c24xx_uart_port *ourport = dev_id;
849
	struct s3c24xx_uart_port *ourport = dev_id;
850
	struct uart_port *port = &ourport->port;
850
	struct uart_port *port = &ourport->port;
851
851
852
	spin_lock(&port->lock);
852
	uart_port_lock(port);
853
	s3c24xx_serial_rx_drain_fifo(ourport);
853
	s3c24xx_serial_rx_drain_fifo(ourport);
854
	spin_unlock(&port->lock);
854
	uart_port_unlock(port);
855
855
856
	return IRQ_HANDLED;
856
	return IRQ_HANDLED;
857
}
857
}
Lines 932-942 static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id) Link Here
932
	struct s3c24xx_uart_port *ourport = id;
932
	struct s3c24xx_uart_port *ourport = id;
933
	struct uart_port *port = &ourport->port;
933
	struct uart_port *port = &ourport->port;
934
934
935
	spin_lock(&port->lock);
935
	uart_port_lock(port);
936
936
937
	s3c24xx_serial_tx_chars(ourport);
937
	s3c24xx_serial_tx_chars(ourport);
938
938
939
	spin_unlock(&port->lock);
939
	uart_port_unlock(port);
940
	return IRQ_HANDLED;
940
	return IRQ_HANDLED;
941
}
941
}
942
942
Lines 1033-1039 static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state) Link Here
1033
	unsigned long flags;
1033
	unsigned long flags;
1034
	unsigned int ucon;
1034
	unsigned int ucon;
1035
1035
1036
	spin_lock_irqsave(&port->lock, flags);
1036
	uart_port_lock_irqsave(port, &flags);
1037
1037
1038
	ucon = rd_regl(port, S3C2410_UCON);
1038
	ucon = rd_regl(port, S3C2410_UCON);
1039
1039
Lines 1044-1050 static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state) Link Here
1044
1044
1045
	wr_regl(port, S3C2410_UCON, ucon);
1045
	wr_regl(port, S3C2410_UCON, ucon);
1046
1046
1047
	spin_unlock_irqrestore(&port->lock, flags);
1047
	uart_port_unlock_irqrestore(port, flags);
1048
}
1048
}
1049
1049
1050
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
1050
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
Lines 1303-1309 static int s3c64xx_serial_startup(struct uart_port *port) Link Here
1303
	ourport->rx_enabled = 1;
1303
	ourport->rx_enabled = 1;
1304
	ourport->tx_enabled = 0;
1304
	ourport->tx_enabled = 0;
1305
1305
1306
	spin_lock_irqsave(&port->lock, flags);
1306
	uart_port_lock_irqsave(port, &flags);
1307
1307
1308
	ufcon = rd_regl(port, S3C2410_UFCON);
1308
	ufcon = rd_regl(port, S3C2410_UFCON);
1309
	ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
1309
	ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
Lines 1313-1319 static int s3c64xx_serial_startup(struct uart_port *port) Link Here
1313
1313
1314
	enable_rx_pio(ourport);
1314
	enable_rx_pio(ourport);
1315
1315
1316
	spin_unlock_irqrestore(&port->lock, flags);
1316
	uart_port_unlock_irqrestore(port, flags);
1317
1317
1318
	/* Enable Rx Interrupt */
1318
	/* Enable Rx Interrupt */
1319
	s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
1319
	s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
Lines 1341-1347 static int apple_s5l_serial_startup(struct uart_port *port) Link Here
1341
	ourport->rx_enabled = 1;
1341
	ourport->rx_enabled = 1;
1342
	ourport->tx_enabled = 0;
1342
	ourport->tx_enabled = 0;
1343
1343
1344
	spin_lock_irqsave(&port->lock, flags);
1344
	uart_port_lock_irqsave(port, &flags);
1345
1345
1346
	ufcon = rd_regl(port, S3C2410_UFCON);
1346
	ufcon = rd_regl(port, S3C2410_UFCON);
1347
	ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
1347
	ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
Lines 1351-1357 static int apple_s5l_serial_startup(struct uart_port *port) Link Here
1351
1351
1352
	enable_rx_pio(ourport);
1352
	enable_rx_pio(ourport);
1353
1353
1354
	spin_unlock_irqrestore(&port->lock, flags);
1354
	uart_port_unlock_irqrestore(port, flags);
1355
1355
1356
	/* Enable Rx Interrupt */
1356
	/* Enable Rx Interrupt */
1357
	s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
1357
	s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
Lines 1626-1632 static void s3c24xx_serial_set_termios(struct uart_port *port, Link Here
1626
		ulcon |= S3C2410_LCON_PNONE;
1626
		ulcon |= S3C2410_LCON_PNONE;
1627
	}
1627
	}
1628
1628
1629
	spin_lock_irqsave(&port->lock, flags);
1629
	uart_port_lock_irqsave(port, &flags);
1630
1630
1631
	dev_dbg(port->dev,
1631
	dev_dbg(port->dev,
1632
		"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
1632
		"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
Lines 1684-1690 static void s3c24xx_serial_set_termios(struct uart_port *port, Link Here
1684
	if ((termios->c_cflag & CREAD) == 0)
1684
	if ((termios->c_cflag & CREAD) == 0)
1685
		port->ignore_status_mask |= RXSTAT_DUMMY_READ;
1685
		port->ignore_status_mask |= RXSTAT_DUMMY_READ;
1686
1686
1687
	spin_unlock_irqrestore(&port->lock, flags);
1687
	uart_port_unlock_irqrestore(port, flags);
1688
}
1688
}
1689
1689
1690
static const char *s3c24xx_serial_type(struct uart_port *port)
1690
static const char *s3c24xx_serial_type(struct uart_port *port)
Lines 2376-2389 s3c24xx_serial_console_write(struct console *co, const char *s, Link Here
2376
	if (cons_uart->sysrq)
2376
	if (cons_uart->sysrq)
2377
		locked = false;
2377
		locked = false;
2378
	else if (oops_in_progress)
2378
	else if (oops_in_progress)
2379
		locked = spin_trylock_irqsave(&cons_uart->lock, flags);
2379
		locked = uart_port_trylock_irqsave(cons_uart, &flags);
2380
	else
2380
	else
2381
		spin_lock_irqsave(&cons_uart->lock, flags);
2381
		uart_port_lock_irqsave(cons_uart, &flags);
2382
2382
2383
	uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
2383
	uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
2384
2384
2385
	if (locked)
2385
	if (locked)
2386
		spin_unlock_irqrestore(&cons_uart->lock, flags);
2386
		uart_port_unlock_irqrestore(cons_uart, flags);
2387
}
2387
}
2388
2388
2389
/* Shouldn't be __init, as it can be instantiated from other module */
2389
/* Shouldn't be __init, as it can be instantiated from other module */
(-)a/drivers/tty/serial/sb1250-duart.c (-6 / +6 lines)
Lines 610-616 static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios, Link Here
610
	else
610
	else
611
		aux &= ~M_DUART_CTS_CHNG_ENA;
611
		aux &= ~M_DUART_CTS_CHNG_ENA;
612
612
613
	spin_lock(&uport->lock);
613
	uart_port_lock(uport);
614
614
615
	if (sport->tx_stopped)
615
	if (sport->tx_stopped)
616
		command |= M_DUART_TX_DIS;
616
		command |= M_DUART_TX_DIS;
Lines 632-638 static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios, Link Here
632
632
633
	write_sbdchn(sport, R_DUART_CMD, command);
633
	write_sbdchn(sport, R_DUART_CMD, command);
634
634
635
	spin_unlock(&uport->lock);
635
	uart_port_unlock(uport);
636
}
636
}
637
637
638
638
Lines 839-860 static void sbd_console_write(struct console *co, const char *s, Link Here
839
	unsigned int mask;
839
	unsigned int mask;
840
840
841
	/* Disable transmit interrupts and enable the transmitter. */
841
	/* Disable transmit interrupts and enable the transmitter. */
842
	spin_lock_irqsave(&uport->lock, flags);
842
	uart_port_lock_irqsave(uport, &flags);
843
	mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
843
	mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
844
	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
844
	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
845
		     mask & ~M_DUART_IMR_TX);
845
		     mask & ~M_DUART_IMR_TX);
846
	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
846
	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
847
	spin_unlock_irqrestore(&uport->lock, flags);
847
	uart_port_unlock_irqrestore(uport, flags);
848
848
849
	uart_console_write(&sport->port, s, count, sbd_console_putchar);
849
	uart_console_write(&sport->port, s, count, sbd_console_putchar);
850
850
851
	/* Restore transmit interrupts and the transmitter enable. */
851
	/* Restore transmit interrupts and the transmitter enable. */
852
	spin_lock_irqsave(&uport->lock, flags);
852
	uart_port_lock_irqsave(uport, &flags);
853
	sbd_line_drain(sport);
853
	sbd_line_drain(sport);
854
	if (sport->tx_stopped)
854
	if (sport->tx_stopped)
855
		write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
855
		write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
856
	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
856
	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
857
	spin_unlock_irqrestore(&uport->lock, flags);
857
	uart_port_unlock_irqrestore(uport, flags);
858
}
858
}
859
859
860
static int __init sbd_console_setup(struct console *co, char *options)
860
static int __init sbd_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/sc16is7xx.c (-20 / +20 lines)
Lines 667-675 static void sc16is7xx_handle_tx(struct uart_port *port) Link Here
667
	}
667
	}
668
668
669
	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
669
	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
670
		spin_lock_irqsave(&port->lock, flags);
670
		uart_port_lock_irqsave(port, &flags);
671
		sc16is7xx_stop_tx(port);
671
		sc16is7xx_stop_tx(port);
672
		spin_unlock_irqrestore(&port->lock, flags);
672
		uart_port_unlock_irqrestore(port, flags);
673
		return;
673
		return;
674
	}
674
	}
675
675
Lines 695-707 static void sc16is7xx_handle_tx(struct uart_port *port) Link Here
695
		sc16is7xx_fifo_write(port, to_send);
695
		sc16is7xx_fifo_write(port, to_send);
696
	}
696
	}
697
697
698
	spin_lock_irqsave(&port->lock, flags);
698
	uart_port_lock_irqsave(port, &flags);
699
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
699
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
700
		uart_write_wakeup(port);
700
		uart_write_wakeup(port);
701
701
702
	if (uart_circ_empty(xmit))
702
	if (uart_circ_empty(xmit))
703
		sc16is7xx_stop_tx(port);
703
		sc16is7xx_stop_tx(port);
704
	spin_unlock_irqrestore(&port->lock, flags);
704
	uart_port_unlock_irqrestore(port, flags);
705
}
705
}
706
706
707
static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
707
static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
Lines 733-739 static void sc16is7xx_update_mlines(struct sc16is7xx_one *one) Link Here
733
733
734
	one->old_mctrl = status;
734
	one->old_mctrl = status;
735
735
736
	spin_lock_irqsave(&port->lock, flags);
736
	uart_port_lock_irqsave(port, &flags);
737
	if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
737
	if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
738
		port->icount.rng++;
738
		port->icount.rng++;
739
	if (changed & TIOCM_DSR)
739
	if (changed & TIOCM_DSR)
Lines 744-750 static void sc16is7xx_update_mlines(struct sc16is7xx_one *one) Link Here
744
		uart_handle_cts_change(port, status & TIOCM_CTS);
744
		uart_handle_cts_change(port, status & TIOCM_CTS);
745
745
746
	wake_up_interruptible(&port->state->port.delta_msr_wait);
746
	wake_up_interruptible(&port->state->port.delta_msr_wait);
747
	spin_unlock_irqrestore(&port->lock, flags);
747
	uart_port_unlock_irqrestore(port, flags);
748
}
748
}
749
749
750
static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
750
static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
Lines 823-831 static void sc16is7xx_tx_proc(struct kthread_work *ws) Link Here
823
	sc16is7xx_handle_tx(port);
823
	sc16is7xx_handle_tx(port);
824
	mutex_unlock(&s->efr_lock);
824
	mutex_unlock(&s->efr_lock);
825
825
826
	spin_lock_irqsave(&port->lock, flags);
826
	uart_port_lock_irqsave(port, &flags);
827
	sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
827
	sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
828
	spin_unlock_irqrestore(&port->lock, flags);
828
	uart_port_unlock_irqrestore(port, flags);
829
}
829
}
830
830
831
static void sc16is7xx_reconf_rs485(struct uart_port *port)
831
static void sc16is7xx_reconf_rs485(struct uart_port *port)
Lines 836-849 static void sc16is7xx_reconf_rs485(struct uart_port *port) Link Here
836
	struct serial_rs485 *rs485 = &port->rs485;
836
	struct serial_rs485 *rs485 = &port->rs485;
837
	unsigned long irqflags;
837
	unsigned long irqflags;
838
838
839
	spin_lock_irqsave(&port->lock, irqflags);
839
	uart_port_lock_irqsave(port, &irqflags);
840
	if (rs485->flags & SER_RS485_ENABLED) {
840
	if (rs485->flags & SER_RS485_ENABLED) {
841
		efcr |=	SC16IS7XX_EFCR_AUTO_RS485_BIT;
841
		efcr |=	SC16IS7XX_EFCR_AUTO_RS485_BIT;
842
842
843
		if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
843
		if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
844
			efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
844
			efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
845
	}
845
	}
846
	spin_unlock_irqrestore(&port->lock, irqflags);
846
	uart_port_unlock_irqrestore(port, irqflags);
847
847
848
	sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
848
	sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
849
}
849
}
Lines 854-863 static void sc16is7xx_reg_proc(struct kthread_work *ws) Link Here
854
	struct sc16is7xx_one_config config;
854
	struct sc16is7xx_one_config config;
855
	unsigned long irqflags;
855
	unsigned long irqflags;
856
856
857
	spin_lock_irqsave(&one->port.lock, irqflags);
857
	uart_port_lock_irqsave(&one->port, &irqflags);
858
	config = one->config;
858
	config = one->config;
859
	memset(&one->config, 0, sizeof(one->config));
859
	memset(&one->config, 0, sizeof(one->config));
860
	spin_unlock_irqrestore(&one->port.lock, irqflags);
860
	uart_port_unlock_irqrestore(&one->port, irqflags);
861
861
862
	if (config.flags & SC16IS7XX_RECONF_MD) {
862
	if (config.flags & SC16IS7XX_RECONF_MD) {
863
		u8 mcr = 0;
863
		u8 mcr = 0;
Lines 963-980 static void sc16is7xx_throttle(struct uart_port *port) Link Here
963
	 * value set in MCR register. Stop reading data from RX FIFO so the
963
	 * value set in MCR register. Stop reading data from RX FIFO so the
964
	 * AutoRTS feature will de-activate RTS output.
964
	 * AutoRTS feature will de-activate RTS output.
965
	 */
965
	 */
966
	spin_lock_irqsave(&port->lock, flags);
966
	uart_port_lock_irqsave(port, &flags);
967
	sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
967
	sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
968
	spin_unlock_irqrestore(&port->lock, flags);
968
	uart_port_unlock_irqrestore(port, flags);
969
}
969
}
970
970
971
static void sc16is7xx_unthrottle(struct uart_port *port)
971
static void sc16is7xx_unthrottle(struct uart_port *port)
972
{
972
{
973
	unsigned long flags;
973
	unsigned long flags;
974
974
975
	spin_lock_irqsave(&port->lock, flags);
975
	uart_port_lock_irqsave(port, &flags);
976
	sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
976
	sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
977
	spin_unlock_irqrestore(&port->lock, flags);
977
	uart_port_unlock_irqrestore(port, flags);
978
}
978
}
979
979
980
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
980
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
Lines 1113-1119 static void sc16is7xx_set_termios(struct uart_port *port, Link Here
1113
	/* Setup baudrate generator */
1113
	/* Setup baudrate generator */
1114
	baud = sc16is7xx_set_baud(port, baud);
1114
	baud = sc16is7xx_set_baud(port, baud);
1115
1115
1116
	spin_lock_irqsave(&port->lock, flags);
1116
	uart_port_lock_irqsave(port, &flags);
1117
1117
1118
	/* Update timeout according to new baud rate */
1118
	/* Update timeout according to new baud rate */
1119
	uart_update_timeout(port, termios->c_cflag, baud);
1119
	uart_update_timeout(port, termios->c_cflag, baud);
Lines 1121-1127 static void sc16is7xx_set_termios(struct uart_port *port, Link Here
1121
	if (UART_ENABLE_MS(port, termios->c_cflag))
1121
	if (UART_ENABLE_MS(port, termios->c_cflag))
1122
		sc16is7xx_enable_ms(port);
1122
		sc16is7xx_enable_ms(port);
1123
1123
1124
	spin_unlock_irqrestore(&port->lock, flags);
1124
	uart_port_unlock_irqrestore(port, flags);
1125
}
1125
}
1126
1126
1127
static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
1127
static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
Lines 1208-1216 static int sc16is7xx_startup(struct uart_port *port) Link Here
1208
	sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
1208
	sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
1209
1209
1210
	/* Enable modem status polling */
1210
	/* Enable modem status polling */
1211
	spin_lock_irqsave(&port->lock, flags);
1211
	uart_port_lock_irqsave(port, &flags);
1212
	sc16is7xx_enable_ms(port);
1212
	sc16is7xx_enable_ms(port);
1213
	spin_unlock_irqrestore(&port->lock, flags);
1213
	uart_port_unlock_irqrestore(port, flags);
1214
1214
1215
	return 0;
1215
	return 0;
1216
}
1216
}
(-)a/drivers/tty/serial/serial-tegra.c (-16 / +16 lines)
Lines 411-417 static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud) Link Here
411
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
411
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
412
	}
412
	}
413
413
414
	spin_lock_irqsave(&tup->uport.lock, flags);
414
	uart_port_lock_irqsave(&tup->uport, &flags);
415
	lcr = tup->lcr_shadow;
415
	lcr = tup->lcr_shadow;
416
	lcr |= UART_LCR_DLAB;
416
	lcr |= UART_LCR_DLAB;
417
	tegra_uart_write(tup, lcr, UART_LCR);
417
	tegra_uart_write(tup, lcr, UART_LCR);
Lines 424-430 static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud) Link Here
424
424
425
	/* Dummy read to ensure the write is posted */
425
	/* Dummy read to ensure the write is posted */
426
	tegra_uart_read(tup, UART_SCR);
426
	tegra_uart_read(tup, UART_SCR);
427
	spin_unlock_irqrestore(&tup->uport.lock, flags);
427
	uart_port_unlock_irqrestore(&tup->uport, flags);
428
428
429
	tup->current_baud = baud;
429
	tup->current_baud = baud;
430
430
Lines 522-534 static void tegra_uart_tx_dma_complete(void *args) Link Here
522
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
522
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
523
	count = tup->tx_bytes_requested - state.residue;
523
	count = tup->tx_bytes_requested - state.residue;
524
	async_tx_ack(tup->tx_dma_desc);
524
	async_tx_ack(tup->tx_dma_desc);
525
	spin_lock_irqsave(&tup->uport.lock, flags);
525
	uart_port_lock_irqsave(&tup->uport, &flags);
526
	uart_xmit_advance(&tup->uport, count);
526
	uart_xmit_advance(&tup->uport, count);
527
	tup->tx_in_progress = 0;
527
	tup->tx_in_progress = 0;
528
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
528
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
529
		uart_write_wakeup(&tup->uport);
529
		uart_write_wakeup(&tup->uport);
530
	tegra_uart_start_next_tx(tup);
530
	tegra_uart_start_next_tx(tup);
531
	spin_unlock_irqrestore(&tup->uport.lock, flags);
531
	uart_port_unlock_irqrestore(&tup->uport, flags);
532
}
532
}
533
533
534
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
534
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
Lines 598-610 static unsigned int tegra_uart_tx_empty(struct uart_port *u) Link Here
598
	unsigned int ret = 0;
598
	unsigned int ret = 0;
599
	unsigned long flags;
599
	unsigned long flags;
600
600
601
	spin_lock_irqsave(&u->lock, flags);
601
	uart_port_lock_irqsave(u, &flags);
602
	if (!tup->tx_in_progress) {
602
	if (!tup->tx_in_progress) {
603
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
603
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
604
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
604
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
605
			ret = TIOCSER_TEMT;
605
			ret = TIOCSER_TEMT;
606
	}
606
	}
607
	spin_unlock_irqrestore(&u->lock, flags);
607
	uart_port_unlock_irqrestore(u, flags);
608
	return ret;
608
	return ret;
609
}
609
}
610
610
Lines 727-733 static void tegra_uart_rx_dma_complete(void *args) Link Here
727
	struct dma_tx_state state;
727
	struct dma_tx_state state;
728
	enum dma_status status;
728
	enum dma_status status;
729
729
730
	spin_lock_irqsave(&u->lock, flags);
730
	uart_port_lock_irqsave(u, &flags);
731
731
732
	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
732
	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
733
733
Lines 749-755 static void tegra_uart_rx_dma_complete(void *args) Link Here
749
		set_rts(tup, true);
749
		set_rts(tup, true);
750
750
751
done:
751
done:
752
	spin_unlock_irqrestore(&u->lock, flags);
752
	uart_port_unlock_irqrestore(u, flags);
753
}
753
}
754
754
755
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
755
static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
Lines 836-842 static irqreturn_t tegra_uart_isr(int irq, void *data) Link Here
836
	bool is_rx_int = false;
836
	bool is_rx_int = false;
837
	unsigned long flags;
837
	unsigned long flags;
838
838
839
	spin_lock_irqsave(&u->lock, flags);
839
	uart_port_lock_irqsave(u, &flags);
840
	while (1) {
840
	while (1) {
841
		iir = tegra_uart_read(tup, UART_IIR);
841
		iir = tegra_uart_read(tup, UART_IIR);
842
		if (iir & UART_IIR_NO_INT) {
842
		if (iir & UART_IIR_NO_INT) {
Lines 852-858 static irqreturn_t tegra_uart_isr(int irq, void *data) Link Here
852
			} else if (is_rx_start) {
852
			} else if (is_rx_start) {
853
				tegra_uart_start_rx_dma(tup);
853
				tegra_uart_start_rx_dma(tup);
854
			}
854
			}
855
			spin_unlock_irqrestore(&u->lock, flags);
855
			uart_port_unlock_irqrestore(u, flags);
856
			return IRQ_HANDLED;
856
			return IRQ_HANDLED;
857
		}
857
		}
858
858
Lines 969-979 static void tegra_uart_hw_deinit(struct tegra_uart_port *tup) Link Here
969
		}
969
		}
970
	}
970
	}
971
971
972
	spin_lock_irqsave(&tup->uport.lock, flags);
972
	uart_port_lock_irqsave(&tup->uport, &flags);
973
	/* Reset the Rx and Tx FIFOs */
973
	/* Reset the Rx and Tx FIFOs */
974
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
974
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
975
	tup->current_baud = 0;
975
	tup->current_baud = 0;
976
	spin_unlock_irqrestore(&tup->uport.lock, flags);
976
	uart_port_unlock_irqrestore(&tup->uport, flags);
977
977
978
	tup->rx_in_progress = 0;
978
	tup->rx_in_progress = 0;
979
	tup->tx_in_progress = 0;
979
	tup->tx_in_progress = 0;
Lines 1292-1298 static void tegra_uart_set_termios(struct uart_port *u, Link Here
1292
	int ret;
1292
	int ret;
1293
1293
1294
	max_divider *= 16;
1294
	max_divider *= 16;
1295
	spin_lock_irqsave(&u->lock, flags);
1295
	uart_port_lock_irqsave(u, &flags);
1296
1296
1297
	/* Changing configuration, it is safe to stop any rx now */
1297
	/* Changing configuration, it is safe to stop any rx now */
1298
	if (tup->rts_active)
1298
	if (tup->rts_active)
Lines 1341-1347 static void tegra_uart_set_termios(struct uart_port *u, Link Here
1341
	baud = uart_get_baud_rate(u, termios, oldtermios,
1341
	baud = uart_get_baud_rate(u, termios, oldtermios,
1342
			parent_clk_rate/max_divider,
1342
			parent_clk_rate/max_divider,
1343
			parent_clk_rate/16);
1343
			parent_clk_rate/16);
1344
	spin_unlock_irqrestore(&u->lock, flags);
1344
	uart_port_unlock_irqrestore(u, flags);
1345
	ret = tegra_set_baudrate(tup, baud);
1345
	ret = tegra_set_baudrate(tup, baud);
1346
	if (ret < 0) {
1346
	if (ret < 0) {
1347
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
1347
		dev_err(tup->uport.dev, "Failed to set baud rate\n");
Lines 1349-1355 static void tegra_uart_set_termios(struct uart_port *u, Link Here
1349
	}
1349
	}
1350
	if (tty_termios_baud_rate(termios))
1350
	if (tty_termios_baud_rate(termios))
1351
		tty_termios_encode_baud_rate(termios, baud, baud);
1351
		tty_termios_encode_baud_rate(termios, baud, baud);
1352
	spin_lock_irqsave(&u->lock, flags);
1352
	uart_port_lock_irqsave(u, &flags);
1353
1353
1354
	/* Flow control */
1354
	/* Flow control */
1355
	if (termios->c_cflag & CRTSCTS)	{
1355
	if (termios->c_cflag & CRTSCTS)	{
Lines 1382-1388 static void tegra_uart_set_termios(struct uart_port *u, Link Here
1382
	if (termios->c_iflag & IGNBRK)
1382
	if (termios->c_iflag & IGNBRK)
1383
		tup->uport.ignore_status_mask |= UART_LSR_BI;
1383
		tup->uport.ignore_status_mask |= UART_LSR_BI;
1384
1384
1385
	spin_unlock_irqrestore(&u->lock, flags);
1385
	uart_port_unlock_irqrestore(u, flags);
1386
}
1386
}
1387
1387
1388
static const char *tegra_uart_type(struct uart_port *u)
1388
static const char *tegra_uart_type(struct uart_port *u)
(-)a/drivers/tty/serial/serial_core.c (-46 / +46 lines)
Lines 79-85 static inline void uart_port_deref(struct uart_port *uport) Link Here
79
	({								\
79
	({								\
80
		struct uart_port *__uport = uart_port_ref(state);	\
80
		struct uart_port *__uport = uart_port_ref(state);	\
81
		if (__uport)						\
81
		if (__uport)						\
82
			spin_lock_irqsave(&__uport->lock, flags);	\
82
			uart_port_lock_irqsave(__uport, &flags);	\
83
		__uport;						\
83
		__uport;						\
84
	})
84
	})
85
85
Lines 87-93 static inline void uart_port_deref(struct uart_port *uport) Link Here
87
	({								\
87
	({								\
88
		struct uart_port *__uport = uport;			\
88
		struct uart_port *__uport = uport;			\
89
		if (__uport) {						\
89
		if (__uport) {						\
90
			spin_unlock_irqrestore(&__uport->lock, flags);	\
90
			uart_port_unlock_irqrestore(__uport, flags);	\
91
			uart_port_deref(__uport);			\
91
			uart_port_deref(__uport);			\
92
		}							\
92
		}							\
93
	})
93
	})
Lines 179-190 uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) Link Here
179
	unsigned long flags;
179
	unsigned long flags;
180
	unsigned int old;
180
	unsigned int old;
181
181
182
	spin_lock_irqsave(&port->lock, flags);
182
	uart_port_lock_irqsave(port, &flags);
183
	old = port->mctrl;
183
	old = port->mctrl;
184
	port->mctrl = (old & ~clear) | set;
184
	port->mctrl = (old & ~clear) | set;
185
	if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
185
	if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
186
		port->ops->set_mctrl(port, port->mctrl);
186
		port->ops->set_mctrl(port, port->mctrl);
187
	spin_unlock_irqrestore(&port->lock, flags);
187
	uart_port_unlock_irqrestore(port, flags);
188
}
188
}
189
189
190
#define uart_set_mctrl(port, set)	uart_update_mctrl(port, set, 0)
190
#define uart_set_mctrl(port, set)	uart_update_mctrl(port, set, 0)
Lines 219-225 static void uart_change_line_settings(struct tty_struct *tty, struct uart_state Link Here
219
	/*
219
	/*
220
	 * Set modem status enables based on termios cflag
220
	 * Set modem status enables based on termios cflag
221
	 */
221
	 */
222
	spin_lock_irq(&uport->lock);
222
	uart_port_lock_irq(uport);
223
	if (termios->c_cflag & CRTSCTS)
223
	if (termios->c_cflag & CRTSCTS)
224
		uport->status |= UPSTAT_CTS_ENABLE;
224
		uport->status |= UPSTAT_CTS_ENABLE;
225
	else
225
	else
Lines 240-246 static void uart_change_line_settings(struct tty_struct *tty, struct uart_state Link Here
240
		else
240
		else
241
			__uart_start(state);
241
			__uart_start(state);
242
	}
242
	}
243
	spin_unlock_irq(&uport->lock);
243
	uart_port_unlock_irq(uport);
244
}
244
}
245
245
246
/*
246
/*
Lines 702-712 static void uart_send_xchar(struct tty_struct *tty, char ch) Link Here
702
	if (port->ops->send_xchar)
702
	if (port->ops->send_xchar)
703
		port->ops->send_xchar(port, ch);
703
		port->ops->send_xchar(port, ch);
704
	else {
704
	else {
705
		spin_lock_irqsave(&port->lock, flags);
705
		uart_port_lock_irqsave(port, &flags);
706
		port->x_char = ch;
706
		port->x_char = ch;
707
		if (ch)
707
		if (ch)
708
			port->ops->start_tx(port);
708
			port->ops->start_tx(port);
709
		spin_unlock_irqrestore(&port->lock, flags);
709
		uart_port_unlock_irqrestore(port, flags);
710
	}
710
	}
711
	uart_port_deref(port);
711
	uart_port_deref(port);
712
}
712
}
Lines 1085-1093 static int uart_tiocmget(struct tty_struct *tty) Link Here
1085
1085
1086
	if (!tty_io_error(tty)) {
1086
	if (!tty_io_error(tty)) {
1087
		result = uport->mctrl;
1087
		result = uport->mctrl;
1088
		spin_lock_irq(&uport->lock);
1088
		uart_port_lock_irq(uport);
1089
		result |= uport->ops->get_mctrl(uport);
1089
		result |= uport->ops->get_mctrl(uport);
1090
		spin_unlock_irq(&uport->lock);
1090
		uart_port_unlock_irq(uport);
1091
	}
1091
	}
1092
out:
1092
out:
1093
	mutex_unlock(&port->mutex);
1093
	mutex_unlock(&port->mutex);
Lines 1223-1238 static int uart_wait_modem_status(struct uart_state *state, unsigned long arg) Link Here
1223
	uport = uart_port_ref(state);
1223
	uport = uart_port_ref(state);
1224
	if (!uport)
1224
	if (!uport)
1225
		return -EIO;
1225
		return -EIO;
1226
	spin_lock_irq(&uport->lock);
1226
	uart_port_lock_irq(uport);
1227
	memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
1227
	memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
1228
	uart_enable_ms(uport);
1228
	uart_enable_ms(uport);
1229
	spin_unlock_irq(&uport->lock);
1229
	uart_port_unlock_irq(uport);
1230
1230
1231
	add_wait_queue(&port->delta_msr_wait, &wait);
1231
	add_wait_queue(&port->delta_msr_wait, &wait);
1232
	for (;;) {
1232
	for (;;) {
1233
		spin_lock_irq(&uport->lock);
1233
		uart_port_lock_irq(uport);
1234
		memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
1234
		memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
1235
		spin_unlock_irq(&uport->lock);
1235
		uart_port_unlock_irq(uport);
1236
1236
1237
		set_current_state(TASK_INTERRUPTIBLE);
1237
		set_current_state(TASK_INTERRUPTIBLE);
1238
1238
Lines 1277-1285 static int uart_get_icount(struct tty_struct *tty, Link Here
1277
	uport = uart_port_ref(state);
1277
	uport = uart_port_ref(state);
1278
	if (!uport)
1278
	if (!uport)
1279
		return -EIO;
1279
		return -EIO;
1280
	spin_lock_irq(&uport->lock);
1280
	uart_port_lock_irq(uport);
1281
	memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
1281
	memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
1282
	spin_unlock_irq(&uport->lock);
1282
	uart_port_unlock_irq(uport);
1283
	uart_port_deref(uport);
1283
	uart_port_deref(uport);
1284
1284
1285
	icount->cts         = cnow.cts;
1285
	icount->cts         = cnow.cts;
Lines 1413-1421 static int uart_rs485_config(struct uart_port *port) Link Here
1413
	uart_sanitize_serial_rs485(port, rs485);
1413
	uart_sanitize_serial_rs485(port, rs485);
1414
	uart_set_rs485_termination(port, rs485);
1414
	uart_set_rs485_termination(port, rs485);
1415
1415
1416
	spin_lock_irqsave(&port->lock, flags);
1416
	uart_port_lock_irqsave(port, &flags);
1417
	ret = port->rs485_config(port, NULL, rs485);
1417
	ret = port->rs485_config(port, NULL, rs485);
1418
	spin_unlock_irqrestore(&port->lock, flags);
1418
	uart_port_unlock_irqrestore(port, flags);
1419
	if (ret)
1419
	if (ret)
1420
		memset(rs485, 0, sizeof(*rs485));
1420
		memset(rs485, 0, sizeof(*rs485));
1421
1421
Lines 1428-1436 static int uart_get_rs485_config(struct uart_port *port, Link Here
1428
	unsigned long flags;
1428
	unsigned long flags;
1429
	struct serial_rs485 aux;
1429
	struct serial_rs485 aux;
1430
1430
1431
	spin_lock_irqsave(&port->lock, flags);
1431
	uart_port_lock_irqsave(port, &flags);
1432
	aux = port->rs485;
1432
	aux = port->rs485;
1433
	spin_unlock_irqrestore(&port->lock, flags);
1433
	uart_port_unlock_irqrestore(port, flags);
1434
1434
1435
	if (copy_to_user(rs485, &aux, sizeof(aux)))
1435
	if (copy_to_user(rs485, &aux, sizeof(aux)))
1436
		return -EFAULT;
1436
		return -EFAULT;
Lines 1457-1463 static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port, Link Here
1457
	uart_sanitize_serial_rs485(port, &rs485);
1457
	uart_sanitize_serial_rs485(port, &rs485);
1458
	uart_set_rs485_termination(port, &rs485);
1458
	uart_set_rs485_termination(port, &rs485);
1459
1459
1460
	spin_lock_irqsave(&port->lock, flags);
1460
	uart_port_lock_irqsave(port, &flags);
1461
	ret = port->rs485_config(port, &tty->termios, &rs485);
1461
	ret = port->rs485_config(port, &tty->termios, &rs485);
1462
	if (!ret) {
1462
	if (!ret) {
1463
		port->rs485 = rs485;
1463
		port->rs485 = rs485;
Lines 1466-1472 static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port, Link Here
1466
		if (!(rs485.flags & SER_RS485_ENABLED))
1466
		if (!(rs485.flags & SER_RS485_ENABLED))
1467
			port->ops->set_mctrl(port, port->mctrl);
1467
			port->ops->set_mctrl(port, port->mctrl);
1468
	}
1468
	}
1469
	spin_unlock_irqrestore(&port->lock, flags);
1469
	uart_port_unlock_irqrestore(port, flags);
1470
	if (ret)
1470
	if (ret)
1471
		return ret;
1471
		return ret;
1472
1472
Lines 1485-1493 static int uart_get_iso7816_config(struct uart_port *port, Link Here
1485
	if (!port->iso7816_config)
1485
	if (!port->iso7816_config)
1486
		return -ENOTTY;
1486
		return -ENOTTY;
1487
1487
1488
	spin_lock_irqsave(&port->lock, flags);
1488
	uart_port_lock_irqsave(port, &flags);
1489
	aux = port->iso7816;
1489
	aux = port->iso7816;
1490
	spin_unlock_irqrestore(&port->lock, flags);
1490
	uart_port_unlock_irqrestore(port, flags);
1491
1491
1492
	if (copy_to_user(iso7816, &aux, sizeof(aux)))
1492
	if (copy_to_user(iso7816, &aux, sizeof(aux)))
1493
		return -EFAULT;
1493
		return -EFAULT;
Lines 1516-1524 static int uart_set_iso7816_config(struct uart_port *port, Link Here
1516
		if (iso7816.reserved[i])
1516
		if (iso7816.reserved[i])
1517
			return -EINVAL;
1517
			return -EINVAL;
1518
1518
1519
	spin_lock_irqsave(&port->lock, flags);
1519
	uart_port_lock_irqsave(port, &flags);
1520
	ret = port->iso7816_config(port, &iso7816);
1520
	ret = port->iso7816_config(port, &iso7816);
1521
	spin_unlock_irqrestore(&port->lock, flags);
1521
	uart_port_unlock_irqrestore(port, flags);
1522
	if (ret)
1522
	if (ret)
1523
		return ret;
1523
		return ret;
1524
1524
Lines 1735-1743 static void uart_tty_port_shutdown(struct tty_port *port) Link Here
1735
	if (WARN(!uport, "detached port still initialized!\n"))
1735
	if (WARN(!uport, "detached port still initialized!\n"))
1736
		return;
1736
		return;
1737
1737
1738
	spin_lock_irq(&uport->lock);
1738
	uart_port_lock_irq(uport);
1739
	uport->ops->stop_rx(uport);
1739
	uport->ops->stop_rx(uport);
1740
	spin_unlock_irq(&uport->lock);
1740
	uart_port_unlock_irq(uport);
1741
1741
1742
	uart_port_shutdown(port);
1742
	uart_port_shutdown(port);
1743
1743
Lines 1751-1760 static void uart_tty_port_shutdown(struct tty_port *port) Link Here
1751
	/*
1751
	/*
1752
	 * Free the transmit buffer.
1752
	 * Free the transmit buffer.
1753
	 */
1753
	 */
1754
	spin_lock_irq(&uport->lock);
1754
	uart_port_lock_irq(uport);
1755
	buf = state->xmit.buf;
1755
	buf = state->xmit.buf;
1756
	state->xmit.buf = NULL;
1756
	state->xmit.buf = NULL;
1757
	spin_unlock_irq(&uport->lock);
1757
	uart_port_unlock_irq(uport);
1758
1758
1759
	free_page((unsigned long)buf);
1759
	free_page((unsigned long)buf);
1760
1760
Lines 1897-1906 static bool uart_carrier_raised(struct tty_port *port) Link Here
1897
	 */
1897
	 */
1898
	if (WARN_ON(!uport))
1898
	if (WARN_ON(!uport))
1899
		return true;
1899
		return true;
1900
	spin_lock_irq(&uport->lock);
1900
	uart_port_lock_irq(uport);
1901
	uart_enable_ms(uport);
1901
	uart_enable_ms(uport);
1902
	mctrl = uport->ops->get_mctrl(uport);
1902
	mctrl = uport->ops->get_mctrl(uport);
1903
	spin_unlock_irq(&uport->lock);
1903
	uart_port_unlock_irq(uport);
1904
	uart_port_deref(uport);
1904
	uart_port_deref(uport);
1905
1905
1906
	return mctrl & TIOCM_CAR;
1906
	return mctrl & TIOCM_CAR;
Lines 2017-2025 static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) Link Here
2017
		pm_state = state->pm_state;
2017
		pm_state = state->pm_state;
2018
		if (pm_state != UART_PM_STATE_ON)
2018
		if (pm_state != UART_PM_STATE_ON)
2019
			uart_change_pm(state, UART_PM_STATE_ON);
2019
			uart_change_pm(state, UART_PM_STATE_ON);
2020
		spin_lock_irq(&uport->lock);
2020
		uart_port_lock_irq(uport);
2021
		status = uport->ops->get_mctrl(uport);
2021
		status = uport->ops->get_mctrl(uport);
2022
		spin_unlock_irq(&uport->lock);
2022
		uart_port_unlock_irq(uport);
2023
		if (pm_state != UART_PM_STATE_ON)
2023
		if (pm_state != UART_PM_STATE_ON)
2024
			uart_change_pm(state, pm_state);
2024
			uart_change_pm(state, pm_state);
2025
2025
Lines 2358-2366 int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) Link Here
2358
	 */
2358
	 */
2359
	if (!console_suspend_enabled && uart_console(uport)) {
2359
	if (!console_suspend_enabled && uart_console(uport)) {
2360
		if (uport->ops->start_rx) {
2360
		if (uport->ops->start_rx) {
2361
			spin_lock_irq(&uport->lock);
2361
			uart_port_lock_irq(uport);
2362
			uport->ops->stop_rx(uport);
2362
			uport->ops->stop_rx(uport);
2363
			spin_unlock_irq(&uport->lock);
2363
			uart_port_unlock_irq(uport);
2364
		}
2364
		}
2365
		goto unlock;
2365
		goto unlock;
2366
	}
2366
	}
Lines 2375-2381 int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) Link Here
2375
		tty_port_set_suspended(port, true);
2375
		tty_port_set_suspended(port, true);
2376
		tty_port_set_initialized(port, false);
2376
		tty_port_set_initialized(port, false);
2377
2377
2378
		spin_lock_irq(&uport->lock);
2378
		uart_port_lock_irq(uport);
2379
		ops->stop_tx(uport);
2379
		ops->stop_tx(uport);
2380
		if (!(uport->rs485.flags & SER_RS485_ENABLED))
2380
		if (!(uport->rs485.flags & SER_RS485_ENABLED))
2381
			ops->set_mctrl(uport, 0);
2381
			ops->set_mctrl(uport, 0);
Lines 2383-2389 int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) Link Here
2383
		mctrl = uport->mctrl;
2383
		mctrl = uport->mctrl;
2384
		uport->mctrl = 0;
2384
		uport->mctrl = 0;
2385
		ops->stop_rx(uport);
2385
		ops->stop_rx(uport);
2386
		spin_unlock_irq(&uport->lock);
2386
		uart_port_unlock_irq(uport);
2387
2387
2388
		/*
2388
		/*
2389
		 * Wait for the transmitter to empty.
2389
		 * Wait for the transmitter to empty.
Lines 2455-2463 int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) Link Here
2455
			uart_change_pm(state, UART_PM_STATE_ON);
2455
			uart_change_pm(state, UART_PM_STATE_ON);
2456
		uport->ops->set_termios(uport, &termios, NULL);
2456
		uport->ops->set_termios(uport, &termios, NULL);
2457
		if (!console_suspend_enabled && uport->ops->start_rx) {
2457
		if (!console_suspend_enabled && uport->ops->start_rx) {
2458
			spin_lock_irq(&uport->lock);
2458
			uart_port_lock_irq(uport);
2459
			uport->ops->start_rx(uport);
2459
			uport->ops->start_rx(uport);
2460
			spin_unlock_irq(&uport->lock);
2460
			uart_port_unlock_irq(uport);
2461
		}
2461
		}
2462
		if (console_suspend_enabled)
2462
		if (console_suspend_enabled)
2463
			console_start(uport->cons);
2463
			console_start(uport->cons);
Lines 2468-2477 int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) Link Here
2468
		int ret;
2468
		int ret;
2469
2469
2470
		uart_change_pm(state, UART_PM_STATE_ON);
2470
		uart_change_pm(state, UART_PM_STATE_ON);
2471
		spin_lock_irq(&uport->lock);
2471
		uart_port_lock_irq(uport);
2472
		if (!(uport->rs485.flags & SER_RS485_ENABLED))
2472
		if (!(uport->rs485.flags & SER_RS485_ENABLED))
2473
			ops->set_mctrl(uport, 0);
2473
			ops->set_mctrl(uport, 0);
2474
		spin_unlock_irq(&uport->lock);
2474
		uart_port_unlock_irq(uport);
2475
		if (console_suspend_enabled || !uart_console(uport)) {
2475
		if (console_suspend_enabled || !uart_console(uport)) {
2476
			/* Protected by port mutex for now */
2476
			/* Protected by port mutex for now */
2477
			struct tty_struct *tty = port->tty;
2477
			struct tty_struct *tty = port->tty;
Lines 2481-2491 int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) Link Here
2481
				if (tty)
2481
				if (tty)
2482
					uart_change_line_settings(tty, state, NULL);
2482
					uart_change_line_settings(tty, state, NULL);
2483
				uart_rs485_config(uport);
2483
				uart_rs485_config(uport);
2484
				spin_lock_irq(&uport->lock);
2484
				uart_port_lock_irq(uport);
2485
				if (!(uport->rs485.flags & SER_RS485_ENABLED))
2485
				if (!(uport->rs485.flags & SER_RS485_ENABLED))
2486
					ops->set_mctrl(uport, uport->mctrl);
2486
					ops->set_mctrl(uport, uport->mctrl);
2487
				ops->start_tx(uport);
2487
				ops->start_tx(uport);
2488
				spin_unlock_irq(&uport->lock);
2488
				uart_port_unlock_irq(uport);
2489
				tty_port_set_initialized(port, true);
2489
				tty_port_set_initialized(port, true);
2490
			} else {
2490
			} else {
2491
				/*
2491
				/*
Lines 2588-2598 uart_configure_port(struct uart_driver *drv, struct uart_state *state, Link Here
2588
		 * keep the DTR setting that is set in uart_set_options()
2588
		 * keep the DTR setting that is set in uart_set_options()
2589
		 * We probably don't need a spinlock around this, but
2589
		 * We probably don't need a spinlock around this, but
2590
		 */
2590
		 */
2591
		spin_lock_irqsave(&port->lock, flags);
2591
		uart_port_lock_irqsave(port, &flags);
2592
		port->mctrl &= TIOCM_DTR;
2592
		port->mctrl &= TIOCM_DTR;
2593
		if (!(port->rs485.flags & SER_RS485_ENABLED))
2593
		if (!(port->rs485.flags & SER_RS485_ENABLED))
2594
			port->ops->set_mctrl(port, port->mctrl);
2594
			port->ops->set_mctrl(port, port->mctrl);
2595
		spin_unlock_irqrestore(&port->lock, flags);
2595
		uart_port_unlock_irqrestore(port, flags);
2596
2596
2597
		uart_rs485_config(port);
2597
		uart_rs485_config(port);
2598
2598
(-)a/drivers/tty/serial/serial_mctrl_gpio.c (-2 / +2 lines)
Lines 184-190 static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context) Link Here
184
184
185
	mctrl_gpio_get(gpios, &mctrl);
185
	mctrl_gpio_get(gpios, &mctrl);
186
186
187
	spin_lock_irqsave(&port->lock, flags);
187
	uart_port_lock_irqsave(port, &flags);
188
188
189
	mctrl_diff = mctrl ^ gpios->mctrl_prev;
189
	mctrl_diff = mctrl ^ gpios->mctrl_prev;
190
	gpios->mctrl_prev = mctrl;
190
	gpios->mctrl_prev = mctrl;
Lines 205-211 static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context) Link Here
205
		wake_up_interruptible(&port->state->port.delta_msr_wait);
205
		wake_up_interruptible(&port->state->port.delta_msr_wait);
206
	}
206
	}
207
207
208
	spin_unlock_irqrestore(&port->lock, flags);
208
	uart_port_unlock_irqrestore(port, flags);
209
209
210
	return IRQ_HANDLED;
210
	return IRQ_HANDLED;
211
}
211
}
(-)a/drivers/tty/serial/serial_port.c (-2 / +2 lines)
Lines 35-44 static int serial_port_runtime_resume(struct device *dev) Link Here
35
		goto out;
35
		goto out;
36
36
37
	/* Flush any pending TX for the port */
37
	/* Flush any pending TX for the port */
38
	spin_lock_irqsave(&port->lock, flags);
38
	uart_port_lock_irqsave(port, &flags);
39
	if (__serial_port_busy(port))
39
	if (__serial_port_busy(port))
40
		port->ops->start_tx(port);
40
		port->ops->start_tx(port);
41
	spin_unlock_irqrestore(&port->lock, flags);
41
	uart_port_unlock_irqrestore(port, flags);
42
42
43
out:
43
out:
44
	pm_runtime_mark_last_busy(dev);
44
	pm_runtime_mark_last_busy(dev);
(-)a/drivers/tty/serial/serial_txx9.c (-13 / +13 lines)
Lines 335-347 static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id) Link Here
335
	unsigned int status;
335
	unsigned int status;
336
336
337
	while (1) {
337
	while (1) {
338
		spin_lock(&up->lock);
338
		uart_port_lock(up);
339
		status = sio_in(up, TXX9_SIDISR);
339
		status = sio_in(up, TXX9_SIDISR);
340
		if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
340
		if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
341
			status &= ~TXX9_SIDISR_TDIS;
341
			status &= ~TXX9_SIDISR_TDIS;
342
		if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
342
		if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
343
				TXX9_SIDISR_TOUT))) {
343
				TXX9_SIDISR_TOUT))) {
344
			spin_unlock(&up->lock);
344
			uart_port_unlock(up);
345
			break;
345
			break;
346
		}
346
		}
347
347
Lines 353-359 static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id) Link Here
353
		sio_mask(up, TXX9_SIDISR,
353
		sio_mask(up, TXX9_SIDISR,
354
			 TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
354
			 TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
355
			 TXX9_SIDISR_TOUT);
355
			 TXX9_SIDISR_TOUT);
356
		spin_unlock(&up->lock);
356
		uart_port_unlock(up);
357
357
358
		if (pass_counter++ > PASS_LIMIT)
358
		if (pass_counter++ > PASS_LIMIT)
359
			break;
359
			break;
Lines 367-375 static unsigned int serial_txx9_tx_empty(struct uart_port *up) Link Here
367
	unsigned long flags;
367
	unsigned long flags;
368
	unsigned int ret;
368
	unsigned int ret;
369
369
370
	spin_lock_irqsave(&up->lock, flags);
370
	uart_port_lock_irqsave(up, &flags);
371
	ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
371
	ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
372
	spin_unlock_irqrestore(&up->lock, flags);
372
	uart_port_unlock_irqrestore(up, flags);
373
373
374
	return ret;
374
	return ret;
375
}
375
}
Lines 399-410 static void serial_txx9_break_ctl(struct uart_port *up, int break_state) Link Here
399
{
399
{
400
	unsigned long flags;
400
	unsigned long flags;
401
401
402
	spin_lock_irqsave(&up->lock, flags);
402
	uart_port_lock_irqsave(up, &flags);
403
	if (break_state == -1)
403
	if (break_state == -1)
404
		sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
404
		sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
405
	else
405
	else
406
		sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
406
		sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
407
	spin_unlock_irqrestore(&up->lock, flags);
407
	uart_port_unlock_irqrestore(up, flags);
408
}
408
}
409
409
410
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
410
#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
Lines 517-525 static int serial_txx9_startup(struct uart_port *up) Link Here
517
	/*
517
	/*
518
	 * Now, initialize the UART
518
	 * Now, initialize the UART
519
	 */
519
	 */
520
	spin_lock_irqsave(&up->lock, flags);
520
	uart_port_lock_irqsave(up, &flags);
521
	serial_txx9_set_mctrl(up, up->mctrl);
521
	serial_txx9_set_mctrl(up, up->mctrl);
522
	spin_unlock_irqrestore(&up->lock, flags);
522
	uart_port_unlock_irqrestore(up, flags);
523
523
524
	/* Enable RX/TX */
524
	/* Enable RX/TX */
525
	sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
525
	sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
Lines 541-549 static void serial_txx9_shutdown(struct uart_port *up) Link Here
541
	 */
541
	 */
542
	sio_out(up, TXX9_SIDICR, 0);	/* disable all intrs */
542
	sio_out(up, TXX9_SIDICR, 0);	/* disable all intrs */
543
543
544
	spin_lock_irqsave(&up->lock, flags);
544
	uart_port_lock_irqsave(up, &flags);
545
	serial_txx9_set_mctrl(up, up->mctrl);
545
	serial_txx9_set_mctrl(up, up->mctrl);
546
	spin_unlock_irqrestore(&up->lock, flags);
546
	uart_port_unlock_irqrestore(up, flags);
547
547
548
	/*
548
	/*
549
	 * Disable break condition
549
	 * Disable break condition
Lines 625-631 serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios, Link Here
625
	 * Ok, we're now changing the port state.  Do it with
625
	 * Ok, we're now changing the port state.  Do it with
626
	 * interrupts disabled.
626
	 * interrupts disabled.
627
	 */
627
	 */
628
	spin_lock_irqsave(&up->lock, flags);
628
	uart_port_lock_irqsave(up, &flags);
629
629
630
	/*
630
	/*
631
	 * Update the per-port timeout.
631
	 * Update the per-port timeout.
Lines 676-682 serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios, Link Here
676
	sio_out(up, TXX9_SIFCR, fcr);
676
	sio_out(up, TXX9_SIFCR, fcr);
677
677
678
	serial_txx9_set_mctrl(up, up->mctrl);
678
	serial_txx9_set_mctrl(up, up->mctrl);
679
	spin_unlock_irqrestore(&up->lock, flags);
679
	uart_port_unlock_irqrestore(up, flags);
680
}
680
}
681
681
682
static void
682
static void
(-)a/drivers/tty/serial/sh-sci.c (-34 / +34 lines)
Lines 1205-1211 static void sci_dma_tx_complete(void *arg) Link Here
1205
1205
1206
	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1206
	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1207
1207
1208
	spin_lock_irqsave(&port->lock, flags);
1208
	uart_port_lock_irqsave(port, &flags);
1209
1209
1210
	uart_xmit_advance(port, s->tx_dma_len);
1210
	uart_xmit_advance(port, s->tx_dma_len);
1211
1211
Lines 1229-1235 static void sci_dma_tx_complete(void *arg) Link Here
1229
		}
1229
		}
1230
	}
1230
	}
1231
1231
1232
	spin_unlock_irqrestore(&port->lock, flags);
1232
	uart_port_unlock_irqrestore(port, flags);
1233
}
1233
}
1234
1234
1235
/* Locking: called with port lock held */
1235
/* Locking: called with port lock held */
Lines 1320-1326 static void sci_dma_rx_complete(void *arg) Link Here
1320
	dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
1320
	dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
1321
		s->active_rx);
1321
		s->active_rx);
1322
1322
1323
	spin_lock_irqsave(&port->lock, flags);
1323
	uart_port_lock_irqsave(port, &flags);
1324
1324
1325
	active = sci_dma_rx_find_active(s);
1325
	active = sci_dma_rx_find_active(s);
1326
	if (active >= 0)
1326
	if (active >= 0)
Lines 1347-1366 static void sci_dma_rx_complete(void *arg) Link Here
1347
1347
1348
	dma_async_issue_pending(chan);
1348
	dma_async_issue_pending(chan);
1349
1349
1350
	spin_unlock_irqrestore(&port->lock, flags);
1350
	uart_port_unlock_irqrestore(port, flags);
1351
	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
1351
	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
1352
		__func__, s->cookie_rx[active], active, s->active_rx);
1352
		__func__, s->cookie_rx[active], active, s->active_rx);
1353
	return;
1353
	return;
1354
1354
1355
fail:
1355
fail:
1356
	spin_unlock_irqrestore(&port->lock, flags);
1356
	uart_port_unlock_irqrestore(port, flags);
1357
	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1357
	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1358
	/* Switch to PIO */
1358
	/* Switch to PIO */
1359
	spin_lock_irqsave(&port->lock, flags);
1359
	uart_port_lock_irqsave(port, &flags);
1360
	dmaengine_terminate_async(chan);
1360
	dmaengine_terminate_async(chan);
1361
	sci_dma_rx_chan_invalidate(s);
1361
	sci_dma_rx_chan_invalidate(s);
1362
	sci_dma_rx_reenable_irq(s);
1362
	sci_dma_rx_reenable_irq(s);
1363
	spin_unlock_irqrestore(&port->lock, flags);
1363
	uart_port_unlock_irqrestore(port, flags);
1364
}
1364
}
1365
1365
1366
static void sci_dma_tx_release(struct sci_port *s)
1366
static void sci_dma_tx_release(struct sci_port *s)
Lines 1409-1421 static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held) Link Here
1409
fail:
1409
fail:
1410
	/* Switch to PIO */
1410
	/* Switch to PIO */
1411
	if (!port_lock_held)
1411
	if (!port_lock_held)
1412
		spin_lock_irqsave(&port->lock, flags);
1412
		uart_port_lock_irqsave(port, &flags);
1413
	if (i)
1413
	if (i)
1414
		dmaengine_terminate_async(chan);
1414
		dmaengine_terminate_async(chan);
1415
	sci_dma_rx_chan_invalidate(s);
1415
	sci_dma_rx_chan_invalidate(s);
1416
	sci_start_rx(port);
1416
	sci_start_rx(port);
1417
	if (!port_lock_held)
1417
	if (!port_lock_held)
1418
		spin_unlock_irqrestore(&port->lock, flags);
1418
		uart_port_unlock_irqrestore(port, flags);
1419
	return -EAGAIN;
1419
	return -EAGAIN;
1420
}
1420
}
1421
1421
Lines 1437-1450 static void sci_dma_tx_work_fn(struct work_struct *work) Link Here
1437
	 * transmit till the end, and then the rest. Take the port lock to get a
1437
	 * transmit till the end, and then the rest. Take the port lock to get a
1438
	 * consistent xmit buffer state.
1438
	 * consistent xmit buffer state.
1439
	 */
1439
	 */
1440
	spin_lock_irq(&port->lock);
1440
	uart_port_lock_irq(port);
1441
	head = xmit->head;
1441
	head = xmit->head;
1442
	tail = xmit->tail;
1442
	tail = xmit->tail;
1443
	buf = s->tx_dma_addr + tail;
1443
	buf = s->tx_dma_addr + tail;
1444
	s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
1444
	s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
1445
	if (!s->tx_dma_len) {
1445
	if (!s->tx_dma_len) {
1446
		/* Transmit buffer has been flushed */
1446
		/* Transmit buffer has been flushed */
1447
		spin_unlock_irq(&port->lock);
1447
		uart_port_unlock_irq(port);
1448
		return;
1448
		return;
1449
	}
1449
	}
1450
1450
Lines 1452-1458 static void sci_dma_tx_work_fn(struct work_struct *work) Link Here
1452
					   DMA_MEM_TO_DEV,
1452
					   DMA_MEM_TO_DEV,
1453
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1453
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1454
	if (!desc) {
1454
	if (!desc) {
1455
		spin_unlock_irq(&port->lock);
1455
		uart_port_unlock_irq(port);
1456
		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
1456
		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
1457
		goto switch_to_pio;
1457
		goto switch_to_pio;
1458
	}
1458
	}
Lines 1464-1475 static void sci_dma_tx_work_fn(struct work_struct *work) Link Here
1464
	desc->callback_param = s;
1464
	desc->callback_param = s;
1465
	s->cookie_tx = dmaengine_submit(desc);
1465
	s->cookie_tx = dmaengine_submit(desc);
1466
	if (dma_submit_error(s->cookie_tx)) {
1466
	if (dma_submit_error(s->cookie_tx)) {
1467
		spin_unlock_irq(&port->lock);
1467
		uart_port_unlock_irq(port);
1468
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1468
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1469
		goto switch_to_pio;
1469
		goto switch_to_pio;
1470
	}
1470
	}
1471
1471
1472
	spin_unlock_irq(&port->lock);
1472
	uart_port_unlock_irq(port);
1473
	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
1473
	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
1474
		__func__, xmit->buf, tail, head, s->cookie_tx);
1474
		__func__, xmit->buf, tail, head, s->cookie_tx);
1475
1475
Lines 1477-1486 static void sci_dma_tx_work_fn(struct work_struct *work) Link Here
1477
	return;
1477
	return;
1478
1478
1479
switch_to_pio:
1479
switch_to_pio:
1480
	spin_lock_irqsave(&port->lock, flags);
1480
	uart_port_lock_irqsave(port, &flags);
1481
	s->chan_tx = NULL;
1481
	s->chan_tx = NULL;
1482
	sci_start_tx(port);
1482
	sci_start_tx(port);
1483
	spin_unlock_irqrestore(&port->lock, flags);
1483
	uart_port_unlock_irqrestore(port, flags);
1484
	return;
1484
	return;
1485
}
1485
}
1486
1486
Lines 1497-1513 static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t) Link Here
1497
1497
1498
	dev_dbg(port->dev, "DMA Rx timed out\n");
1498
	dev_dbg(port->dev, "DMA Rx timed out\n");
1499
1499
1500
	spin_lock_irqsave(&port->lock, flags);
1500
	uart_port_lock_irqsave(port, &flags);
1501
1501
1502
	active = sci_dma_rx_find_active(s);
1502
	active = sci_dma_rx_find_active(s);
1503
	if (active < 0) {
1503
	if (active < 0) {
1504
		spin_unlock_irqrestore(&port->lock, flags);
1504
		uart_port_unlock_irqrestore(port, flags);
1505
		return HRTIMER_NORESTART;
1505
		return HRTIMER_NORESTART;
1506
	}
1506
	}
1507
1507
1508
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
1508
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
1509
	if (status == DMA_COMPLETE) {
1509
	if (status == DMA_COMPLETE) {
1510
		spin_unlock_irqrestore(&port->lock, flags);
1510
		uart_port_unlock_irqrestore(port, flags);
1511
		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
1511
		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
1512
			s->active_rx, active);
1512
			s->active_rx, active);
1513
1513
Lines 1525-1531 static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t) Link Here
1525
	 */
1525
	 */
1526
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
1526
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
1527
	if (status == DMA_COMPLETE) {
1527
	if (status == DMA_COMPLETE) {
1528
		spin_unlock_irqrestore(&port->lock, flags);
1528
		uart_port_unlock_irqrestore(port, flags);
1529
		dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
1529
		dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
1530
		return HRTIMER_NORESTART;
1530
		return HRTIMER_NORESTART;
1531
	}
1531
	}
Lines 1546-1552 static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t) Link Here
1546
1546
1547
	sci_dma_rx_reenable_irq(s);
1547
	sci_dma_rx_reenable_irq(s);
1548
1548
1549
	spin_unlock_irqrestore(&port->lock, flags);
1549
	uart_port_unlock_irqrestore(port, flags);
1550
1550
1551
	return HRTIMER_NORESTART;
1551
	return HRTIMER_NORESTART;
1552
}
1552
}
Lines 1770-1778 static irqreturn_t sci_tx_interrupt(int irq, void *ptr) Link Here
1770
	struct uart_port *port = ptr;
1770
	struct uart_port *port = ptr;
1771
	unsigned long flags;
1771
	unsigned long flags;
1772
1772
1773
	spin_lock_irqsave(&port->lock, flags);
1773
	uart_port_lock_irqsave(port, &flags);
1774
	sci_transmit_chars(port);
1774
	sci_transmit_chars(port);
1775
	spin_unlock_irqrestore(&port->lock, flags);
1775
	uart_port_unlock_irqrestore(port, flags);
1776
1776
1777
	return IRQ_HANDLED;
1777
	return IRQ_HANDLED;
1778
}
1778
}
Lines 1786-1796 static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr) Link Here
1786
	if (port->type != PORT_SCI)
1786
	if (port->type != PORT_SCI)
1787
		return sci_tx_interrupt(irq, ptr);
1787
		return sci_tx_interrupt(irq, ptr);
1788
1788
1789
	spin_lock_irqsave(&port->lock, flags);
1789
	uart_port_lock_irqsave(port, &flags);
1790
	ctrl = serial_port_in(port, SCSCR);
1790
	ctrl = serial_port_in(port, SCSCR);
1791
	ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
1791
	ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
1792
	serial_port_out(port, SCSCR, ctrl);
1792
	serial_port_out(port, SCSCR, ctrl);
1793
	spin_unlock_irqrestore(&port->lock, flags);
1793
	uart_port_unlock_irqrestore(port, flags);
1794
1794
1795
	return IRQ_HANDLED;
1795
	return IRQ_HANDLED;
1796
}
1796
}
Lines 2187-2193 static void sci_break_ctl(struct uart_port *port, int break_state) Link Here
2187
		return;
2187
		return;
2188
	}
2188
	}
2189
2189
2190
	spin_lock_irqsave(&port->lock, flags);
2190
	uart_port_lock_irqsave(port, &flags);
2191
	scsptr = serial_port_in(port, SCSPTR);
2191
	scsptr = serial_port_in(port, SCSPTR);
2192
	scscr = serial_port_in(port, SCSCR);
2192
	scscr = serial_port_in(port, SCSCR);
2193
2193
Lines 2201-2207 static void sci_break_ctl(struct uart_port *port, int break_state) Link Here
2201
2201
2202
	serial_port_out(port, SCSPTR, scsptr);
2202
	serial_port_out(port, SCSPTR, scsptr);
2203
	serial_port_out(port, SCSCR, scscr);
2203
	serial_port_out(port, SCSCR, scscr);
2204
	spin_unlock_irqrestore(&port->lock, flags);
2204
	uart_port_unlock_irqrestore(port, flags);
2205
}
2205
}
2206
2206
2207
static int sci_startup(struct uart_port *port)
2207
static int sci_startup(struct uart_port *port)
Lines 2233-2239 static void sci_shutdown(struct uart_port *port) Link Here
2233
	s->autorts = false;
2233
	s->autorts = false;
2234
	mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
2234
	mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
2235
2235
2236
	spin_lock_irqsave(&port->lock, flags);
2236
	uart_port_lock_irqsave(port, &flags);
2237
	sci_stop_rx(port);
2237
	sci_stop_rx(port);
2238
	sci_stop_tx(port);
2238
	sci_stop_tx(port);
2239
	/*
2239
	/*
Lines 2243-2249 static void sci_shutdown(struct uart_port *port) Link Here
2243
	scr = serial_port_in(port, SCSCR);
2243
	scr = serial_port_in(port, SCSCR);
2244
	serial_port_out(port, SCSCR, scr &
2244
	serial_port_out(port, SCSCR, scr &
2245
			(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
2245
			(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
2246
	spin_unlock_irqrestore(&port->lock, flags);
2246
	uart_port_unlock_irqrestore(port, flags);
2247
2247
2248
#ifdef CONFIG_SERIAL_SH_SCI_DMA
2248
#ifdef CONFIG_SERIAL_SH_SCI_DMA
2249
	if (s->chan_rx_saved) {
2249
	if (s->chan_rx_saved) {
Lines 2545-2551 static void sci_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2545
		serial_port_out(port, SCCKS, sccks);
2545
		serial_port_out(port, SCCKS, sccks);
2546
	}
2546
	}
2547
2547
2548
	spin_lock_irqsave(&port->lock, flags);
2548
	uart_port_lock_irqsave(port, &flags);
2549
2549
2550
	sci_reset(port);
2550
	sci_reset(port);
2551
2551
Lines 2667-2673 static void sci_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
2667
	if ((termios->c_cflag & CREAD) != 0)
2667
	if ((termios->c_cflag & CREAD) != 0)
2668
		sci_start_rx(port);
2668
		sci_start_rx(port);
2669
2669
2670
	spin_unlock_irqrestore(&port->lock, flags);
2670
	uart_port_unlock_irqrestore(port, flags);
2671
2671
2672
	sci_port_disable(s);
2672
	sci_port_disable(s);
2673
2673
Lines 3052-3060 static void serial_console_write(struct console *co, const char *s, Link Here
3052
	if (port->sysrq)
3052
	if (port->sysrq)
3053
		locked = 0;
3053
		locked = 0;
3054
	else if (oops_in_progress)
3054
	else if (oops_in_progress)
3055
		locked = spin_trylock_irqsave(&port->lock, flags);
3055
		locked = uart_port_trylock_irqsave(port, &flags);
3056
	else
3056
	else
3057
		spin_lock_irqsave(&port->lock, flags);
3057
		uart_port_lock_irqsave(port, &flags);
3058
3058
3059
	/* first save SCSCR then disable interrupts, keep clock source */
3059
	/* first save SCSCR then disable interrupts, keep clock source */
3060
	ctrl = serial_port_in(port, SCSCR);
3060
	ctrl = serial_port_in(port, SCSCR);
Lines 3074-3080 static void serial_console_write(struct console *co, const char *s, Link Here
3074
	serial_port_out(port, SCSCR, ctrl);
3074
	serial_port_out(port, SCSCR, ctrl);
3075
3075
3076
	if (locked)
3076
	if (locked)
3077
		spin_unlock_irqrestore(&port->lock, flags);
3077
		uart_port_unlock_irqrestore(port, flags);
3078
}
3078
}
3079
3079
3080
static int serial_console_setup(struct console *co, char *options)
3080
static int serial_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/sifive.c (-8 / +8 lines)
Lines 521-531 static irqreturn_t sifive_serial_irq(int irq, void *dev_id) Link Here
521
	struct sifive_serial_port *ssp = dev_id;
521
	struct sifive_serial_port *ssp = dev_id;
522
	u32 ip;
522
	u32 ip;
523
523
524
	spin_lock(&ssp->port.lock);
524
	uart_port_lock(&ssp->port);
525
525
526
	ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
526
	ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
527
	if (!ip) {
527
	if (!ip) {
528
		spin_unlock(&ssp->port.lock);
528
		uart_port_unlock(&ssp->port);
529
		return IRQ_NONE;
529
		return IRQ_NONE;
530
	}
530
	}
531
531
Lines 534-540 static irqreturn_t sifive_serial_irq(int irq, void *dev_id) Link Here
534
	if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
534
	if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
535
		__ssp_transmit_chars(ssp);
535
		__ssp_transmit_chars(ssp);
536
536
537
	spin_unlock(&ssp->port.lock);
537
	uart_port_unlock(&ssp->port);
538
538
539
	return IRQ_HANDLED;
539
	return IRQ_HANDLED;
540
}
540
}
Lines 653-659 static void sifive_serial_set_termios(struct uart_port *port, Link Here
653
				  ssp->port.uartclk / 16);
653
				  ssp->port.uartclk / 16);
654
	__ssp_update_baud_rate(ssp, rate);
654
	__ssp_update_baud_rate(ssp, rate);
655
655
656
	spin_lock_irqsave(&ssp->port.lock, flags);
656
	uart_port_lock_irqsave(&ssp->port, &flags);
657
657
658
	/* Update the per-port timeout */
658
	/* Update the per-port timeout */
659
	uart_update_timeout(port, termios->c_cflag, rate);
659
	uart_update_timeout(port, termios->c_cflag, rate);
Lines 670-676 static void sifive_serial_set_termios(struct uart_port *port, Link Here
670
	if (v != old_v)
670
	if (v != old_v)
671
		__ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
671
		__ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
672
672
673
	spin_unlock_irqrestore(&ssp->port.lock, flags);
673
	uart_port_unlock_irqrestore(&ssp->port, flags);
674
}
674
}
675
675
676
static void sifive_serial_release_port(struct uart_port *port)
676
static void sifive_serial_release_port(struct uart_port *port)
Lines 795-803 static void sifive_serial_console_write(struct console *co, const char *s, Link Here
795
	if (ssp->port.sysrq)
795
	if (ssp->port.sysrq)
796
		locked = 0;
796
		locked = 0;
797
	else if (oops_in_progress)
797
	else if (oops_in_progress)
798
		locked = spin_trylock(&ssp->port.lock);
798
		locked = uart_port_trylock(&ssp->port);
799
	else
799
	else
800
		spin_lock(&ssp->port.lock);
800
		uart_port_lock(&ssp->port);
801
801
802
	ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
802
	ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
803
	__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
803
	__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
Lines 807-813 static void sifive_serial_console_write(struct console *co, const char *s, Link Here
807
	__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
807
	__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
808
808
809
	if (locked)
809
	if (locked)
810
		spin_unlock(&ssp->port.lock);
810
		uart_port_unlock(&ssp->port);
811
	local_irq_restore(flags);
811
	local_irq_restore(flags);
812
}
812
}
813
813
(-)a/drivers/tty/serial/sprd_serial.c (-15 / +15 lines)
Lines 247-253 static void sprd_complete_tx_dma(void *data) Link Here
247
	struct circ_buf *xmit = &port->state->xmit;
247
	struct circ_buf *xmit = &port->state->xmit;
248
	unsigned long flags;
248
	unsigned long flags;
249
249
250
	spin_lock_irqsave(&port->lock, flags);
250
	uart_port_lock_irqsave(port, &flags);
251
	dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
251
	dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
252
			 sp->tx_dma.trans_len, DMA_TO_DEVICE);
252
			 sp->tx_dma.trans_len, DMA_TO_DEVICE);
253
253
Lines 260-266 static void sprd_complete_tx_dma(void *data) Link Here
260
	    sprd_tx_dma_config(port))
260
	    sprd_tx_dma_config(port))
261
		sp->tx_dma.trans_len = 0;
261
		sp->tx_dma.trans_len = 0;
262
262
263
	spin_unlock_irqrestore(&port->lock, flags);
263
	uart_port_unlock_irqrestore(port, flags);
264
}
264
}
265
265
266
static int sprd_uart_dma_submit(struct uart_port *port,
266
static int sprd_uart_dma_submit(struct uart_port *port,
Lines 429-441 static void sprd_complete_rx_dma(void *data) Link Here
429
	enum dma_status status;
429
	enum dma_status status;
430
	unsigned long flags;
430
	unsigned long flags;
431
431
432
	spin_lock_irqsave(&port->lock, flags);
432
	uart_port_lock_irqsave(port, &flags);
433
433
434
	status = dmaengine_tx_status(sp->rx_dma.chn,
434
	status = dmaengine_tx_status(sp->rx_dma.chn,
435
				     sp->rx_dma.cookie, &state);
435
				     sp->rx_dma.cookie, &state);
436
	if (status != DMA_COMPLETE) {
436
	if (status != DMA_COMPLETE) {
437
		sprd_stop_rx(port);
437
		sprd_stop_rx(port);
438
		spin_unlock_irqrestore(&port->lock, flags);
438
		uart_port_unlock_irqrestore(port, flags);
439
		return;
439
		return;
440
	}
440
	}
441
441
Lines 449-455 static void sprd_complete_rx_dma(void *data) Link Here
449
	if (sprd_start_dma_rx(port))
449
	if (sprd_start_dma_rx(port))
450
		sprd_stop_rx(port);
450
		sprd_stop_rx(port);
451
451
452
	spin_unlock_irqrestore(&port->lock, flags);
452
	uart_port_unlock_irqrestore(port, flags);
453
}
453
}
454
454
455
static int sprd_start_dma_rx(struct uart_port *port)
455
static int sprd_start_dma_rx(struct uart_port *port)
Lines 638-649 static irqreturn_t sprd_handle_irq(int irq, void *dev_id) Link Here
638
	struct uart_port *port = dev_id;
638
	struct uart_port *port = dev_id;
639
	unsigned int ims;
639
	unsigned int ims;
640
640
641
	spin_lock(&port->lock);
641
	uart_port_lock(port);
642
642
643
	ims = serial_in(port, SPRD_IMSR);
643
	ims = serial_in(port, SPRD_IMSR);
644
644
645
	if (!ims) {
645
	if (!ims) {
646
		spin_unlock(&port->lock);
646
		uart_port_unlock(port);
647
		return IRQ_NONE;
647
		return IRQ_NONE;
648
	}
648
	}
649
649
Lines 660-666 static irqreturn_t sprd_handle_irq(int irq, void *dev_id) Link Here
660
	if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
660
	if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
661
		sprd_tx(port);
661
		sprd_tx(port);
662
662
663
	spin_unlock(&port->lock);
663
	uart_port_unlock(port);
664
664
665
	return IRQ_HANDLED;
665
	return IRQ_HANDLED;
666
}
666
}
Lines 727-739 static int sprd_startup(struct uart_port *port) Link Here
727
	serial_out(port, SPRD_CTL1, fc);
727
	serial_out(port, SPRD_CTL1, fc);
728
728
729
	/* enable interrupt */
729
	/* enable interrupt */
730
	spin_lock_irqsave(&port->lock, flags);
730
	uart_port_lock_irqsave(port, &flags);
731
	ien = serial_in(port, SPRD_IEN);
731
	ien = serial_in(port, SPRD_IEN);
732
	ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
732
	ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
733
	if (!sp->rx_dma.enable)
733
	if (!sp->rx_dma.enable)
734
		ien |= SPRD_IEN_RX_FULL;
734
		ien |= SPRD_IEN_RX_FULL;
735
	serial_out(port, SPRD_IEN, ien);
735
	serial_out(port, SPRD_IEN, ien);
736
	spin_unlock_irqrestore(&port->lock, flags);
736
	uart_port_unlock_irqrestore(port, flags);
737
737
738
	return 0;
738
	return 0;
739
}
739
}
Lines 793-799 static void sprd_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
793
			lcr |= SPRD_LCR_EVEN_PAR;
793
			lcr |= SPRD_LCR_EVEN_PAR;
794
	}
794
	}
795
795
796
	spin_lock_irqsave(&port->lock, flags);
796
	uart_port_lock_irqsave(port, &flags);
797
797
798
	/* update the per-port timeout */
798
	/* update the per-port timeout */
799
	uart_update_timeout(port, termios->c_cflag, baud);
799
	uart_update_timeout(port, termios->c_cflag, baud);
Lines 837-843 static void sprd_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
837
	fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
837
	fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
838
	serial_out(port, SPRD_CTL1, fc);
838
	serial_out(port, SPRD_CTL1, fc);
839
839
840
	spin_unlock_irqrestore(&port->lock, flags);
840
	uart_port_unlock_irqrestore(port, flags);
841
841
842
	/* Don't rewrite B0 */
842
	/* Don't rewrite B0 */
843
	if (tty_termios_baud_rate(termios))
843
	if (tty_termios_baud_rate(termios))
Lines 974-982 static void sprd_console_write(struct console *co, const char *s, Link Here
974
	if (port->sysrq)
974
	if (port->sysrq)
975
		locked = 0;
975
		locked = 0;
976
	else if (oops_in_progress)
976
	else if (oops_in_progress)
977
		locked = spin_trylock_irqsave(&port->lock, flags);
977
		locked = uart_port_trylock_irqsave(port, &flags);
978
	else
978
	else
979
		spin_lock_irqsave(&port->lock, flags);
979
		uart_port_lock_irqsave(port, &flags);
980
980
981
	uart_console_write(port, s, count, sprd_console_putchar);
981
	uart_console_write(port, s, count, sprd_console_putchar);
982
982
Lines 984-990 static void sprd_console_write(struct console *co, const char *s, Link Here
984
	wait_for_xmitr(port);
984
	wait_for_xmitr(port);
985
985
986
	if (locked)
986
	if (locked)
987
		spin_unlock_irqrestore(&port->lock, flags);
987
		uart_port_unlock_irqrestore(port, flags);
988
}
988
}
989
989
990
static int sprd_console_setup(struct console *co, char *options)
990
static int sprd_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/st-asc.c (-9 / +9 lines)
Lines 319-325 static irqreturn_t asc_interrupt(int irq, void *ptr) Link Here
319
	struct uart_port *port = ptr;
319
	struct uart_port *port = ptr;
320
	u32 status;
320
	u32 status;
321
321
322
	spin_lock(&port->lock);
322
	uart_port_lock(port);
323
323
324
	status = asc_in(port, ASC_STA);
324
	status = asc_in(port, ASC_STA);
325
325
Lines 334-340 static irqreturn_t asc_interrupt(int irq, void *ptr) Link Here
334
		asc_transmit_chars(port);
334
		asc_transmit_chars(port);
335
	}
335
	}
336
336
337
	spin_unlock(&port->lock);
337
	uart_port_unlock(port);
338
338
339
	return IRQ_HANDLED;
339
	return IRQ_HANDLED;
340
}
340
}
Lines 452-461 static void asc_pm(struct uart_port *port, unsigned int state, Link Here
452
		 * we can come to turning it off. Note this is not called with
452
		 * we can come to turning it off. Note this is not called with
453
		 * the port spinlock held.
453
		 * the port spinlock held.
454
		 */
454
		 */
455
		spin_lock_irqsave(&port->lock, flags);
455
		uart_port_lock_irqsave(port, &flags);
456
		ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
456
		ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
457
		asc_out(port, ASC_CTL, ctl);
457
		asc_out(port, ASC_CTL, ctl);
458
		spin_unlock_irqrestore(&port->lock, flags);
458
		uart_port_unlock_irqrestore(port, flags);
459
		clk_disable_unprepare(ascport->clk);
459
		clk_disable_unprepare(ascport->clk);
460
		break;
460
		break;
461
	}
461
	}
Lines 480-486 static void asc_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
480
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
480
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
481
	cflag = termios->c_cflag;
481
	cflag = termios->c_cflag;
482
482
483
	spin_lock_irqsave(&port->lock, flags);
483
	uart_port_lock_irqsave(port, &flags);
484
484
485
	/* read control register */
485
	/* read control register */
486
	ctrl_val = asc_in(port, ASC_CTL);
486
	ctrl_val = asc_in(port, ASC_CTL);
Lines 594-600 static void asc_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
594
	/* write final value and enable port */
594
	/* write final value and enable port */
595
	asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
595
	asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
596
596
597
	spin_unlock_irqrestore(&port->lock, flags);
597
	uart_port_unlock_irqrestore(port, flags);
598
}
598
}
599
599
600
static const char *asc_type(struct uart_port *port)
600
static const char *asc_type(struct uart_port *port)
Lines 849-857 static void asc_console_write(struct console *co, const char *s, unsigned count) Link Here
849
	if (port->sysrq)
849
	if (port->sysrq)
850
		locked = 0; /* asc_interrupt has already claimed the lock */
850
		locked = 0; /* asc_interrupt has already claimed the lock */
851
	else if (oops_in_progress)
851
	else if (oops_in_progress)
852
		locked = spin_trylock_irqsave(&port->lock, flags);
852
		locked = uart_port_trylock_irqsave(port, &flags);
853
	else
853
	else
854
		spin_lock_irqsave(&port->lock, flags);
854
		uart_port_lock_irqsave(port, &flags);
855
855
856
	/*
856
	/*
857
	 * Disable interrupts so we don't get the IRQ line bouncing
857
	 * Disable interrupts so we don't get the IRQ line bouncing
Lines 869-875 static void asc_console_write(struct console *co, const char *s, unsigned count) Link Here
869
	asc_out(port, ASC_INTEN, intenable);
869
	asc_out(port, ASC_INTEN, intenable);
870
870
871
	if (locked)
871
	if (locked)
872
		spin_unlock_irqrestore(&port->lock, flags);
872
		uart_port_unlock_irqrestore(port, flags);
873
}
873
}
874
874
875
static int asc_console_setup(struct console *co, char *options)
875
static int asc_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/stm32-usart.c (-19 / +19 lines)
Lines 537-543 static void stm32_usart_rx_dma_complete(void *arg) Link Here
537
	unsigned int size;
537
	unsigned int size;
538
	unsigned long flags;
538
	unsigned long flags;
539
539
540
	spin_lock_irqsave(&port->lock, flags);
540
	uart_port_lock_irqsave(port, &flags);
541
	size = stm32_usart_receive_chars(port, false);
541
	size = stm32_usart_receive_chars(port, false);
542
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
542
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
543
	if (size)
543
	if (size)
Lines 643-651 static void stm32_usart_tx_dma_complete(void *arg) Link Here
643
	stm32_usart_tx_dma_terminate(stm32port);
643
	stm32_usart_tx_dma_terminate(stm32port);
644
644
645
	/* Let's see if we have pending data to send */
645
	/* Let's see if we have pending data to send */
646
	spin_lock_irqsave(&port->lock, flags);
646
	uart_port_lock_irqsave(port, &flags);
647
	stm32_usart_transmit_chars(port);
647
	stm32_usart_transmit_chars(port);
648
	spin_unlock_irqrestore(&port->lock, flags);
648
	uart_port_unlock_irqrestore(port, flags);
649
}
649
}
650
650
651
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
651
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
Lines 889-895 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) Link Here
889
	if (!stm32_port->throttled) {
889
	if (!stm32_port->throttled) {
890
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
890
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
891
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
891
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
892
			spin_lock(&port->lock);
892
			uart_port_lock(port);
893
			size = stm32_usart_receive_chars(port, false);
893
			size = stm32_usart_receive_chars(port, false);
894
			uart_unlock_and_check_sysrq(port);
894
			uart_unlock_and_check_sysrq(port);
895
			if (size)
895
			if (size)
Lines 898-911 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) Link Here
898
	}
898
	}
899
899
900
	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
900
	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
901
		spin_lock(&port->lock);
901
		uart_port_lock(port);
902
		stm32_usart_transmit_chars(port);
902
		stm32_usart_transmit_chars(port);
903
		spin_unlock(&port->lock);
903
		uart_port_unlock(port);
904
	}
904
	}
905
905
906
	/* Receiver timeout irq for DMA RX */
906
	/* Receiver timeout irq for DMA RX */
907
	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
907
	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
908
		spin_lock(&port->lock);
908
		uart_port_lock(port);
909
		size = stm32_usart_receive_chars(port, false);
909
		size = stm32_usart_receive_chars(port, false);
910
		uart_unlock_and_check_sysrq(port);
910
		uart_unlock_and_check_sysrq(port);
911
		if (size)
911
		if (size)
Lines 993-999 static void stm32_usart_throttle(struct uart_port *port) Link Here
993
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
993
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
994
	unsigned long flags;
994
	unsigned long flags;
995
995
996
	spin_lock_irqsave(&port->lock, flags);
996
	uart_port_lock_irqsave(port, &flags);
997
997
998
	/*
998
	/*
999
	 * Pause DMA transfer, so the RX data gets queued into the FIFO.
999
	 * Pause DMA transfer, so the RX data gets queued into the FIFO.
Lines 1006-1012 static void stm32_usart_throttle(struct uart_port *port) Link Here
1006
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1006
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1007
1007
1008
	stm32_port->throttled = true;
1008
	stm32_port->throttled = true;
1009
	spin_unlock_irqrestore(&port->lock, flags);
1009
	uart_port_unlock_irqrestore(port, flags);
1010
}
1010
}
1011
1011
1012
/* Unthrottle the remote, the input buffer can now accept data. */
1012
/* Unthrottle the remote, the input buffer can now accept data. */
Lines 1016-1022 static void stm32_usart_unthrottle(struct uart_port *port) Link Here
1016
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1016
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1017
	unsigned long flags;
1017
	unsigned long flags;
1018
1018
1019
	spin_lock_irqsave(&port->lock, flags);
1019
	uart_port_lock_irqsave(port, &flags);
1020
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1020
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1021
	if (stm32_port->cr3_irq)
1021
	if (stm32_port->cr3_irq)
1022
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1022
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
Lines 1030-1036 static void stm32_usart_unthrottle(struct uart_port *port) Link Here
1030
	if (stm32_port->rx_ch)
1030
	if (stm32_port->rx_ch)
1031
		stm32_usart_rx_dma_start_or_resume(port);
1031
		stm32_usart_rx_dma_start_or_resume(port);
1032
1032
1033
	spin_unlock_irqrestore(&port->lock, flags);
1033
	uart_port_unlock_irqrestore(port, flags);
1034
}
1034
}
1035
1035
1036
/* Receive stop */
1036
/* Receive stop */
Lines 1158-1164 static void stm32_usart_set_termios(struct uart_port *port, Link Here
1158
1158
1159
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
1159
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
1160
1160
1161
	spin_lock_irqsave(&port->lock, flags);
1161
	uart_port_lock_irqsave(port, &flags);
1162
1162
1163
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1163
	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1164
						isr,
1164
						isr,
Lines 1349-1355 static void stm32_usart_set_termios(struct uart_port *port, Link Here
1349
	writel_relaxed(cr1, port->membase + ofs->cr1);
1349
	writel_relaxed(cr1, port->membase + ofs->cr1);
1350
1350
1351
	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1351
	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1352
	spin_unlock_irqrestore(&port->lock, flags);
1352
	uart_port_unlock_irqrestore(port, flags);
1353
1353
1354
	/* Handle modem control interrupts */
1354
	/* Handle modem control interrupts */
1355
	if (UART_ENABLE_MS(port, termios->c_cflag))
1355
	if (UART_ENABLE_MS(port, termios->c_cflag))
Lines 1399-1407 static void stm32_usart_pm(struct uart_port *port, unsigned int state, Link Here
1399
		pm_runtime_get_sync(port->dev);
1399
		pm_runtime_get_sync(port->dev);
1400
		break;
1400
		break;
1401
	case UART_PM_STATE_OFF:
1401
	case UART_PM_STATE_OFF:
1402
		spin_lock_irqsave(&port->lock, flags);
1402
		uart_port_lock_irqsave(port, &flags);
1403
		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1403
		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1404
		spin_unlock_irqrestore(&port->lock, flags);
1404
		uart_port_unlock_irqrestore(port, flags);
1405
		pm_runtime_put_sync(port->dev);
1405
		pm_runtime_put_sync(port->dev);
1406
		break;
1406
		break;
1407
	}
1407
	}
Lines 1884-1892 static void stm32_usart_console_write(struct console *co, const char *s, Link Here
1884
	int locked = 1;
1884
	int locked = 1;
1885
1885
1886
	if (oops_in_progress)
1886
	if (oops_in_progress)
1887
		locked = spin_trylock_irqsave(&port->lock, flags);
1887
		locked = uart_port_trylock_irqsave(port, &flags);
1888
	else
1888
	else
1889
		spin_lock_irqsave(&port->lock, flags);
1889
		uart_port_lock_irqsave(port, &flags);
1890
1890
1891
	/* Save and disable interrupts, enable the transmitter */
1891
	/* Save and disable interrupts, enable the transmitter */
1892
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
1892
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
Lines 1900-1906 static void stm32_usart_console_write(struct console *co, const char *s, Link Here
1900
	writel_relaxed(old_cr1, port->membase + ofs->cr1);
1900
	writel_relaxed(old_cr1, port->membase + ofs->cr1);
1901
1901
1902
	if (locked)
1902
	if (locked)
1903
		spin_unlock_irqrestore(&port->lock, flags);
1903
		uart_port_unlock_irqrestore(port, flags);
1904
}
1904
}
1905
1905
1906
static int stm32_usart_console_setup(struct console *co, char *options)
1906
static int stm32_usart_console_setup(struct console *co, char *options)
Lines 2035-2041 static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port, Link Here
2035
		 * low-power mode.
2035
		 * low-power mode.
2036
		 */
2036
		 */
2037
		if (stm32_port->rx_ch) {
2037
		if (stm32_port->rx_ch) {
2038
			spin_lock_irqsave(&port->lock, flags);
2038
			uart_port_lock_irqsave(port, &flags);
2039
			/* Poll data from DMA RX buffer if any */
2039
			/* Poll data from DMA RX buffer if any */
2040
			if (!stm32_usart_rx_dma_pause(stm32_port))
2040
			if (!stm32_usart_rx_dma_pause(stm32_port))
2041
				size += stm32_usart_receive_chars(port, true);
2041
				size += stm32_usart_receive_chars(port, true);
(-)a/drivers/tty/serial/sunhv.c (-14 / +14 lines)
Lines 217-226 static irqreturn_t sunhv_interrupt(int irq, void *dev_id) Link Here
217
	struct tty_port *tport;
217
	struct tty_port *tport;
218
	unsigned long flags;
218
	unsigned long flags;
219
219
220
	spin_lock_irqsave(&port->lock, flags);
220
	uart_port_lock_irqsave(port, &flags);
221
	tport = receive_chars(port);
221
	tport = receive_chars(port);
222
	transmit_chars(port);
222
	transmit_chars(port);
223
	spin_unlock_irqrestore(&port->lock, flags);
223
	uart_port_unlock_irqrestore(port, flags);
224
224
225
	if (tport)
225
	if (tport)
226
		tty_flip_buffer_push(tport);
226
		tty_flip_buffer_push(tport);
Lines 271-277 static void sunhv_send_xchar(struct uart_port *port, char ch) Link Here
271
	if (ch == __DISABLED_CHAR)
271
	if (ch == __DISABLED_CHAR)
272
		return;
272
		return;
273
273
274
	spin_lock_irqsave(&port->lock, flags);
274
	uart_port_lock_irqsave(port, &flags);
275
275
276
	while (limit-- > 0) {
276
	while (limit-- > 0) {
277
		long status = sun4v_con_putchar(ch);
277
		long status = sun4v_con_putchar(ch);
Lines 280-286 static void sunhv_send_xchar(struct uart_port *port, char ch) Link Here
280
		udelay(1);
280
		udelay(1);
281
	}
281
	}
282
282
283
	spin_unlock_irqrestore(&port->lock, flags);
283
	uart_port_unlock_irqrestore(port, flags);
284
}
284
}
285
285
286
/* port->lock held by caller.  */
286
/* port->lock held by caller.  */
Lines 295-301 static void sunhv_break_ctl(struct uart_port *port, int break_state) Link Here
295
		unsigned long flags;
295
		unsigned long flags;
296
		int limit = 10000;
296
		int limit = 10000;
297
297
298
		spin_lock_irqsave(&port->lock, flags);
298
		uart_port_lock_irqsave(port, &flags);
299
299
300
		while (limit-- > 0) {
300
		while (limit-- > 0) {
301
			long status = sun4v_con_putchar(CON_BREAK);
301
			long status = sun4v_con_putchar(CON_BREAK);
Lines 304-310 static void sunhv_break_ctl(struct uart_port *port, int break_state) Link Here
304
			udelay(1);
304
			udelay(1);
305
		}
305
		}
306
306
307
		spin_unlock_irqrestore(&port->lock, flags);
307
		uart_port_unlock_irqrestore(port, flags);
308
	}
308
	}
309
}
309
}
310
310
Lines 328-334 static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
328
	unsigned int iflag, cflag;
328
	unsigned int iflag, cflag;
329
	unsigned long flags;
329
	unsigned long flags;
330
330
331
	spin_lock_irqsave(&port->lock, flags);
331
	uart_port_lock_irqsave(port, &flags);
332
332
333
	iflag = termios->c_iflag;
333
	iflag = termios->c_iflag;
334
	cflag = termios->c_cflag;
334
	cflag = termios->c_cflag;
Lines 343-349 static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
343
	uart_update_timeout(port, cflag,
343
	uart_update_timeout(port, cflag,
344
			    (port->uartclk / (16 * quot)));
344
			    (port->uartclk / (16 * quot)));
345
345
346
	spin_unlock_irqrestore(&port->lock, flags);
346
	uart_port_unlock_irqrestore(port, flags);
347
}
347
}
348
348
349
static const char *sunhv_type(struct uart_port *port)
349
static const char *sunhv_type(struct uart_port *port)
Lines 437-445 static void sunhv_console_write_paged(struct console *con, const char *s, unsign Link Here
437
	int locked = 1;
437
	int locked = 1;
438
438
439
	if (port->sysrq || oops_in_progress)
439
	if (port->sysrq || oops_in_progress)
440
		locked = spin_trylock_irqsave(&port->lock, flags);
440
		locked = uart_port_trylock_irqsave(port, &flags);
441
	else
441
	else
442
		spin_lock_irqsave(&port->lock, flags);
442
		uart_port_lock_irqsave(port, &flags);
443
443
444
	while (n > 0) {
444
	while (n > 0) {
445
		unsigned long ra = __pa(con_write_page);
445
		unsigned long ra = __pa(con_write_page);
Lines 470-476 static void sunhv_console_write_paged(struct console *con, const char *s, unsign Link Here
470
	}
470
	}
471
471
472
	if (locked)
472
	if (locked)
473
		spin_unlock_irqrestore(&port->lock, flags);
473
		uart_port_unlock_irqrestore(port, flags);
474
}
474
}
475
475
476
static inline void sunhv_console_putchar(struct uart_port *port, char c)
476
static inline void sunhv_console_putchar(struct uart_port *port, char c)
Lines 492-500 static void sunhv_console_write_bychar(struct console *con, const char *s, unsig Link Here
492
	int i, locked = 1;
492
	int i, locked = 1;
493
493
494
	if (port->sysrq || oops_in_progress)
494
	if (port->sysrq || oops_in_progress)
495
		locked = spin_trylock_irqsave(&port->lock, flags);
495
		locked = uart_port_trylock_irqsave(port, &flags);
496
	else
496
	else
497
		spin_lock_irqsave(&port->lock, flags);
497
		uart_port_lock_irqsave(port, &flags);
498
498
499
	for (i = 0; i < n; i++) {
499
	for (i = 0; i < n; i++) {
500
		if (*s == '\n')
500
		if (*s == '\n')
Lines 503-509 static void sunhv_console_write_bychar(struct console *con, const char *s, unsig Link Here
503
	}
503
	}
504
504
505
	if (locked)
505
	if (locked)
506
		spin_unlock_irqrestore(&port->lock, flags);
506
		uart_port_unlock_irqrestore(port, flags);
507
}
507
}
508
508
509
static struct console sunhv_console = {
509
static struct console sunhv_console = {
(-)a/drivers/tty/serial/sunplus-uart.c (-13 / +13 lines)
Lines 184-190 static void sunplus_break_ctl(struct uart_port *port, int ctl) Link Here
184
	unsigned long flags;
184
	unsigned long flags;
185
	unsigned int lcr;
185
	unsigned int lcr;
186
186
187
	spin_lock_irqsave(&port->lock, flags);
187
	uart_port_lock_irqsave(port, &flags);
188
188
189
	lcr = readl(port->membase + SUP_UART_LCR);
189
	lcr = readl(port->membase + SUP_UART_LCR);
190
190
Lines 195-201 static void sunplus_break_ctl(struct uart_port *port, int ctl) Link Here
195
195
196
	writel(lcr, port->membase + SUP_UART_LCR);
196
	writel(lcr, port->membase + SUP_UART_LCR);
197
197
198
	spin_unlock_irqrestore(&port->lock, flags);
198
	uart_port_unlock_irqrestore(port, flags);
199
}
199
}
200
200
201
static void transmit_chars(struct uart_port *port)
201
static void transmit_chars(struct uart_port *port)
Lines 277-283 static irqreturn_t sunplus_uart_irq(int irq, void *args) Link Here
277
	struct uart_port *port = args;
277
	struct uart_port *port = args;
278
	unsigned int isc;
278
	unsigned int isc;
279
279
280
	spin_lock(&port->lock);
280
	uart_port_lock(port);
281
281
282
	isc = readl(port->membase + SUP_UART_ISC);
282
	isc = readl(port->membase + SUP_UART_ISC);
283
283
Lines 287-293 static irqreturn_t sunplus_uart_irq(int irq, void *args) Link Here
287
	if (isc & SUP_UART_ISC_TX)
287
	if (isc & SUP_UART_ISC_TX)
288
		transmit_chars(port);
288
		transmit_chars(port);
289
289
290
	spin_unlock(&port->lock);
290
	uart_port_unlock(port);
291
291
292
	return IRQ_HANDLED;
292
	return IRQ_HANDLED;
293
}
293
}
Lines 302-315 static int sunplus_startup(struct uart_port *port) Link Here
302
	if (ret)
302
	if (ret)
303
		return ret;
303
		return ret;
304
304
305
	spin_lock_irqsave(&port->lock, flags);
305
	uart_port_lock_irqsave(port, &flags);
306
	/* isc define Bit[7:4] int setting, Bit[3:0] int status
306
	/* isc define Bit[7:4] int setting, Bit[3:0] int status
307
	 * isc register will clean Bit[3:0] int status after read
307
	 * isc register will clean Bit[3:0] int status after read
308
	 * only do a write to Bit[7:4] int setting
308
	 * only do a write to Bit[7:4] int setting
309
	 */
309
	 */
310
	isc |= SUP_UART_ISC_RXM;
310
	isc |= SUP_UART_ISC_RXM;
311
	writel(isc, port->membase + SUP_UART_ISC);
311
	writel(isc, port->membase + SUP_UART_ISC);
312
	spin_unlock_irqrestore(&port->lock, flags);
312
	uart_port_unlock_irqrestore(port, flags);
313
313
314
	return 0;
314
	return 0;
315
}
315
}
Lines 318-330 static void sunplus_shutdown(struct uart_port *port) Link Here
318
{
318
{
319
	unsigned long flags;
319
	unsigned long flags;
320
320
321
	spin_lock_irqsave(&port->lock, flags);
321
	uart_port_lock_irqsave(port, &flags);
322
	/* isc define Bit[7:4] int setting, Bit[3:0] int status
322
	/* isc define Bit[7:4] int setting, Bit[3:0] int status
323
	 * isc register will clean Bit[3:0] int status after read
323
	 * isc register will clean Bit[3:0] int status after read
324
	 * only do a write to Bit[7:4] int setting
324
	 * only do a write to Bit[7:4] int setting
325
	 */
325
	 */
326
	writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */
326
	writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */
327
	spin_unlock_irqrestore(&port->lock, flags);
327
	uart_port_unlock_irqrestore(port, flags);
328
328
329
	free_irq(port->irq, port);
329
	free_irq(port->irq, port);
330
}
330
}
Lines 372-378 static void sunplus_set_termios(struct uart_port *port, Link Here
372
			lcr |= UART_LCR_EPAR;
372
			lcr |= UART_LCR_EPAR;
373
	}
373
	}
374
374
375
	spin_lock_irqsave(&port->lock, flags);
375
	uart_port_lock_irqsave(port, &flags);
376
376
377
	uart_update_timeout(port, termios->c_cflag, baud);
377
	uart_update_timeout(port, termios->c_cflag, baud);
378
378
Lines 407-413 static void sunplus_set_termios(struct uart_port *port, Link Here
407
	writel(div_l, port->membase + SUP_UART_DIV_L);
407
	writel(div_l, port->membase + SUP_UART_DIV_L);
408
	writel(lcr, port->membase + SUP_UART_LCR);
408
	writel(lcr, port->membase + SUP_UART_LCR);
409
409
410
	spin_unlock_irqrestore(&port->lock, flags);
410
	uart_port_unlock_irqrestore(port, flags);
411
}
411
}
412
412
413
static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
413
static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
Lines 517-531 static void sunplus_console_write(struct console *co, Link Here
517
	if (sunplus_console_ports[co->index]->port.sysrq)
517
	if (sunplus_console_ports[co->index]->port.sysrq)
518
		locked = 0;
518
		locked = 0;
519
	else if (oops_in_progress)
519
	else if (oops_in_progress)
520
		locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock);
520
		locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
521
	else
521
	else
522
		spin_lock(&sunplus_console_ports[co->index]->port.lock);
522
		uart_port_lock(&sunplus_console_ports[co->index]->port);
523
523
524
	uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
524
	uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
525
			   sunplus_uart_console_putchar);
525
			   sunplus_uart_console_putchar);
526
526
527
	if (locked)
527
	if (locked)
528
		spin_unlock(&sunplus_console_ports[co->index]->port.lock);
528
		uart_port_unlock(&sunplus_console_ports[co->index]->port);
529
529
530
	local_irq_restore(flags);
530
	local_irq_restore(flags);
531
}
531
}
(-)a/drivers/tty/serial/sunsab.c (-17 / +17 lines)
Lines 310-316 static irqreturn_t sunsab_interrupt(int irq, void *dev_id) Link Here
310
	unsigned long flags;
310
	unsigned long flags;
311
	unsigned char gis;
311
	unsigned char gis;
312
312
313
	spin_lock_irqsave(&up->port.lock, flags);
313
	uart_port_lock_irqsave(&up->port, &flags);
314
314
315
	status.stat = 0;
315
	status.stat = 0;
316
	gis = readb(&up->regs->r.gis) >> up->gis_shift;
316
	gis = readb(&up->regs->r.gis) >> up->gis_shift;
Lines 331-337 static irqreturn_t sunsab_interrupt(int irq, void *dev_id) Link Here
331
			transmit_chars(up, &status);
331
			transmit_chars(up, &status);
332
	}
332
	}
333
333
334
	spin_unlock_irqrestore(&up->port.lock, flags);
334
	uart_port_unlock_irqrestore(&up->port, flags);
335
335
336
	if (port)
336
	if (port)
337
		tty_flip_buffer_push(port);
337
		tty_flip_buffer_push(port);
Lines 473-484 static void sunsab_send_xchar(struct uart_port *port, char ch) Link Here
473
	if (ch == __DISABLED_CHAR)
473
	if (ch == __DISABLED_CHAR)
474
		return;
474
		return;
475
475
476
	spin_lock_irqsave(&up->port.lock, flags);
476
	uart_port_lock_irqsave(&up->port, &flags);
477
477
478
	sunsab_tec_wait(up);
478
	sunsab_tec_wait(up);
479
	writeb(ch, &up->regs->w.tic);
479
	writeb(ch, &up->regs->w.tic);
480
480
481
	spin_unlock_irqrestore(&up->port.lock, flags);
481
	uart_port_unlock_irqrestore(&up->port, flags);
482
}
482
}
483
483
484
/* port->lock held by caller.  */
484
/* port->lock held by caller.  */
Lines 499-505 static void sunsab_break_ctl(struct uart_port *port, int break_state) Link Here
499
	unsigned long flags;
499
	unsigned long flags;
500
	unsigned char val;
500
	unsigned char val;
501
501
502
	spin_lock_irqsave(&up->port.lock, flags);
502
	uart_port_lock_irqsave(&up->port, &flags);
503
503
504
	val = up->cached_dafo;
504
	val = up->cached_dafo;
505
	if (break_state)
505
	if (break_state)
Lines 512-518 static void sunsab_break_ctl(struct uart_port *port, int break_state) Link Here
512
	if (test_bit(SAB82532_XPR, &up->irqflags))
512
	if (test_bit(SAB82532_XPR, &up->irqflags))
513
		sunsab_tx_idle(up);
513
		sunsab_tx_idle(up);
514
514
515
	spin_unlock_irqrestore(&up->port.lock, flags);
515
	uart_port_unlock_irqrestore(&up->port, flags);
516
}
516
}
517
517
518
/* port->lock is not held.  */
518
/* port->lock is not held.  */
Lines 527-533 static int sunsab_startup(struct uart_port *port) Link Here
527
	if (err)
527
	if (err)
528
		return err;
528
		return err;
529
529
530
	spin_lock_irqsave(&up->port.lock, flags);
530
	uart_port_lock_irqsave(&up->port, &flags);
531
531
532
	/*
532
	/*
533
	 * Wait for any commands or immediate characters
533
	 * Wait for any commands or immediate characters
Lines 582-588 static int sunsab_startup(struct uart_port *port) Link Here
582
	set_bit(SAB82532_ALLS, &up->irqflags);
582
	set_bit(SAB82532_ALLS, &up->irqflags);
583
	set_bit(SAB82532_XPR, &up->irqflags);
583
	set_bit(SAB82532_XPR, &up->irqflags);
584
584
585
	spin_unlock_irqrestore(&up->port.lock, flags);
585
	uart_port_unlock_irqrestore(&up->port, flags);
586
586
587
	return 0;
587
	return 0;
588
}
588
}
Lines 594-600 static void sunsab_shutdown(struct uart_port *port) Link Here
594
		container_of(port, struct uart_sunsab_port, port);
594
		container_of(port, struct uart_sunsab_port, port);
595
	unsigned long flags;
595
	unsigned long flags;
596
596
597
	spin_lock_irqsave(&up->port.lock, flags);
597
	uart_port_lock_irqsave(&up->port, &flags);
598
598
599
	/* Disable Interrupts */
599
	/* Disable Interrupts */
600
	up->interrupt_mask0 = 0xff;
600
	up->interrupt_mask0 = 0xff;
Lines 628-634 static void sunsab_shutdown(struct uart_port *port) Link Here
628
	writeb(tmp, &up->regs->rw.ccr0);
628
	writeb(tmp, &up->regs->rw.ccr0);
629
#endif
629
#endif
630
630
631
	spin_unlock_irqrestore(&up->port.lock, flags);
631
	uart_port_unlock_irqrestore(&up->port, flags);
632
	free_irq(up->port.irq, up);
632
	free_irq(up->port.irq, up);
633
}
633
}
634
634
Lines 779-787 static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
779
	unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
779
	unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
780
	unsigned int quot = uart_get_divisor(port, baud);
780
	unsigned int quot = uart_get_divisor(port, baud);
781
781
782
	spin_lock_irqsave(&up->port.lock, flags);
782
	uart_port_lock_irqsave(&up->port, &flags);
783
	sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
783
	sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
784
	spin_unlock_irqrestore(&up->port.lock, flags);
784
	uart_port_unlock_irqrestore(&up->port, flags);
785
}
785
}
786
786
787
static const char *sunsab_type(struct uart_port *port)
787
static const char *sunsab_type(struct uart_port *port)
Lines 857-871 static void sunsab_console_write(struct console *con, const char *s, unsigned n) Link Here
857
	int locked = 1;
857
	int locked = 1;
858
858
859
	if (up->port.sysrq || oops_in_progress)
859
	if (up->port.sysrq || oops_in_progress)
860
		locked = spin_trylock_irqsave(&up->port.lock, flags);
860
		locked = uart_port_trylock_irqsave(&up->port, &flags);
861
	else
861
	else
862
		spin_lock_irqsave(&up->port.lock, flags);
862
		uart_port_lock_irqsave(&up->port, &flags);
863
863
864
	uart_console_write(&up->port, s, n, sunsab_console_putchar);
864
	uart_console_write(&up->port, s, n, sunsab_console_putchar);
865
	sunsab_tec_wait(up);
865
	sunsab_tec_wait(up);
866
866
867
	if (locked)
867
	if (locked)
868
		spin_unlock_irqrestore(&up->port.lock, flags);
868
		uart_port_unlock_irqrestore(&up->port, flags);
869
}
869
}
870
870
871
static int sunsab_console_setup(struct console *con, char *options)
871
static int sunsab_console_setup(struct console *con, char *options)
Lines 914-920 static int sunsab_console_setup(struct console *con, char *options) Link Here
914
	 */
914
	 */
915
	sunsab_startup(&up->port);
915
	sunsab_startup(&up->port);
916
916
917
	spin_lock_irqsave(&up->port.lock, flags);
917
	uart_port_lock_irqsave(&up->port, &flags);
918
918
919
	/*
919
	/*
920
	 * Finally, enable interrupts
920
	 * Finally, enable interrupts
Lines 932-938 static int sunsab_console_setup(struct console *con, char *options) Link Here
932
	sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
932
	sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
933
	sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
933
	sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
934
934
935
	spin_unlock_irqrestore(&up->port.lock, flags);
935
	uart_port_unlock_irqrestore(&up->port, flags);
936
	
936
	
937
	return 0;
937
	return 0;
938
}
938
}
(-)a/drivers/tty/serial/sunsu.c (-23 / +23 lines)
Lines 212-220 static void enable_rsa(struct uart_sunsu_port *up) Link Here
212
{
212
{
213
	if (up->port.type == PORT_RSA) {
213
	if (up->port.type == PORT_RSA) {
214
		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
214
		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
215
			spin_lock_irq(&up->port.lock);
215
			uart_port_lock_irq(&up->port);
216
			__enable_rsa(up);
216
			__enable_rsa(up);
217
			spin_unlock_irq(&up->port.lock);
217
			uart_port_unlock_irq(&up->port);
218
		}
218
		}
219
		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
219
		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
220
			serial_outp(up, UART_RSA_FRR, 0);
220
			serial_outp(up, UART_RSA_FRR, 0);
Lines 234-240 static void disable_rsa(struct uart_sunsu_port *up) Link Here
234
234
235
	if (up->port.type == PORT_RSA &&
235
	if (up->port.type == PORT_RSA &&
236
	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
236
	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
237
		spin_lock_irq(&up->port.lock);
237
		uart_port_lock_irq(&up->port);
238
238
239
		mode = serial_inp(up, UART_RSA_MSR);
239
		mode = serial_inp(up, UART_RSA_MSR);
240
		result = !(mode & UART_RSA_MSR_FIFO);
240
		result = !(mode & UART_RSA_MSR_FIFO);
Lines 247-253 static void disable_rsa(struct uart_sunsu_port *up) Link Here
247
247
248
		if (result)
248
		if (result)
249
			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
249
			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
250
		spin_unlock_irq(&up->port.lock);
250
		uart_port_unlock_irq(&up->port);
251
	}
251
	}
252
}
252
}
253
#endif /* CONFIG_SERIAL_8250_RSA */
253
#endif /* CONFIG_SERIAL_8250_RSA */
Lines 311-320 static void sunsu_enable_ms(struct uart_port *port) Link Here
311
		container_of(port, struct uart_sunsu_port, port);
311
		container_of(port, struct uart_sunsu_port, port);
312
	unsigned long flags;
312
	unsigned long flags;
313
313
314
	spin_lock_irqsave(&up->port.lock, flags);
314
	uart_port_lock_irqsave(&up->port, &flags);
315
	up->ier |= UART_IER_MSI;
315
	up->ier |= UART_IER_MSI;
316
	serial_out(up, UART_IER, up->ier);
316
	serial_out(up, UART_IER, up->ier);
317
	spin_unlock_irqrestore(&up->port.lock, flags);
317
	uart_port_unlock_irqrestore(&up->port, flags);
318
}
318
}
319
319
320
static void
320
static void
Lines 456-462 static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id) Link Here
456
	unsigned long flags;
456
	unsigned long flags;
457
	unsigned char status;
457
	unsigned char status;
458
458
459
	spin_lock_irqsave(&up->port.lock, flags);
459
	uart_port_lock_irqsave(&up->port, &flags);
460
460
461
	do {
461
	do {
462
		status = serial_inp(up, UART_LSR);
462
		status = serial_inp(up, UART_LSR);
Lines 470-476 static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id) Link Here
470
470
471
	} while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
471
	} while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
472
472
473
	spin_unlock_irqrestore(&up->port.lock, flags);
473
	uart_port_unlock_irqrestore(&up->port, flags);
474
474
475
	return IRQ_HANDLED;
475
	return IRQ_HANDLED;
476
}
476
}
Lines 545-553 static unsigned int sunsu_tx_empty(struct uart_port *port) Link Here
545
	unsigned long flags;
545
	unsigned long flags;
546
	unsigned int ret;
546
	unsigned int ret;
547
547
548
	spin_lock_irqsave(&up->port.lock, flags);
548
	uart_port_lock_irqsave(&up->port, &flags);
549
	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
549
	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
550
	spin_unlock_irqrestore(&up->port.lock, flags);
550
	uart_port_unlock_irqrestore(&up->port, flags);
551
551
552
	return ret;
552
	return ret;
553
}
553
}
Lines 599-611 static void sunsu_break_ctl(struct uart_port *port, int break_state) Link Here
599
		container_of(port, struct uart_sunsu_port, port);
599
		container_of(port, struct uart_sunsu_port, port);
600
	unsigned long flags;
600
	unsigned long flags;
601
601
602
	spin_lock_irqsave(&up->port.lock, flags);
602
	uart_port_lock_irqsave(&up->port, &flags);
603
	if (break_state == -1)
603
	if (break_state == -1)
604
		up->lcr |= UART_LCR_SBC;
604
		up->lcr |= UART_LCR_SBC;
605
	else
605
	else
606
		up->lcr &= ~UART_LCR_SBC;
606
		up->lcr &= ~UART_LCR_SBC;
607
	serial_out(up, UART_LCR, up->lcr);
607
	serial_out(up, UART_LCR, up->lcr);
608
	spin_unlock_irqrestore(&up->port.lock, flags);
608
	uart_port_unlock_irqrestore(&up->port, flags);
609
}
609
}
610
610
611
static int sunsu_startup(struct uart_port *port)
611
static int sunsu_startup(struct uart_port *port)
Lines 683-694 static int sunsu_startup(struct uart_port *port) Link Here
683
	 */
683
	 */
684
	serial_outp(up, UART_LCR, UART_LCR_WLEN8);
684
	serial_outp(up, UART_LCR, UART_LCR_WLEN8);
685
685
686
	spin_lock_irqsave(&up->port.lock, flags);
686
	uart_port_lock_irqsave(&up->port, &flags);
687
687
688
	up->port.mctrl |= TIOCM_OUT2;
688
	up->port.mctrl |= TIOCM_OUT2;
689
689
690
	sunsu_set_mctrl(&up->port, up->port.mctrl);
690
	sunsu_set_mctrl(&up->port, up->port.mctrl);
691
	spin_unlock_irqrestore(&up->port.lock, flags);
691
	uart_port_unlock_irqrestore(&up->port, flags);
692
692
693
	/*
693
	/*
694
	 * Finally, enable interrupts.  Note: Modem status interrupts
694
	 * Finally, enable interrupts.  Note: Modem status interrupts
Lines 731-737 static void sunsu_shutdown(struct uart_port *port) Link Here
731
	up->ier = 0;
731
	up->ier = 0;
732
	serial_outp(up, UART_IER, 0);
732
	serial_outp(up, UART_IER, 0);
733
733
734
	spin_lock_irqsave(&up->port.lock, flags);
734
	uart_port_lock_irqsave(&up->port, &flags);
735
	if (up->port.flags & UPF_FOURPORT) {
735
	if (up->port.flags & UPF_FOURPORT) {
736
		/* reset interrupts on the AST Fourport board */
736
		/* reset interrupts on the AST Fourport board */
737
		inb((up->port.iobase & 0xfe0) | 0x1f);
737
		inb((up->port.iobase & 0xfe0) | 0x1f);
Lines 740-746 static void sunsu_shutdown(struct uart_port *port) Link Here
740
		up->port.mctrl &= ~TIOCM_OUT2;
740
		up->port.mctrl &= ~TIOCM_OUT2;
741
741
742
	sunsu_set_mctrl(&up->port, up->port.mctrl);
742
	sunsu_set_mctrl(&up->port, up->port.mctrl);
743
	spin_unlock_irqrestore(&up->port.lock, flags);
743
	uart_port_unlock_irqrestore(&up->port, flags);
744
744
745
	/*
745
	/*
746
	 * Disable break condition and FIFOs
746
	 * Disable break condition and FIFOs
Lines 826-832 sunsu_change_speed(struct uart_port *port, unsigned int cflag, Link Here
826
	 * Ok, we're now changing the port state.  Do it with
826
	 * Ok, we're now changing the port state.  Do it with
827
	 * interrupts disabled.
827
	 * interrupts disabled.
828
	 */
828
	 */
829
	spin_lock_irqsave(&up->port.lock, flags);
829
	uart_port_lock_irqsave(&up->port, &flags);
830
830
831
	/*
831
	/*
832
	 * Update the per-port timeout.
832
	 * Update the per-port timeout.
Lines 891-897 sunsu_change_speed(struct uart_port *port, unsigned int cflag, Link Here
891
891
892
	up->cflag = cflag;
892
	up->cflag = cflag;
893
893
894
	spin_unlock_irqrestore(&up->port.lock, flags);
894
	uart_port_unlock_irqrestore(&up->port, flags);
895
}
895
}
896
896
897
static void
897
static void
Lines 1038-1044 static void sunsu_autoconfig(struct uart_sunsu_port *up) Link Here
1038
	up->type_probed = PORT_UNKNOWN;
1038
	up->type_probed = PORT_UNKNOWN;
1039
	up->port.iotype = UPIO_MEM;
1039
	up->port.iotype = UPIO_MEM;
1040
1040
1041
	spin_lock_irqsave(&up->port.lock, flags);
1041
	uart_port_lock_irqsave(&up->port, &flags);
1042
1042
1043
	if (!(up->port.flags & UPF_BUGGY_UART)) {
1043
	if (!(up->port.flags & UPF_BUGGY_UART)) {
1044
		/*
1044
		/*
Lines 1173-1179 static void sunsu_autoconfig(struct uart_sunsu_port *up) Link Here
1173
	serial_outp(up, UART_IER, 0);
1173
	serial_outp(up, UART_IER, 0);
1174
1174
1175
out:
1175
out:
1176
	spin_unlock_irqrestore(&up->port.lock, flags);
1176
	uart_port_unlock_irqrestore(&up->port, flags);
1177
}
1177
}
1178
1178
1179
static struct uart_driver sunsu_reg = {
1179
static struct uart_driver sunsu_reg = {
Lines 1298-1306 static void sunsu_console_write(struct console *co, const char *s, Link Here
1298
	int locked = 1;
1298
	int locked = 1;
1299
1299
1300
	if (up->port.sysrq || oops_in_progress)
1300
	if (up->port.sysrq || oops_in_progress)
1301
		locked = spin_trylock_irqsave(&up->port.lock, flags);
1301
		locked = uart_port_trylock_irqsave(&up->port, &flags);
1302
	else
1302
	else
1303
		spin_lock_irqsave(&up->port.lock, flags);
1303
		uart_port_lock_irqsave(&up->port, &flags);
1304
1304
1305
	/*
1305
	/*
1306
	 *	First save the UER then disable the interrupts
1306
	 *	First save the UER then disable the interrupts
Lines 1318-1324 static void sunsu_console_write(struct console *co, const char *s, Link Here
1318
	serial_out(up, UART_IER, ier);
1318
	serial_out(up, UART_IER, ier);
1319
1319
1320
	if (locked)
1320
	if (locked)
1321
		spin_unlock_irqrestore(&up->port.lock, flags);
1321
		uart_port_unlock_irqrestore(&up->port, flags);
1322
}
1322
}
1323
1323
1324
/*
1324
/*
(-)a/drivers/tty/serial/sunzilog.c (-21 / +21 lines)
Lines 531-537 static irqreturn_t sunzilog_interrupt(int irq, void *dev_id) Link Here
531
		struct tty_port *port;
531
		struct tty_port *port;
532
		unsigned char r3;
532
		unsigned char r3;
533
533
534
		spin_lock(&up->port.lock);
534
		uart_port_lock(&up->port);
535
		r3 = read_zsreg(channel, R3);
535
		r3 = read_zsreg(channel, R3);
536
536
537
		/* Channel A */
537
		/* Channel A */
Lines 548-554 static irqreturn_t sunzilog_interrupt(int irq, void *dev_id) Link Here
548
			if (r3 & CHATxIP)
548
			if (r3 & CHATxIP)
549
				sunzilog_transmit_chars(up, channel);
549
				sunzilog_transmit_chars(up, channel);
550
		}
550
		}
551
		spin_unlock(&up->port.lock);
551
		uart_port_unlock(&up->port);
552
552
553
		if (port)
553
		if (port)
554
			tty_flip_buffer_push(port);
554
			tty_flip_buffer_push(port);
Lines 557-563 static irqreturn_t sunzilog_interrupt(int irq, void *dev_id) Link Here
557
		up = up->next;
557
		up = up->next;
558
		channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
558
		channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
559
559
560
		spin_lock(&up->port.lock);
560
		uart_port_lock(&up->port);
561
		port = NULL;
561
		port = NULL;
562
		if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
562
		if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
563
			writeb(RES_H_IUS, &channel->control);
563
			writeb(RES_H_IUS, &channel->control);
Lines 571-577 static irqreturn_t sunzilog_interrupt(int irq, void *dev_id) Link Here
571
			if (r3 & CHBTxIP)
571
			if (r3 & CHBTxIP)
572
				sunzilog_transmit_chars(up, channel);
572
				sunzilog_transmit_chars(up, channel);
573
		}
573
		}
574
		spin_unlock(&up->port.lock);
574
		uart_port_unlock(&up->port);
575
575
576
		if (port)
576
		if (port)
577
			tty_flip_buffer_push(port);
577
			tty_flip_buffer_push(port);
Lines 604-614 static unsigned int sunzilog_tx_empty(struct uart_port *port) Link Here
604
	unsigned char status;
604
	unsigned char status;
605
	unsigned int ret;
605
	unsigned int ret;
606
606
607
	spin_lock_irqsave(&port->lock, flags);
607
	uart_port_lock_irqsave(port, &flags);
608
608
609
	status = sunzilog_read_channel_status(port);
609
	status = sunzilog_read_channel_status(port);
610
610
611
	spin_unlock_irqrestore(&port->lock, flags);
611
	uart_port_unlock_irqrestore(port, flags);
612
612
613
	if (status & Tx_BUF_EMP)
613
	if (status & Tx_BUF_EMP)
614
		ret = TIOCSER_TEMT;
614
		ret = TIOCSER_TEMT;
Lines 764-770 static void sunzilog_break_ctl(struct uart_port *port, int break_state) Link Here
764
	else
764
	else
765
		clear_bits |= SND_BRK;
765
		clear_bits |= SND_BRK;
766
766
767
	spin_lock_irqsave(&port->lock, flags);
767
	uart_port_lock_irqsave(port, &flags);
768
768
769
	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
769
	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
770
	if (new_reg != up->curregs[R5]) {
770
	if (new_reg != up->curregs[R5]) {
Lines 774-780 static void sunzilog_break_ctl(struct uart_port *port, int break_state) Link Here
774
		write_zsreg(channel, R5, up->curregs[R5]);
774
		write_zsreg(channel, R5, up->curregs[R5]);
775
	}
775
	}
776
776
777
	spin_unlock_irqrestore(&port->lock, flags);
777
	uart_port_unlock_irqrestore(port, flags);
778
}
778
}
779
779
780
static void __sunzilog_startup(struct uart_sunzilog_port *up)
780
static void __sunzilog_startup(struct uart_sunzilog_port *up)
Lines 800-808 static int sunzilog_startup(struct uart_port *port) Link Here
800
	if (ZS_IS_CONS(up))
800
	if (ZS_IS_CONS(up))
801
		return 0;
801
		return 0;
802
802
803
	spin_lock_irqsave(&port->lock, flags);
803
	uart_port_lock_irqsave(port, &flags);
804
	__sunzilog_startup(up);
804
	__sunzilog_startup(up);
805
	spin_unlock_irqrestore(&port->lock, flags);
805
	uart_port_unlock_irqrestore(port, flags);
806
	return 0;
806
	return 0;
807
}
807
}
808
808
Lines 840-846 static void sunzilog_shutdown(struct uart_port *port) Link Here
840
	if (ZS_IS_CONS(up))
840
	if (ZS_IS_CONS(up))
841
		return;
841
		return;
842
842
843
	spin_lock_irqsave(&port->lock, flags);
843
	uart_port_lock_irqsave(port, &flags);
844
844
845
	channel = ZILOG_CHANNEL_FROM_PORT(port);
845
	channel = ZILOG_CHANNEL_FROM_PORT(port);
846
846
Lines 853-859 static void sunzilog_shutdown(struct uart_port *port) Link Here
853
	up->curregs[R5] &= ~SND_BRK;
853
	up->curregs[R5] &= ~SND_BRK;
854
	sunzilog_maybe_update_regs(up, channel);
854
	sunzilog_maybe_update_regs(up, channel);
855
855
856
	spin_unlock_irqrestore(&port->lock, flags);
856
	uart_port_unlock_irqrestore(port, flags);
857
}
857
}
858
858
859
/* Shared by TTY driver and serial console setup.  The port lock is held
859
/* Shared by TTY driver and serial console setup.  The port lock is held
Lines 945-951 sunzilog_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
945
945
946
	baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
946
	baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
947
947
948
	spin_lock_irqsave(&up->port.lock, flags);
948
	uart_port_lock_irqsave(&up->port, &flags);
949
949
950
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
950
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
951
951
Lines 962-968 sunzilog_set_termios(struct uart_port *port, struct ktermios *termios, Link Here
962
962
963
	uart_update_timeout(port, termios->c_cflag, baud);
963
	uart_update_timeout(port, termios->c_cflag, baud);
964
964
965
	spin_unlock_irqrestore(&up->port.lock, flags);
965
	uart_port_unlock_irqrestore(&up->port, flags);
966
}
966
}
967
967
968
static const char *sunzilog_type(struct uart_port *port)
968
static const char *sunzilog_type(struct uart_port *port)
Lines 1201-1215 sunzilog_console_write(struct console *con, const char *s, unsigned int count) Link Here
1201
	int locked = 1;
1201
	int locked = 1;
1202
1202
1203
	if (up->port.sysrq || oops_in_progress)
1203
	if (up->port.sysrq || oops_in_progress)
1204
		locked = spin_trylock_irqsave(&up->port.lock, flags);
1204
		locked = uart_port_trylock_irqsave(&up->port, &flags);
1205
	else
1205
	else
1206
		spin_lock_irqsave(&up->port.lock, flags);
1206
		uart_port_lock_irqsave(&up->port, &flags);
1207
1207
1208
	uart_console_write(&up->port, s, count, sunzilog_putchar);
1208
	uart_console_write(&up->port, s, count, sunzilog_putchar);
1209
	udelay(2);
1209
	udelay(2);
1210
1210
1211
	if (locked)
1211
	if (locked)
1212
		spin_unlock_irqrestore(&up->port.lock, flags);
1212
		uart_port_unlock_irqrestore(&up->port, flags);
1213
}
1213
}
1214
1214
1215
static int __init sunzilog_console_setup(struct console *con, char *options)
1215
static int __init sunzilog_console_setup(struct console *con, char *options)
Lines 1244-1250 static int __init sunzilog_console_setup(struct console *con, char *options) Link Here
1244
1244
1245
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
1245
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
1246
1246
1247
	spin_lock_irqsave(&up->port.lock, flags);
1247
	uart_port_lock_irqsave(&up->port, &flags);
1248
1248
1249
	up->curregs[R15] |= BRKIE;
1249
	up->curregs[R15] |= BRKIE;
1250
	sunzilog_convert_to_zs(up, con->cflag, 0, brg);
1250
	sunzilog_convert_to_zs(up, con->cflag, 0, brg);
Lines 1252-1258 static int __init sunzilog_console_setup(struct console *con, char *options) Link Here
1252
	sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
1252
	sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
1253
	__sunzilog_startup(up);
1253
	__sunzilog_startup(up);
1254
1254
1255
	spin_unlock_irqrestore(&up->port.lock, flags);
1255
	uart_port_unlock_irqrestore(&up->port, flags);
1256
1256
1257
	return 0;
1257
	return 0;
1258
}
1258
}
Lines 1333-1339 static void sunzilog_init_hw(struct uart_sunzilog_port *up) Link Here
1333
1333
1334
	channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
1334
	channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
1335
1335
1336
	spin_lock_irqsave(&up->port.lock, flags);
1336
	uart_port_lock_irqsave(&up->port, &flags);
1337
	if (ZS_IS_CHANNEL_A(up)) {
1337
	if (ZS_IS_CHANNEL_A(up)) {
1338
		write_zsreg(channel, R9, FHWRES);
1338
		write_zsreg(channel, R9, FHWRES);
1339
		ZSDELAY_LONG();
1339
		ZSDELAY_LONG();
Lines 1383-1389 static void sunzilog_init_hw(struct uart_sunzilog_port *up) Link Here
1383
		write_zsreg(channel, R9, up->curregs[R9]);
1383
		write_zsreg(channel, R9, up->curregs[R9]);
1384
	}
1384
	}
1385
1385
1386
	spin_unlock_irqrestore(&up->port.lock, flags);
1386
	uart_port_unlock_irqrestore(&up->port, flags);
1387
1387
1388
#ifdef CONFIG_SERIO
1388
#ifdef CONFIG_SERIO
1389
	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
1389
	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
(-)a/drivers/tty/serial/timbuart.c (-4 / +4 lines)
Lines 174-180 static void timbuart_tasklet(struct tasklet_struct *t) Link Here
174
	struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
174
	struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
175
	u32 isr, ier = 0;
175
	u32 isr, ier = 0;
176
176
177
	spin_lock(&uart->port.lock);
177
	uart_port_lock(&uart->port);
178
178
179
	isr = ioread32(uart->port.membase + TIMBUART_ISR);
179
	isr = ioread32(uart->port.membase + TIMBUART_ISR);
180
	dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
180
	dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
Lines 189-195 static void timbuart_tasklet(struct tasklet_struct *t) Link Here
189
189
190
	iowrite32(ier, uart->port.membase + TIMBUART_IER);
190
	iowrite32(ier, uart->port.membase + TIMBUART_IER);
191
191
192
	spin_unlock(&uart->port.lock);
192
	uart_port_unlock(&uart->port);
193
	dev_dbg(uart->port.dev, "%s leaving\n", __func__);
193
	dev_dbg(uart->port.dev, "%s leaving\n", __func__);
194
}
194
}
195
195
Lines 295-304 static void timbuart_set_termios(struct uart_port *port, Link Here
295
		tty_termios_copy_hw(termios, old);
295
		tty_termios_copy_hw(termios, old);
296
	tty_termios_encode_baud_rate(termios, baud, baud);
296
	tty_termios_encode_baud_rate(termios, baud, baud);
297
297
298
	spin_lock_irqsave(&port->lock, flags);
298
	uart_port_lock_irqsave(port, &flags);
299
	iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
299
	iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
300
	uart_update_timeout(port, termios->c_cflag, baud);
300
	uart_update_timeout(port, termios->c_cflag, baud);
301
	spin_unlock_irqrestore(&port->lock, flags);
301
	uart_port_unlock_irqrestore(port, flags);
302
}
302
}
303
303
304
static const char *timbuart_type(struct uart_port *port)
304
static const char *timbuart_type(struct uart_port *port)
(-)a/drivers/tty/serial/uartlite.c (-9 / +9 lines)
Lines 216-226 static irqreturn_t ulite_isr(int irq, void *dev_id) Link Here
216
	unsigned long flags;
216
	unsigned long flags;
217
217
218
	do {
218
	do {
219
		spin_lock_irqsave(&port->lock, flags);
219
		uart_port_lock_irqsave(port, &flags);
220
		stat = uart_in32(ULITE_STATUS, port);
220
		stat = uart_in32(ULITE_STATUS, port);
221
		busy  = ulite_receive(port, stat);
221
		busy  = ulite_receive(port, stat);
222
		busy |= ulite_transmit(port, stat);
222
		busy |= ulite_transmit(port, stat);
223
		spin_unlock_irqrestore(&port->lock, flags);
223
		uart_port_unlock_irqrestore(port, flags);
224
		n++;
224
		n++;
225
	} while (busy);
225
	} while (busy);
226
226
Lines 238-246 static unsigned int ulite_tx_empty(struct uart_port *port) Link Here
238
	unsigned long flags;
238
	unsigned long flags;
239
	unsigned int ret;
239
	unsigned int ret;
240
240
241
	spin_lock_irqsave(&port->lock, flags);
241
	uart_port_lock_irqsave(port, &flags);
242
	ret = uart_in32(ULITE_STATUS, port);
242
	ret = uart_in32(ULITE_STATUS, port);
243
	spin_unlock_irqrestore(&port->lock, flags);
243
	uart_port_unlock_irqrestore(port, flags);
244
244
245
	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
245
	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
246
}
246
}
Lines 323-329 static void ulite_set_termios(struct uart_port *port, Link Here
323
	termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
323
	termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
324
	tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
324
	tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
325
325
326
	spin_lock_irqsave(&port->lock, flags);
326
	uart_port_lock_irqsave(port, &flags);
327
327
328
	port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
328
	port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
329
		| ULITE_STATUS_TXFULL;
329
		| ULITE_STATUS_TXFULL;
Lines 346-352 static void ulite_set_termios(struct uart_port *port, Link Here
346
	/* update timeout */
346
	/* update timeout */
347
	uart_update_timeout(port, termios->c_cflag, pdata->baud);
347
	uart_update_timeout(port, termios->c_cflag, pdata->baud);
348
348
349
	spin_unlock_irqrestore(&port->lock, flags);
349
	uart_port_unlock_irqrestore(port, flags);
350
}
350
}
351
351
352
static const char *ulite_type(struct uart_port *port)
352
static const char *ulite_type(struct uart_port *port)
Lines 495-503 static void ulite_console_write(struct console *co, const char *s, Link Here
495
	int locked = 1;
495
	int locked = 1;
496
496
497
	if (oops_in_progress) {
497
	if (oops_in_progress) {
498
		locked = spin_trylock_irqsave(&port->lock, flags);
498
		locked = uart_port_trylock_irqsave(port, &flags);
499
	} else
499
	} else
500
		spin_lock_irqsave(&port->lock, flags);
500
		uart_port_lock_irqsave(port, &flags);
501
501
502
	/* save and disable interrupt */
502
	/* save and disable interrupt */
503
	ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
503
	ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
Lines 512-518 static void ulite_console_write(struct console *co, const char *s, Link Here
512
		uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
512
		uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
513
513
514
	if (locked)
514
	if (locked)
515
		spin_unlock_irqrestore(&port->lock, flags);
515
		uart_port_unlock_irqrestore(port, flags);
516
}
516
}
517
517
518
static int ulite_console_setup(struct console *co, char *options)
518
static int ulite_console_setup(struct console *co, char *options)
(-)a/drivers/tty/serial/ucc_uart.c (-2 / +2 lines)
Lines 931-937 static void qe_uart_set_termios(struct uart_port *port, Link Here
931
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
931
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
932
932
933
	/* Do we really need a spinlock here? */
933
	/* Do we really need a spinlock here? */
934
	spin_lock_irqsave(&port->lock, flags);
934
	uart_port_lock_irqsave(port, &flags);
935
935
936
	/* Update the per-port timeout. */
936
	/* Update the per-port timeout. */
937
	uart_update_timeout(port, termios->c_cflag, baud);
937
	uart_update_timeout(port, termios->c_cflag, baud);
Lines 949-955 static void qe_uart_set_termios(struct uart_port *port, Link Here
949
		qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
949
		qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
950
	}
950
	}
951
951
952
	spin_unlock_irqrestore(&port->lock, flags);
952
	uart_port_unlock_irqrestore(port, flags);
953
}
953
}
954
954
955
/*
955
/*
(-)a/drivers/tty/serial/vt8500_serial.c (-4 / +4 lines)
Lines 227-233 static irqreturn_t vt8500_irq(int irq, void *dev_id) Link Here
227
	struct uart_port *port = dev_id;
227
	struct uart_port *port = dev_id;
228
	unsigned long isr;
228
	unsigned long isr;
229
229
230
	spin_lock(&port->lock);
230
	uart_port_lock(port);
231
	isr = vt8500_read(port, VT8500_URISR);
231
	isr = vt8500_read(port, VT8500_URISR);
232
232
233
	/* Acknowledge active status bits */
233
	/* Acknowledge active status bits */
Lines 240-246 static irqreturn_t vt8500_irq(int irq, void *dev_id) Link Here
240
	if (isr & TCTS)
240
	if (isr & TCTS)
241
		handle_delta_cts(port);
241
		handle_delta_cts(port);
242
242
243
	spin_unlock(&port->lock);
243
	uart_port_unlock(port);
244
244
245
	return IRQ_HANDLED;
245
	return IRQ_HANDLED;
246
}
246
}
Lines 342-348 static void vt8500_set_termios(struct uart_port *port, Link Here
342
	unsigned int baud, lcr;
342
	unsigned int baud, lcr;
343
	unsigned int loops = 1000;
343
	unsigned int loops = 1000;
344
344
345
	spin_lock_irqsave(&port->lock, flags);
345
	uart_port_lock_irqsave(port, &flags);
346
346
347
	/* calculate and set baud rate */
347
	/* calculate and set baud rate */
348
	baud = uart_get_baud_rate(port, termios, old, 900, 921600);
348
	baud = uart_get_baud_rate(port, termios, old, 900, 921600);
Lines 410-416 static void vt8500_set_termios(struct uart_port *port, Link Here
410
	vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
410
	vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
411
	vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
411
	vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
412
412
413
	spin_unlock_irqrestore(&port->lock, flags);
413
	uart_port_unlock_irqrestore(port, flags);
414
}
414
}
415
415
416
static const char *vt8500_type(struct uart_port *port)
416
static const char *vt8500_type(struct uart_port *port)
(-)a/drivers/tty/serial/xilinx_uartps.c (-28 / +28 lines)
Lines 346-352 static irqreturn_t cdns_uart_isr(int irq, void *dev_id) Link Here
346
	struct uart_port *port = (struct uart_port *)dev_id;
346
	struct uart_port *port = (struct uart_port *)dev_id;
347
	unsigned int isrstatus;
347
	unsigned int isrstatus;
348
348
349
	spin_lock(&port->lock);
349
	uart_port_lock(port);
350
350
351
	/* Read the interrupt status register to determine which
351
	/* Read the interrupt status register to determine which
352
	 * interrupt(s) is/are active and clear them.
352
	 * interrupt(s) is/are active and clear them.
Lines 369-375 static irqreturn_t cdns_uart_isr(int irq, void *dev_id) Link Here
369
	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
369
	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
370
		cdns_uart_handle_rx(dev_id, isrstatus);
370
		cdns_uart_handle_rx(dev_id, isrstatus);
371
371
372
	spin_unlock(&port->lock);
372
	uart_port_unlock(port);
373
	return IRQ_HANDLED;
373
	return IRQ_HANDLED;
374
}
374
}
375
375
Lines 506-519 static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, Link Here
506
			return NOTIFY_BAD;
506
			return NOTIFY_BAD;
507
		}
507
		}
508
508
509
		spin_lock_irqsave(&cdns_uart->port->lock, flags);
509
		uart_port_lock_irqsave(cdns_uart->port, &flags);
510
510
511
		/* Disable the TX and RX to set baud rate */
511
		/* Disable the TX and RX to set baud rate */
512
		ctrl_reg = readl(port->membase + CDNS_UART_CR);
512
		ctrl_reg = readl(port->membase + CDNS_UART_CR);
513
		ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
513
		ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
514
		writel(ctrl_reg, port->membase + CDNS_UART_CR);
514
		writel(ctrl_reg, port->membase + CDNS_UART_CR);
515
515
516
		spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
516
		uart_port_unlock_irqrestore(cdns_uart->port, flags);
517
517
518
		return NOTIFY_OK;
518
		return NOTIFY_OK;
519
	}
519
	}
Lines 523-529 static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, Link Here
523
		 * frequency.
523
		 * frequency.
524
		 */
524
		 */
525
525
526
		spin_lock_irqsave(&cdns_uart->port->lock, flags);
526
		uart_port_lock_irqsave(cdns_uart->port, &flags);
527
527
528
		locked = 1;
528
		locked = 1;
529
		port->uartclk = ndata->new_rate;
529
		port->uartclk = ndata->new_rate;
Lines 533-539 static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, Link Here
533
		fallthrough;
533
		fallthrough;
534
	case ABORT_RATE_CHANGE:
534
	case ABORT_RATE_CHANGE:
535
		if (!locked)
535
		if (!locked)
536
			spin_lock_irqsave(&cdns_uart->port->lock, flags);
536
			uart_port_lock_irqsave(cdns_uart->port, &flags);
537
537
538
		/* Set TX/RX Reset */
538
		/* Set TX/RX Reset */
539
		ctrl_reg = readl(port->membase + CDNS_UART_CR);
539
		ctrl_reg = readl(port->membase + CDNS_UART_CR);
Lines 555-561 static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, Link Here
555
		ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
555
		ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
556
		writel(ctrl_reg, port->membase + CDNS_UART_CR);
556
		writel(ctrl_reg, port->membase + CDNS_UART_CR);
557
557
558
		spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
558
		uart_port_unlock_irqrestore(cdns_uart->port, flags);
559
559
560
		return NOTIFY_OK;
560
		return NOTIFY_OK;
561
	default:
561
	default:
Lines 652-658 static void cdns_uart_break_ctl(struct uart_port *port, int ctl) Link Here
652
	unsigned int status;
652
	unsigned int status;
653
	unsigned long flags;
653
	unsigned long flags;
654
654
655
	spin_lock_irqsave(&port->lock, flags);
655
	uart_port_lock_irqsave(port, &flags);
656
656
657
	status = readl(port->membase + CDNS_UART_CR);
657
	status = readl(port->membase + CDNS_UART_CR);
658
658
Lines 664-670 static void cdns_uart_break_ctl(struct uart_port *port, int ctl) Link Here
664
			writel(CDNS_UART_CR_STOPBRK | status,
664
			writel(CDNS_UART_CR_STOPBRK | status,
665
					port->membase + CDNS_UART_CR);
665
					port->membase + CDNS_UART_CR);
666
	}
666
	}
667
	spin_unlock_irqrestore(&port->lock, flags);
667
	uart_port_unlock_irqrestore(port, flags);
668
}
668
}
669
669
670
/**
670
/**
Lines 683-689 static void cdns_uart_set_termios(struct uart_port *port, Link Here
683
	unsigned long flags;
683
	unsigned long flags;
684
	unsigned int ctrl_reg, mode_reg;
684
	unsigned int ctrl_reg, mode_reg;
685
685
686
	spin_lock_irqsave(&port->lock, flags);
686
	uart_port_lock_irqsave(port, &flags);
687
687
688
	/* Disable the TX and RX to set baud rate */
688
	/* Disable the TX and RX to set baud rate */
689
	ctrl_reg = readl(port->membase + CDNS_UART_CR);
689
	ctrl_reg = readl(port->membase + CDNS_UART_CR);
Lines 794-800 static void cdns_uart_set_termios(struct uart_port *port, Link Here
794
		cval &= ~CDNS_UART_MODEMCR_FCM;
794
		cval &= ~CDNS_UART_MODEMCR_FCM;
795
	writel(cval, port->membase + CDNS_UART_MODEMCR);
795
	writel(cval, port->membase + CDNS_UART_MODEMCR);
796
796
797
	spin_unlock_irqrestore(&port->lock, flags);
797
	uart_port_unlock_irqrestore(port, flags);
798
}
798
}
799
799
800
/**
800
/**
Lines 813-819 static int cdns_uart_startup(struct uart_port *port) Link Here
813
813
814
	is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
814
	is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
815
815
816
	spin_lock_irqsave(&port->lock, flags);
816
	uart_port_lock_irqsave(port, &flags);
817
817
818
	/* Disable the TX and RX */
818
	/* Disable the TX and RX */
819
	writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
819
	writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
Lines 861-867 static int cdns_uart_startup(struct uart_port *port) Link Here
861
	writel(readl(port->membase + CDNS_UART_ISR),
861
	writel(readl(port->membase + CDNS_UART_ISR),
862
			port->membase + CDNS_UART_ISR);
862
			port->membase + CDNS_UART_ISR);
863
863
864
	spin_unlock_irqrestore(&port->lock, flags);
864
	uart_port_unlock_irqrestore(port, flags);
865
865
866
	ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
866
	ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
867
	if (ret) {
867
	if (ret) {
Lines 889-895 static void cdns_uart_shutdown(struct uart_port *port) Link Here
889
	int status;
889
	int status;
890
	unsigned long flags;
890
	unsigned long flags;
891
891
892
	spin_lock_irqsave(&port->lock, flags);
892
	uart_port_lock_irqsave(port, &flags);
893
893
894
	/* Disable interrupts */
894
	/* Disable interrupts */
895
	status = readl(port->membase + CDNS_UART_IMR);
895
	status = readl(port->membase + CDNS_UART_IMR);
Lines 900-906 static void cdns_uart_shutdown(struct uart_port *port) Link Here
900
	writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
900
	writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
901
			port->membase + CDNS_UART_CR);
901
			port->membase + CDNS_UART_CR);
902
902
903
	spin_unlock_irqrestore(&port->lock, flags);
903
	uart_port_unlock_irqrestore(port, flags);
904
904
905
	free_irq(port->irq, port);
905
	free_irq(port->irq, port);
906
}
906
}
Lines 1050-1056 static int cdns_uart_poll_get_char(struct uart_port *port) Link Here
1050
	int c;
1050
	int c;
1051
	unsigned long flags;
1051
	unsigned long flags;
1052
1052
1053
	spin_lock_irqsave(&port->lock, flags);
1053
	uart_port_lock_irqsave(port, &flags);
1054
1054
1055
	/* Check if FIFO is empty */
1055
	/* Check if FIFO is empty */
1056
	if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
1056
	if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
Lines 1058-1064 static int cdns_uart_poll_get_char(struct uart_port *port) Link Here
1058
	else /* Read a character */
1058
	else /* Read a character */
1059
		c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
1059
		c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
1060
1060
1061
	spin_unlock_irqrestore(&port->lock, flags);
1061
	uart_port_unlock_irqrestore(port, flags);
1062
1062
1063
	return c;
1063
	return c;
1064
}
1064
}
Lines 1067-1073 static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c) Link Here
1067
{
1067
{
1068
	unsigned long flags;
1068
	unsigned long flags;
1069
1069
1070
	spin_lock_irqsave(&port->lock, flags);
1070
	uart_port_lock_irqsave(port, &flags);
1071
1071
1072
	/* Wait until FIFO is empty */
1072
	/* Wait until FIFO is empty */
1073
	while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
1073
	while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
Lines 1080-1086 static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c) Link Here
1080
	while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
1080
	while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
1081
		cpu_relax();
1081
		cpu_relax();
1082
1082
1083
	spin_unlock_irqrestore(&port->lock, flags);
1083
	uart_port_unlock_irqrestore(port, flags);
1084
}
1084
}
1085
#endif
1085
#endif
1086
1086
Lines 1232-1240 static void cdns_uart_console_write(struct console *co, const char *s, Link Here
1232
	if (port->sysrq)
1232
	if (port->sysrq)
1233
		locked = 0;
1233
		locked = 0;
1234
	else if (oops_in_progress)
1234
	else if (oops_in_progress)
1235
		locked = spin_trylock_irqsave(&port->lock, flags);
1235
		locked = uart_port_trylock_irqsave(port, &flags);
1236
	else
1236
	else
1237
		spin_lock_irqsave(&port->lock, flags);
1237
		uart_port_lock_irqsave(port, &flags);
1238
1238
1239
	/* save and disable interrupt */
1239
	/* save and disable interrupt */
1240
	imr = readl(port->membase + CDNS_UART_IMR);
1240
	imr = readl(port->membase + CDNS_UART_IMR);
Lines 1257-1263 static void cdns_uart_console_write(struct console *co, const char *s, Link Here
1257
	writel(imr, port->membase + CDNS_UART_IER);
1257
	writel(imr, port->membase + CDNS_UART_IER);
1258
1258
1259
	if (locked)
1259
	if (locked)
1260
		spin_unlock_irqrestore(&port->lock, flags);
1260
		uart_port_unlock_irqrestore(port, flags);
1261
}
1261
}
1262
1262
1263
/**
1263
/**
Lines 1325-1331 static int cdns_uart_suspend(struct device *device) Link Here
1325
	if (console_suspend_enabled && uart_console(port) && may_wake) {
1325
	if (console_suspend_enabled && uart_console(port) && may_wake) {
1326
		unsigned long flags;
1326
		unsigned long flags;
1327
1327
1328
		spin_lock_irqsave(&port->lock, flags);
1328
		uart_port_lock_irqsave(port, &flags);
1329
		/* Empty the receive FIFO 1st before making changes */
1329
		/* Empty the receive FIFO 1st before making changes */
1330
		while (!(readl(port->membase + CDNS_UART_SR) &
1330
		while (!(readl(port->membase + CDNS_UART_SR) &
1331
					CDNS_UART_SR_RXEMPTY))
1331
					CDNS_UART_SR_RXEMPTY))
Lines 1334-1340 static int cdns_uart_suspend(struct device *device) Link Here
1334
		writel(1, port->membase + CDNS_UART_RXWM);
1334
		writel(1, port->membase + CDNS_UART_RXWM);
1335
		/* disable RX timeout interrups */
1335
		/* disable RX timeout interrups */
1336
		writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
1336
		writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
1337
		spin_unlock_irqrestore(&port->lock, flags);
1337
		uart_port_unlock_irqrestore(port, flags);
1338
	}
1338
	}
1339
1339
1340
	/*
1340
	/*
Lines 1372-1378 static int cdns_uart_resume(struct device *device) Link Here
1372
			return ret;
1372
			return ret;
1373
		}
1373
		}
1374
1374
1375
		spin_lock_irqsave(&port->lock, flags);
1375
		uart_port_lock_irqsave(port, &flags);
1376
1376
1377
		/* Set TX/RX Reset */
1377
		/* Set TX/RX Reset */
1378
		ctrl_reg = readl(port->membase + CDNS_UART_CR);
1378
		ctrl_reg = readl(port->membase + CDNS_UART_CR);
Lines 1392-1405 static int cdns_uart_resume(struct device *device) Link Here
1392
1392
1393
		clk_disable(cdns_uart->uartclk);
1393
		clk_disable(cdns_uart->uartclk);
1394
		clk_disable(cdns_uart->pclk);
1394
		clk_disable(cdns_uart->pclk);
1395
		spin_unlock_irqrestore(&port->lock, flags);
1395
		uart_port_unlock_irqrestore(port, flags);
1396
	} else {
1396
	} else {
1397
		spin_lock_irqsave(&port->lock, flags);
1397
		uart_port_lock_irqsave(port, &flags);
1398
		/* restore original rx trigger level */
1398
		/* restore original rx trigger level */
1399
		writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
1399
		writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
1400
		/* enable RX timeout interrupt */
1400
		/* enable RX timeout interrupt */
1401
		writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
1401
		writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
1402
		spin_unlock_irqrestore(&port->lock, flags);
1402
		uart_port_unlock_irqrestore(port, flags);
1403
	}
1403
	}
1404
1404
1405
	return uart_resume_port(cdns_uart->cdns_uart_driver, port);
1405
	return uart_resume_port(cdns_uart->cdns_uart_driver, port);
(-)a/drivers/tty/tty_io.c (-2 / +9 lines)
Lines 3540-3547 static ssize_t show_cons_active(struct device *dev, Link Here
3540
	for_each_console(c) {
3540
	for_each_console(c) {
3541
		if (!c->device)
3541
		if (!c->device)
3542
			continue;
3542
			continue;
3543
		if (!c->write)
3543
		if (c->flags & CON_NBCON) {
3544
			continue;
3544
			if (!c->write_atomic &&
3545
			    !(c->write_thread && c->kthread)) {
3546
				continue;
3547
			}
3548
		} else {
3549
			if (!c->write)
3550
				continue;
3551
		}
3545
		if ((c->flags & CON_ENABLED) == 0)
3552
		if ((c->flags & CON_ENABLED) == 0)
3546
			continue;
3553
			continue;
3547
		cs[i++] = c;
3554
		cs[i++] = c;
(-)a/fs/proc/consoles.c (-3 / +11 lines)
Lines 21-32 static int show_console_dev(struct seq_file *m, void *v) Link Here
21
		{ CON_ENABLED,		'E' },
21
		{ CON_ENABLED,		'E' },
22
		{ CON_CONSDEV,		'C' },
22
		{ CON_CONSDEV,		'C' },
23
		{ CON_BOOT,		'B' },
23
		{ CON_BOOT,		'B' },
24
		{ CON_NBCON,		'N' },
24
		{ CON_PRINTBUFFER,	'p' },
25
		{ CON_PRINTBUFFER,	'p' },
25
		{ CON_BRL,		'b' },
26
		{ CON_BRL,		'b' },
26
		{ CON_ANYTIME,		'a' },
27
		{ CON_ANYTIME,		'a' },
27
	};
28
	};
28
	char flags[ARRAY_SIZE(con_flags) + 1];
29
	char flags[ARRAY_SIZE(con_flags) + 1];
29
	struct console *con = v;
30
	struct console *con = v;
31
	char con_write = '-';
30
	unsigned int a;
32
	unsigned int a;
31
	dev_t dev = 0;
33
	dev_t dev = 0;
32
34
Lines 57-65 static int show_console_dev(struct seq_file *m, void *v) Link Here
57
	seq_setwidth(m, 21 - 1);
59
	seq_setwidth(m, 21 - 1);
58
	seq_printf(m, "%s%d", con->name, con->index);
60
	seq_printf(m, "%s%d", con->name, con->index);
59
	seq_pad(m, ' ');
61
	seq_pad(m, ' ');
60
	seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
62
	if (con->flags & CON_NBCON) {
61
			con->write ? 'W' : '-', con->unblank ? 'U' : '-',
63
		if (con->write_atomic || con->write_thread)
62
			flags);
64
			con_write = 'W';
65
	} else {
66
		if (con->write)
67
			con_write = 'W';
68
	}
69
	seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-', con_write,
70
		   con->unblank ? 'U' : '-', flags);
63
	if (dev)
71
	if (dev)
64
		seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
72
		seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
65
73
(-)a/include/linux/bottom_half.h (+2 lines)
Lines 35-42 static inline void local_bh_enable(void) Link Here
35
35
36
#ifdef CONFIG_PREEMPT_RT
36
#ifdef CONFIG_PREEMPT_RT
37
extern bool local_bh_blocked(void);
37
extern bool local_bh_blocked(void);
38
extern void softirq_preempt(void);
38
#else
39
#else
39
static inline bool local_bh_blocked(void) { return false; }
40
static inline bool local_bh_blocked(void) { return false; }
41
static inline void softirq_preempt(void) { }
40
#endif
42
#endif
41
43
42
#endif /* _LINUX_BH_H */
44
#endif /* _LINUX_BH_H */
(-)a/include/linux/console.h (+148 lines)
Lines 16-22 Link Here
16
16
17
#include <linux/atomic.h>
17
#include <linux/atomic.h>
18
#include <linux/bits.h>
18
#include <linux/bits.h>
19
#include <linux/irq_work.h>
19
#include <linux/rculist.h>
20
#include <linux/rculist.h>
21
#include <linux/rcuwait.h>
20
#include <linux/types.h>
22
#include <linux/types.h>
21
23
22
struct vc_data;
24
struct vc_data;
Lines 156-161 static inline int con_debug_leave(void) Link Here
156
 *			/dev/kmesg which requires a larger output buffer.
158
 *			/dev/kmesg which requires a larger output buffer.
157
 * @CON_SUSPENDED:	Indicates if a console is suspended. If true, the
159
 * @CON_SUSPENDED:	Indicates if a console is suspended. If true, the
158
 *			printing callbacks must not be called.
160
 *			printing callbacks must not be called.
161
 * @CON_NBCON:		Console can operate outside of the legacy style console_lock
162
 *			constraints.
159
 */
163
 */
160
enum cons_flags {
164
enum cons_flags {
161
	CON_PRINTBUFFER		= BIT(0),
165
	CON_PRINTBUFFER		= BIT(0),
Lines 166-171 enum cons_flags { Link Here
166
	CON_BRL			= BIT(5),
170
	CON_BRL			= BIT(5),
167
	CON_EXTENDED		= BIT(6),
171
	CON_EXTENDED		= BIT(6),
168
	CON_SUSPENDED		= BIT(7),
172
	CON_SUSPENDED		= BIT(7),
173
	CON_NBCON		= BIT(8),
174
};
175
176
/**
177
 * struct nbcon_state - console state for nbcon consoles
178
 * @atom:	Compound of the state fields for atomic operations
179
 *
180
 * @req_prio:		The priority of a handover request
181
 * @prio:		The priority of the current owner
182
 * @unsafe:		Console is busy in a non takeover region
183
 * @unsafe_takeover:	A hostile takeover in an unsafe state happened in the
184
 *			past. The console cannot be safe until re-initialized.
185
 * @cpu:		The CPU on which the owner runs
186
 *
187
 * To be used for reading and preparing of the value stored in the nbcon
188
 * state variable @console::nbcon_state.
189
 *
190
 * The @prio and @req_prio fields are particularly important to allow
191
 * spin-waiting to timeout and give up without the risk of a waiter being
192
 * assigned the lock after giving up.
193
 */
194
struct nbcon_state {
195
	union {
196
		unsigned int	atom;
197
		struct {
198
			unsigned int prio		:  2;
199
			unsigned int req_prio		:  2;
200
			unsigned int unsafe		:  1;
201
			unsigned int unsafe_takeover	:  1;
202
			unsigned int cpu		: 24;
203
		};
204
	};
205
};
206
207
/*
208
 * The nbcon_state struct is used to easily create and interpret values that
209
 * are stored in the @console::nbcon_state variable. Ensure this struct stays
210
 * within the size boundaries of the atomic variable's underlying type in
211
 * order to avoid any accidental truncation.
212
 */
213
static_assert(sizeof(struct nbcon_state) <= sizeof(int));
214
215
/**
216
 * nbcon_prio - console owner priority for nbcon consoles
217
 * @NBCON_PRIO_NONE:		Unused
218
 * @NBCON_PRIO_NORMAL:		Normal (non-emergency) usage
219
 * @NBCON_PRIO_EMERGENCY:	Emergency output (WARN/OOPS...)
220
 * @NBCON_PRIO_PANIC:		Panic output
221
 * @NBCON_PRIO_MAX:		The number of priority levels
222
 *
223
 * A higher priority context can takeover the console when it is
224
 * in the safe state. The final attempt to flush consoles in panic()
225
 * can be allowed to do so even in an unsafe state (Hope and pray).
226
 */
227
enum nbcon_prio {
228
	NBCON_PRIO_NONE = 0,
229
	NBCON_PRIO_NORMAL,
230
	NBCON_PRIO_EMERGENCY,
231
	NBCON_PRIO_PANIC,
232
	NBCON_PRIO_MAX,
233
};
234
235
struct console;
236
struct printk_buffers;
237
238
/**
239
 * struct nbcon_context - Context for console acquire/release
240
 * @console:			The associated console
241
 * @spinwait_max_us:		Limit for spin-wait acquire
242
 * @prio:			Priority of the context
243
 * @allow_unsafe_takeover:	Allow performing takeover even if unsafe. Can
244
 *				be used only with NBCON_PRIO_PANIC @prio. It
245
 *				might cause a system freeze when the console
246
 *				is used later.
247
 * @backlog:			Ringbuffer has pending records
248
 * @pbufs:			Pointer to the text buffer for this context
249
 * @seq:			The sequence number to print for this context
250
 */
251
struct nbcon_context {
252
	/* members set by caller */
253
	struct console		*console;
254
	unsigned int		spinwait_max_us;
255
	enum nbcon_prio		prio;
256
	unsigned int		allow_unsafe_takeover	: 1;
257
258
	/* members set by emit */
259
	unsigned int		backlog			: 1;
260
261
	/* members set by acquire */
262
	struct printk_buffers	*pbufs;
263
	u64			seq;
264
};
265
266
/**
267
 * struct nbcon_write_context - Context handed to the nbcon write callbacks
268
 * @ctxt:		The core console context
269
 * @outbuf:		Pointer to the text buffer for output
270
 * @len:		Length to write
271
 * @unsafe_takeover:	If a hostile takeover in an unsafe state has occurred
272
 */
273
struct nbcon_write_context {
274
	struct nbcon_context	__private ctxt;
275
	char			*outbuf;
276
	unsigned int		len;
277
	bool			unsafe_takeover;
169
};
278
};
170
279
171
/**
280
/**
Lines 187-192 enum cons_flags { Link Here
187
 * @dropped:		Number of unreported dropped ringbuffer records
296
 * @dropped:		Number of unreported dropped ringbuffer records
188
 * @data:		Driver private data
297
 * @data:		Driver private data
189
 * @node:		hlist node for the console list
298
 * @node:		hlist node for the console list
299
 *
300
 * @write_atomic:	Write callback for atomic context
301
 * @write_thread:	Write callback for non-atomic context
302
 * @uart_port:		Callback to provide the associated uart port
303
 * @nbcon_state:	State for nbcon consoles
304
 * @nbcon_seq:		Sequence number of the next record for nbcon to print
305
 * @pbufs:		Pointer to nbcon private buffer
306
 * @locked_port:	True, if the port lock is locked by nbcon
307
 * @kthread:		Printer kthread for this console
308
 * @rcuwait:		RCU-safe wait object for @kthread waking
309
 * @irq_work:		Defer @kthread waking to IRQ work context
190
 */
310
 */
191
struct console {
311
struct console {
192
	char			name[16];
312
	char			name[16];
Lines 206-211 struct console { Link Here
206
	unsigned long		dropped;
326
	unsigned long		dropped;
207
	void			*data;
327
	void			*data;
208
	struct hlist_node	node;
328
	struct hlist_node	node;
329
330
	/* nbcon console specific members */
331
	bool			(*write_atomic)(struct console *con,
332
						struct nbcon_write_context *wctxt);
333
	bool			(*write_thread)(struct console *con,
334
						struct nbcon_write_context *wctxt);
335
	struct uart_port *	(*uart_port)(struct console *con);
336
	atomic_t		__private nbcon_state;
337
	atomic_long_t		__private nbcon_seq;
338
	struct printk_buffers	*pbufs;
339
	bool			locked_port;
340
	struct task_struct	*kthread;
341
	struct rcuwait		rcuwait;
342
	struct irq_work		irq_work;
209
};
343
};
210
344
211
#ifdef CONFIG_LOCKDEP
345
#ifdef CONFIG_LOCKDEP
Lines 332-337 static inline bool console_is_registered(const struct console *con) Link Here
332
	lockdep_assert_console_list_lock_held();			\
466
	lockdep_assert_console_list_lock_held();			\
333
	hlist_for_each_entry(con, &console_list, node)
467
	hlist_for_each_entry(con, &console_list, node)
334
468
469
#ifdef CONFIG_PRINTK
470
extern enum nbcon_prio nbcon_atomic_enter(enum nbcon_prio prio);
471
extern void nbcon_atomic_exit(enum nbcon_prio prio, enum nbcon_prio prev_prio);
472
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
473
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
474
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
475
#else
476
static inline enum nbcon_prio nbcon_atomic_enter(enum nbcon_prio prio) { return NBCON_PRIO_NONE; }
477
static inline void nbcon_atomic_exit(enum nbcon_prio prio, enum nbcon_prio prev_prio) { }
478
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
479
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
480
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
481
#endif
482
335
extern int console_set_on_cmdline;
483
extern int console_set_on_cmdline;
336
extern struct console *early_console;
484
extern struct console *early_console;
337
485
(-)a/include/linux/entry-common.h (-1 / +1 lines)
Lines 60-66 Link Here
60
#define EXIT_TO_USER_MODE_WORK						\
60
#define EXIT_TO_USER_MODE_WORK						\
61
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
61
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
62
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
62
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
63
	 ARCH_EXIT_TO_USER_MODE_WORK)
63
	 _TIF_NEED_RESCHED_LAZY | ARCH_EXIT_TO_USER_MODE_WORK)
64
64
65
/**
65
/**
66
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
66
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
(-)a/include/linux/entry-kvm.h (-1 / +1 lines)
Lines 18-24 Link Here
18
18
19
#define XFER_TO_GUEST_MODE_WORK						\
19
#define XFER_TO_GUEST_MODE_WORK						\
20
	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL |	\
20
	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL |	\
21
	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
21
	 _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY | ARCH_XFER_TO_GUEST_MODE_WORK)
22
22
23
struct kvm_vcpu;
23
struct kvm_vcpu;
24
24
(-)a/include/linux/interrupt.h (+29 lines)
Lines 609-614 extern void __raise_softirq_irqoff(unsigned int nr); Link Here
609
extern void raise_softirq_irqoff(unsigned int nr);
609
extern void raise_softirq_irqoff(unsigned int nr);
610
extern void raise_softirq(unsigned int nr);
610
extern void raise_softirq(unsigned int nr);
611
611
612
#ifdef CONFIG_PREEMPT_RT
613
DECLARE_PER_CPU(struct task_struct *, timersd);
614
DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
615
616
extern void raise_timer_softirq(void);
617
extern void raise_hrtimer_softirq(void);
618
619
static inline unsigned int local_pending_timers(void)
620
{
621
        return __this_cpu_read(pending_timer_softirq);
622
}
623
624
#else
625
static inline void raise_timer_softirq(void)
626
{
627
	raise_softirq(TIMER_SOFTIRQ);
628
}
629
630
static inline void raise_hrtimer_softirq(void)
631
{
632
	raise_softirq_irqoff(HRTIMER_SOFTIRQ);
633
}
634
635
static inline unsigned int local_pending_timers(void)
636
{
637
        return local_softirq_pending();
638
}
639
#endif
640
612
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
641
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
613
642
614
static inline struct task_struct *this_cpu_ksoftirqd(void)
643
static inline struct task_struct *this_cpu_ksoftirqd(void)
(-)a/include/linux/netdevice.h (+4 lines)
Lines 3236-3242 struct softnet_data { Link Here
3236
	int			defer_count;
3236
	int			defer_count;
3237
	int			defer_ipi_scheduled;
3237
	int			defer_ipi_scheduled;
3238
	struct sk_buff		*defer_list;
3238
	struct sk_buff		*defer_list;
3239
#ifndef CONFIG_PREEMPT_RT
3239
	call_single_data_t	defer_csd;
3240
	call_single_data_t	defer_csd;
3241
#else
3242
	struct work_struct	defer_work;
3243
#endif
3240
};
3244
};
3241
3245
3242
static inline void input_queue_head_incr(struct softnet_data *sd)
3246
static inline void input_queue_head_incr(struct softnet_data *sd)
(-)a/include/linux/preempt.h (-2 / +8 lines)
Lines 219-233 do { \ Link Here
219
#define preempt_enable() \
219
#define preempt_enable() \
220
do { \
220
do { \
221
	barrier(); \
221
	barrier(); \
222
	if (unlikely(preempt_count_dec_and_test())) \
222
	if (unlikely(preempt_count_dec_and_test())) { \
223
		instrumentation_begin(); \
223
		__preempt_schedule(); \
224
		__preempt_schedule(); \
225
		instrumentation_end(); \
226
	} \
224
} while (0)
227
} while (0)
225
228
226
#define preempt_enable_notrace() \
229
#define preempt_enable_notrace() \
227
do { \
230
do { \
228
	barrier(); \
231
	barrier(); \
229
	if (unlikely(__preempt_count_dec_and_test())) \
232
	if (unlikely(__preempt_count_dec_and_test())) { \
233
		instrumentation_begin(); \
230
		__preempt_schedule_notrace(); \
234
		__preempt_schedule_notrace(); \
235
		instrumentation_end(); \
236
	} \
231
} while (0)
237
} while (0)
232
238
233
#define preempt_check_resched() \
239
#define preempt_check_resched() \
(-)a/include/linux/printk.h (-2 / +23 lines)
Lines 9-14 Link Here
9
#include <linux/ratelimit_types.h>
9
#include <linux/ratelimit_types.h>
10
#include <linux/once_lite.h>
10
#include <linux/once_lite.h>
11
11
12
struct uart_port;
13
12
extern const char linux_banner[];
14
extern const char linux_banner[];
13
extern const char linux_proc_banner[];
15
extern const char linux_proc_banner[];
14
16
Lines 159-171 __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); Link Here
159
161
160
extern void __printk_safe_enter(void);
162
extern void __printk_safe_enter(void);
161
extern void __printk_safe_exit(void);
163
extern void __printk_safe_exit(void);
164
extern void __printk_deferred_enter(void);
165
extern void __printk_deferred_exit(void);
166
162
/*
167
/*
163
 * The printk_deferred_enter/exit macros are available only as a hack for
168
 * The printk_deferred_enter/exit macros are available only as a hack for
164
 * some code paths that need to defer all printk console printing. Interrupts
169
 * some code paths that need to defer all printk console printing. Interrupts
165
 * must be disabled for the deferred duration.
170
 * must be disabled for the deferred duration.
166
 */
171
 */
167
#define printk_deferred_enter __printk_safe_enter
172
#define printk_deferred_enter() __printk_deferred_enter()
168
#define printk_deferred_exit __printk_safe_exit
173
#define printk_deferred_exit() __printk_deferred_exit()
169
174
170
/*
175
/*
171
 * Please don't use printk_ratelimit(), because it shares ratelimiting state
176
 * Please don't use printk_ratelimit(), because it shares ratelimiting state
Lines 192-197 void show_regs_print_info(const char *log_lvl); Link Here
192
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
197
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
193
extern asmlinkage void dump_stack(void) __cold;
198
extern asmlinkage void dump_stack(void) __cold;
194
void printk_trigger_flush(void);
199
void printk_trigger_flush(void);
200
extern void nbcon_atomic_flush_all(void);
201
extern void nbcon_handle_port_lock(struct uart_port *up);
202
extern void nbcon_handle_port_unlock(struct uart_port *up);
195
#else
203
#else
196
static inline __printf(1, 0)
204
static inline __printf(1, 0)
197
int vprintk(const char *s, va_list args)
205
int vprintk(const char *s, va_list args)
Lines 271-276 static inline void dump_stack(void) Link Here
271
static inline void printk_trigger_flush(void)
279
static inline void printk_trigger_flush(void)
272
{
280
{
273
}
281
}
282
283
static inline void nbcon_atomic_flush_all(void)
284
{
285
}
286
287
static inline void nbcon_handle_port_lock(struct uart_port *up)
288
{
289
}
290
291
static inline void nbcon_handle_port_unlock(struct uart_port *up)
292
{
293
}
294
274
#endif
295
#endif
275
296
276
#ifdef CONFIG_SMP
297
#ifdef CONFIG_SMP
(-)a/include/linux/rcupdate.h (+6 lines)
Lines 303-308 static inline void rcu_lock_acquire(struct lockdep_map *map) Link Here
303
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
303
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
304
}
304
}
305
305
306
static inline void rcu_try_lock_acquire(struct lockdep_map *map)
307
{
308
	lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
309
}
310
306
static inline void rcu_lock_release(struct lockdep_map *map)
311
static inline void rcu_lock_release(struct lockdep_map *map)
307
{
312
{
308
	lock_release(map, _THIS_IP_);
313
	lock_release(map, _THIS_IP_);
Lines 317-322 int rcu_read_lock_any_held(void); Link Here
317
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
322
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
318
323
319
# define rcu_lock_acquire(a)		do { } while (0)
324
# define rcu_lock_acquire(a)		do { } while (0)
325
# define rcu_try_lock_acquire(a)	do { } while (0)
320
# define rcu_lock_release(a)		do { } while (0)
326
# define rcu_lock_release(a)		do { } while (0)
321
327
322
static inline int rcu_read_lock_held(void)
328
static inline int rcu_read_lock_held(void)
(-)a/include/linux/sched.h (-5 / +11 lines)
Lines 911-916 struct task_struct { Link Here
911
	 * ->sched_remote_wakeup gets used, so it can be in this word.
911
	 * ->sched_remote_wakeup gets used, so it can be in this word.
912
	 */
912
	 */
913
	unsigned			sched_remote_wakeup:1;
913
	unsigned			sched_remote_wakeup:1;
914
#ifdef CONFIG_RT_MUTEXES
915
	unsigned			sched_rt_mutex:1;
916
#endif
914
917
915
	/* Bit to tell LSMs we're in execve(): */
918
	/* Bit to tell LSMs we're in execve(): */
916
	unsigned			in_execve:1;
919
	unsigned			in_execve:1;
Lines 1902-1907 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu Link Here
1902
}
1905
}
1903
#endif
1906
#endif
1904
1907
1908
extern bool task_is_pi_boosted(const struct task_struct *p);
1905
extern int yield_to(struct task_struct *p, bool preempt);
1909
extern int yield_to(struct task_struct *p, bool preempt);
1906
extern void set_user_nice(struct task_struct *p, long nice);
1910
extern void set_user_nice(struct task_struct *p, long nice);
1907
extern int task_prio(const struct task_struct *p);
1911
extern int task_prio(const struct task_struct *p);
Lines 2046-2062 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, Link Here
2046
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
2050
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
2047
}
2051
}
2048
2052
2049
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2053
static inline bool test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2050
{
2054
{
2051
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2055
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2052
}
2056
}
2053
2057
2054
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2058
static inline bool test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2055
{
2059
{
2056
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2060
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2057
}
2061
}
2058
2062
2059
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2063
static inline bool test_tsk_thread_flag(struct task_struct *tsk, int flag)
2060
{
2064
{
2061
	return test_ti_thread_flag(task_thread_info(tsk), flag);
2065
	return test_ti_thread_flag(task_thread_info(tsk), flag);
2062
}
2066
}
Lines 2069-2077 static inline void set_tsk_need_resched(struct task_struct *tsk) Link Here
2069
static inline void clear_tsk_need_resched(struct task_struct *tsk)
2073
static inline void clear_tsk_need_resched(struct task_struct *tsk)
2070
{
2074
{
2071
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2075
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2076
	if (IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO))
2077
		clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
2072
}
2078
}
2073
2079
2074
static inline int test_tsk_need_resched(struct task_struct *tsk)
2080
static inline bool test_tsk_need_resched(struct task_struct *tsk)
2075
{
2081
{
2076
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2082
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2077
}
2083
}
Lines 2252-2258 static inline int rwlock_needbreak(rwlock_t *lock) Link Here
2252
2258
2253
static __always_inline bool need_resched(void)
2259
static __always_inline bool need_resched(void)
2254
{
2260
{
2255
	return unlikely(tif_need_resched());
2261
	return unlikely(tif_need_resched_lazy() || tif_need_resched());
2256
}
2262
}
2257
2263
2258
/*
2264
/*
(-)a/include/linux/sched/idle.h (-4 / +4 lines)
Lines 63-69 static __always_inline bool __must_check current_set_polling_and_test(void) Link Here
63
	 */
63
	 */
64
	smp_mb__after_atomic();
64
	smp_mb__after_atomic();
65
65
66
	return unlikely(tif_need_resched());
66
	return unlikely(need_resched());
67
}
67
}
68
68
69
static __always_inline bool __must_check current_clr_polling_and_test(void)
69
static __always_inline bool __must_check current_clr_polling_and_test(void)
Lines 76-82 static __always_inline bool __must_check current_clr_polling_and_test(void) Link Here
76
	 */
76
	 */
77
	smp_mb__after_atomic();
77
	smp_mb__after_atomic();
78
78
79
	return unlikely(tif_need_resched());
79
	return unlikely(need_resched());
80
}
80
}
81
81
82
#else
82
#else
Lines 85-95 static inline void __current_clr_polling(void) { } Link Here
85
85
86
static inline bool __must_check current_set_polling_and_test(void)
86
static inline bool __must_check current_set_polling_and_test(void)
87
{
87
{
88
	return unlikely(tif_need_resched());
88
	return unlikely(need_resched());
89
}
89
}
90
static inline bool __must_check current_clr_polling_and_test(void)
90
static inline bool __must_check current_clr_polling_and_test(void)
91
{
91
{
92
	return unlikely(tif_need_resched());
92
	return unlikely(need_resched());
93
}
93
}
94
#endif
94
#endif
95
95
(-)a/include/linux/sched/rt.h (+4 lines)
Lines 30-35 static inline bool task_is_realtime(struct task_struct *tsk) Link Here
30
}
30
}
31
31
32
#ifdef CONFIG_RT_MUTEXES
32
#ifdef CONFIG_RT_MUTEXES
33
extern void rt_mutex_pre_schedule(void);
34
extern void rt_mutex_schedule(void);
35
extern void rt_mutex_post_schedule(void);
36
33
/*
37
/*
34
 * Must hold either p->pi_lock or task_rq(p)->lock.
38
 * Must hold either p->pi_lock or task_rq(p)->lock.
35
 */
39
 */
(-)a/include/linux/serial_8250.h (+2 lines)
Lines 204-209 void serial8250_init_port(struct uart_8250_port *up); Link Here
204
void serial8250_set_defaults(struct uart_8250_port *up);
204
void serial8250_set_defaults(struct uart_8250_port *up);
205
void serial8250_console_write(struct uart_8250_port *up, const char *s,
205
void serial8250_console_write(struct uart_8250_port *up, const char *s,
206
			      unsigned int count);
206
			      unsigned int count);
207
bool serial8250_console_write_atomic(struct uart_8250_port *up,
208
				     struct nbcon_write_context *wctxt);
207
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
209
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
208
int serial8250_console_exit(struct uart_port *port);
210
int serial8250_console_exit(struct uart_port *port);
209
211
(-)a/include/linux/serial_core.h (-6 / +99 lines)
Lines 588-593 struct uart_port { Link Here
588
	void			*private_data;		/* generic platform data pointer */
588
	void			*private_data;		/* generic platform data pointer */
589
};
589
};
590
590
591
/**
592
 * uart_port_lock - Lock the UART port
593
 * @up:		Pointer to UART port structure
594
 */
595
static inline void uart_port_lock(struct uart_port *up)
596
{
597
	spin_lock(&up->lock);
598
	nbcon_handle_port_lock(up);
599
}
600
601
/**
602
 * uart_port_lock_irq - Lock the UART port and disable interrupts
603
 * @up:		Pointer to UART port structure
604
 */
605
static inline void uart_port_lock_irq(struct uart_port *up)
606
{
607
	spin_lock_irq(&up->lock);
608
	nbcon_handle_port_lock(up);
609
}
610
611
/**
612
 * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
613
 * @up:		Pointer to UART port structure
614
 * @flags:	Pointer to interrupt flags storage
615
 */
616
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
617
{
618
	spin_lock_irqsave(&up->lock, *flags);
619
	nbcon_handle_port_lock(up);
620
}
621
622
/**
623
 * uart_port_trylock - Try to lock the UART port
624
 * @up:		Pointer to UART port structure
625
 *
626
 * Returns: True if lock was acquired, false otherwise
627
 */
628
static inline bool uart_port_trylock(struct uart_port *up)
629
{
630
	if (!spin_trylock(&up->lock))
631
		return false;
632
633
	nbcon_handle_port_lock(up);
634
	return true;
635
}
636
637
/**
638
 * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
639
 * @up:		Pointer to UART port structure
640
 * @flags:	Pointer to interrupt flags storage
641
 *
642
 * Returns: True if lock was acquired, false otherwise
643
 */
644
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
645
{
646
	if (!spin_trylock_irqsave(&up->lock, *flags))
647
		return false;
648
649
	nbcon_handle_port_lock(up);
650
	return true;
651
}
652
653
/**
654
 * uart_port_unlock - Unlock the UART port
655
 * @up:		Pointer to UART port structure
656
 */
657
static inline void uart_port_unlock(struct uart_port *up)
658
{
659
	nbcon_handle_port_unlock(up);
660
	spin_unlock(&up->lock);
661
}
662
663
/**
664
 * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
665
 * @up:		Pointer to UART port structure
666
 */
667
static inline void uart_port_unlock_irq(struct uart_port *up)
668
{
669
	nbcon_handle_port_unlock(up);
670
	spin_unlock_irq(&up->lock);
671
}
672
673
/**
674
 * uart_port_lock_irqrestore - Unlock the UART port, restore interrupts
675
 * @up:		Pointer to UART port structure
676
 * @flags:	The saved interrupt flags for restore
677
 */
678
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
679
{
680
	nbcon_handle_port_unlock(up);
681
	spin_unlock_irqrestore(&up->lock, flags);
682
}
683
591
static inline int serial_port_in(struct uart_port *up, int offset)
684
static inline int serial_port_in(struct uart_port *up, int offset)
592
{
685
{
593
	return up->serial_in(up, offset);
686
	return up->serial_in(up, offset);
Lines 956-969 static inline void uart_unlock_and_check_sysrq(struct uart_port *port) Link Here
956
	u8 sysrq_ch;
1049
	u8 sysrq_ch;
957
1050
958
	if (!port->has_sysrq) {
1051
	if (!port->has_sysrq) {
959
		spin_unlock(&port->lock);
1052
		uart_port_unlock(port);
960
		return;
1053
		return;
961
	}
1054
	}
962
1055
963
	sysrq_ch = port->sysrq_ch;
1056
	sysrq_ch = port->sysrq_ch;
964
	port->sysrq_ch = 0;
1057
	port->sysrq_ch = 0;
965
1058
966
	spin_unlock(&port->lock);
1059
	uart_port_unlock(port);
967
1060
968
	if (sysrq_ch)
1061
	if (sysrq_ch)
969
		handle_sysrq(sysrq_ch);
1062
		handle_sysrq(sysrq_ch);
Lines 975-988 static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port Link Here
975
	u8 sysrq_ch;
1068
	u8 sysrq_ch;
976
1069
977
	if (!port->has_sysrq) {
1070
	if (!port->has_sysrq) {
978
		spin_unlock_irqrestore(&port->lock, flags);
1071
		uart_port_unlock_irqrestore(port, flags);
979
		return;
1072
		return;
980
	}
1073
	}
981
1074
982
	sysrq_ch = port->sysrq_ch;
1075
	sysrq_ch = port->sysrq_ch;
983
	port->sysrq_ch = 0;
1076
	port->sysrq_ch = 0;
984
1077
985
	spin_unlock_irqrestore(&port->lock, flags);
1078
	uart_port_unlock_irqrestore(port, flags);
986
1079
987
	if (sysrq_ch)
1080
	if (sysrq_ch)
988
		handle_sysrq(sysrq_ch);
1081
		handle_sysrq(sysrq_ch);
Lines 998-1009 static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch) Link Here
998
}
1091
}
999
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
1092
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
1000
{
1093
{
1001
	spin_unlock(&port->lock);
1094
	uart_port_unlock(port);
1002
}
1095
}
1003
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
1096
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
1004
		unsigned long flags)
1097
		unsigned long flags)
1005
{
1098
{
1006
	spin_unlock_irqrestore(&port->lock, flags);
1099
	uart_port_unlock_irqrestore(port, flags);
1007
}
1100
}
1008
#endif	/* CONFIG_MAGIC_SYSRQ_SERIAL */
1101
#endif	/* CONFIG_MAGIC_SYSRQ_SERIAL */
1009
1102
(-)a/include/linux/srcu.h (-1 / +1 lines)
Lines 229-235 static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp Link Here
229
229
230
	srcu_check_nmi_safety(ssp, true);
230
	srcu_check_nmi_safety(ssp, true);
231
	retval = __srcu_read_lock_nmisafe(ssp);
231
	retval = __srcu_read_lock_nmisafe(ssp);
232
	rcu_lock_acquire(&ssp->dep_map);
232
	rcu_try_lock_acquire(&ssp->dep_map);
233
	return retval;
233
	return retval;
234
}
234
}
235
235
(-)a/include/linux/thread_info.h (+24 lines)
Lines 59-64 enum syscall_work_bit { Link Here
59
59
60
#include <asm/thread_info.h>
60
#include <asm/thread_info.h>
61
61
62
#ifdef CONFIG_PREEMPT_BUILD_AUTO
63
# define TIF_NEED_RESCHED_LAZY		TIF_ARCH_RESCHED_LAZY
64
# define _TIF_NEED_RESCHED_LAZY		_TIF_ARCH_RESCHED_LAZY
65
# define TIF_NEED_RESCHED_LAZY_OFFSET	(TIF_NEED_RESCHED_LAZY - TIF_NEED_RESCHED)
66
#else
67
# define TIF_NEED_RESCHED_LAZY		TIF_NEED_RESCHED
68
# define _TIF_NEED_RESCHED_LAZY		_TIF_NEED_RESCHED
69
# define TIF_NEED_RESCHED_LAZY_OFFSET	0
70
#endif
71
62
#ifdef __KERNEL__
72
#ifdef __KERNEL__
63
73
64
#ifndef arch_set_restart_data
74
#ifndef arch_set_restart_data
Lines 185-190 static __always_inline bool tif_need_resched(void) Link Here
185
			     (unsigned long *)(&current_thread_info()->flags));
195
			     (unsigned long *)(&current_thread_info()->flags));
186
}
196
}
187
197
198
static __always_inline bool tif_need_resched_lazy(void)
199
{
200
	return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
201
		arch_test_bit(TIF_NEED_RESCHED_LAZY,
202
			      (unsigned long *)(&current_thread_info()->flags));
203
}
204
188
#else
205
#else
189
206
190
static __always_inline bool tif_need_resched(void)
207
static __always_inline bool tif_need_resched(void)
Lines 193-198 static __always_inline bool tif_need_resched(void) Link Here
193
			(unsigned long *)(&current_thread_info()->flags));
210
			(unsigned long *)(&current_thread_info()->flags));
194
}
211
}
195
212
213
static __always_inline bool tif_need_resched_lazy(void)
214
{
215
	return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
216
		test_bit(TIF_NEED_RESCHED_LAZY,
217
			 (unsigned long *)(&current_thread_info()->flags));
218
}
219
196
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
220
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
197
221
198
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
222
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
(-)a/include/linux/trace_events.h (-4 / +4 lines)
Lines 178-185 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); Link Here
178
178
179
enum trace_flag_type {
179
enum trace_flag_type {
180
	TRACE_FLAG_IRQS_OFF		= 0x01,
180
	TRACE_FLAG_IRQS_OFF		= 0x01,
181
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
181
	TRACE_FLAG_NEED_RESCHED		= 0x02,
182
	TRACE_FLAG_NEED_RESCHED		= 0x04,
182
	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x04,
183
	TRACE_FLAG_HARDIRQ		= 0x08,
183
	TRACE_FLAG_HARDIRQ		= 0x08,
184
	TRACE_FLAG_SOFTIRQ		= 0x10,
184
	TRACE_FLAG_SOFTIRQ		= 0x10,
185
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
185
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
Lines 205-215 static inline unsigned int tracing_gen_ctx(void) Link Here
205
205
206
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
206
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
207
{
207
{
208
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
208
	return tracing_gen_ctx_irq_test(0);
209
}
209
}
210
static inline unsigned int tracing_gen_ctx(void)
210
static inline unsigned int tracing_gen_ctx(void)
211
{
211
{
212
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
212
	return tracing_gen_ctx_irq_test(0);
213
}
213
}
214
#endif
214
#endif
215
215
(-)a/kernel/Kconfig.preempt (-1 / +16 lines)
Lines 11-16 config PREEMPT_BUILD Link Here
11
	select PREEMPTION
11
	select PREEMPTION
12
	select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
12
	select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
13
13
14
config PREEMPT_BUILD_AUTO
15
	bool
16
	select PREEMPT_BUILD
17
18
config HAVE_PREEMPT_AUTO
19
	bool
20
14
choice
21
choice
15
	prompt "Preemption Model"
22
	prompt "Preemption Model"
16
	default PREEMPT_NONE
23
	default PREEMPT_NONE
Lines 67-75 config PREEMPT Link Here
67
	  embedded system with latency requirements in the milliseconds
74
	  embedded system with latency requirements in the milliseconds
68
	  range.
75
	  range.
69
76
77
config PREEMPT_AUTO
78
	bool "Automagic preemption mode with runtime tweaking support"
79
	depends on HAVE_PREEMPT_AUTO
80
	select PREEMPT_BUILD_AUTO
81
	help
82
	  Add some sensible blurb here
83
70
config PREEMPT_RT
84
config PREEMPT_RT
71
	bool "Fully Preemptible Kernel (Real-Time)"
85
	bool "Fully Preemptible Kernel (Real-Time)"
72
	depends on EXPERT && ARCH_SUPPORTS_RT
86
	depends on EXPERT && ARCH_SUPPORTS_RT
87
	select PREEMPT_BUILD_AUTO if HAVE_PREEMPT_AUTO
73
	select PREEMPTION
88
	select PREEMPTION
74
	help
89
	help
75
	  This option turns the kernel into a real-time kernel by replacing
90
	  This option turns the kernel into a real-time kernel by replacing
Lines 95-101 config PREEMPTION Link Here
95
110
96
config PREEMPT_DYNAMIC
111
config PREEMPT_DYNAMIC
97
	bool "Preemption behaviour defined on boot"
112
	bool "Preemption behaviour defined on boot"
98
	depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
113
	depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT && !PREEMPT_AUTO
99
	select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
114
	select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
100
	select PREEMPT_BUILD
115
	select PREEMPT_BUILD
101
	default y if HAVE_PREEMPT_DYNAMIC_CALL
116
	default y if HAVE_PREEMPT_DYNAMIC_CALL
(-)a/kernel/entry/common.c (-2 / +2 lines)
Lines 155-161 static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, Link Here
155
155
156
		local_irq_enable_exit_to_user(ti_work);
156
		local_irq_enable_exit_to_user(ti_work);
157
157
158
		if (ti_work & _TIF_NEED_RESCHED)
158
		if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
159
			schedule();
159
			schedule();
160
160
161
		if (ti_work & _TIF_UPROBE)
161
		if (ti_work & _TIF_UPROBE)
Lines 385-391 void raw_irqentry_exit_cond_resched(void) Link Here
385
		rcu_irq_exit_check_preempt();
385
		rcu_irq_exit_check_preempt();
386
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
386
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
387
			WARN_ON_ONCE(!on_thread_stack());
387
			WARN_ON_ONCE(!on_thread_stack());
388
		if (need_resched())
388
		if (test_tsk_need_resched(current))
389
			preempt_schedule_irq();
389
			preempt_schedule_irq();
390
	}
390
	}
391
}
391
}
(-)a/kernel/entry/kvm.c (-1 / +1 lines)
Lines 13-19 static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) Link Here
13
			return -EINTR;
13
			return -EINTR;
14
		}
14
		}
15
15
16
		if (ti_work & _TIF_NEED_RESCHED)
16
		if (ti_work & (_TIF_NEED_RESCHED | TIF_NEED_RESCHED_LAZY))
17
			schedule();
17
			schedule();
18
18
19
		if (ti_work & _TIF_NOTIFY_RESUME)
19
		if (ti_work & _TIF_NOTIFY_RESUME)
(-)a/kernel/futex/pi.c (-28 / +59 lines)
Lines 1-6 Link Here
1
// SPDX-License-Identifier: GPL-2.0-or-later
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
2
3
#include <linux/slab.h>
3
#include <linux/slab.h>
4
#include <linux/sched/rt.h>
4
#include <linux/sched/task.h>
5
#include <linux/sched/task.h>
5
6
6
#include "futex.h"
7
#include "futex.h"
Lines 610-638 int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, Link Here
610
/*
611
/*
611
 * Caller must hold a reference on @pi_state.
612
 * Caller must hold a reference on @pi_state.
612
 */
613
 */
613
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
614
static int wake_futex_pi(u32 __user *uaddr, u32 uval,
615
			 struct futex_pi_state *pi_state,
616
			 struct rt_mutex_waiter *top_waiter)
614
{
617
{
615
	struct rt_mutex_waiter *top_waiter;
616
	struct task_struct *new_owner;
618
	struct task_struct *new_owner;
617
	bool postunlock = false;
619
	bool postunlock = false;
618
	DEFINE_RT_WAKE_Q(wqh);
620
	DEFINE_RT_WAKE_Q(wqh);
619
	u32 curval, newval;
621
	u32 curval, newval;
620
	int ret = 0;
622
	int ret = 0;
621
623
622
	top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
623
	if (WARN_ON_ONCE(!top_waiter)) {
624
		/*
625
		 * As per the comment in futex_unlock_pi() this should not happen.
626
		 *
627
		 * When this happens, give up our locks and try again, giving
628
		 * the futex_lock_pi() instance time to complete, either by
629
		 * waiting on the rtmutex or removing itself from the futex
630
		 * queue.
631
		 */
632
		ret = -EAGAIN;
633
		goto out_unlock;
634
	}
635
636
	new_owner = top_waiter->task;
624
	new_owner = top_waiter->task;
637
625
638
	/*
626
	/*
Lines 1002-1007 int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl Link Here
1002
		goto no_block;
990
		goto no_block;
1003
	}
991
	}
1004
992
993
	/*
994
	 * Must be done before we enqueue the waiter, here is unfortunately
995
	 * under the hb lock, but that *should* work because it does nothing.
996
	 */
997
	rt_mutex_pre_schedule();
998
1005
	rt_mutex_init_waiter(&rt_waiter);
999
	rt_mutex_init_waiter(&rt_waiter);
1006
1000
1007
	/*
1001
	/*
Lines 1039-1057 int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl Link Here
1039
	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
1033
	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
1040
1034
1041
cleanup:
1035
cleanup:
1042
	spin_lock(q.lock_ptr);
1043
	/*
1036
	/*
1044
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
1037
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
1045
	 * first acquire the hb->lock before removing the lock from the
1038
	 * must unwind the above, however we canont lock hb->lock because
1046
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
1039
	 * rt_mutex already has a waiter enqueued and hb->lock can itself try
1047
	 * lists consistent.
1040
	 * and enqueue an rt_waiter through rtlock.
1048
	 *
1041
	 *
1049
	 * In particular; it is important that futex_unlock_pi() can not
1042
	 * Doing the cleanup without holding hb->lock can cause inconsistent
1050
	 * observe this inconsistency.
1043
	 * state between hb and pi_state, but only in the direction of not
1044
	 * seeing a waiter that is leaving.
1045
	 *
1046
	 * See futex_unlock_pi(), it deals with this inconsistency.
1047
	 *
1048
	 * There be dragons here, since we must deal with the inconsistency on
1049
	 * the way out (here), it is impossible to detect/warn about the race
1050
	 * the other way around (missing an incoming waiter).
1051
	 *
1052
	 * What could possibly go wrong...
1051
	 */
1053
	 */
1052
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
1054
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
1053
		ret = 0;
1055
		ret = 0;
1054
1056
1057
	/*
1058
	 * Now that the rt_waiter has been dequeued, it is safe to use
1059
	 * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
1060
	 * the
1061
	 */
1062
	spin_lock(q.lock_ptr);
1063
	/*
1064
	 * Waiter is unqueued.
1065
	 */
1066
	rt_mutex_post_schedule();
1055
no_block:
1067
no_block:
1056
	/*
1068
	/*
1057
	 * Fixup the pi_state owner and possibly acquire the lock if we
1069
	 * Fixup the pi_state owner and possibly acquire the lock if we
Lines 1132-1137 int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) Link Here
1132
	top_waiter = futex_top_waiter(hb, &key);
1144
	top_waiter = futex_top_waiter(hb, &key);
1133
	if (top_waiter) {
1145
	if (top_waiter) {
1134
		struct futex_pi_state *pi_state = top_waiter->pi_state;
1146
		struct futex_pi_state *pi_state = top_waiter->pi_state;
1147
		struct rt_mutex_waiter *rt_waiter;
1135
1148
1136
		ret = -EINVAL;
1149
		ret = -EINVAL;
1137
		if (!pi_state)
1150
		if (!pi_state)
Lines 1144-1165 int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) Link Here
1144
		if (pi_state->owner != current)
1157
		if (pi_state->owner != current)
1145
			goto out_unlock;
1158
			goto out_unlock;
1146
1159
1147
		get_pi_state(pi_state);
1148
		/*
1160
		/*
1149
		 * By taking wait_lock while still holding hb->lock, we ensure
1161
		 * By taking wait_lock while still holding hb->lock, we ensure
1150
		 * there is no point where we hold neither; and therefore
1162
		 * there is no point where we hold neither; and thereby
1151
		 * wake_futex_p() must observe a state consistent with what we
1163
		 * wake_futex_pi() must observe any new waiters.
1152
		 * observed.
1164
		 *
1165
		 * Since the cleanup: case in futex_lock_pi() removes the
1166
		 * rt_waiter without holding hb->lock, it is possible for
1167
		 * wake_futex_pi() to not find a waiter while the above does,
1168
		 * in this case the waiter is on the way out and it can be
1169
		 * ignored.
1153
		 *
1170
		 *
1154
		 * In particular; this forces __rt_mutex_start_proxy() to
1171
		 * In particular; this forces __rt_mutex_start_proxy() to
1155
		 * complete such that we're guaranteed to observe the
1172
		 * complete such that we're guaranteed to observe the
1156
		 * rt_waiter. Also see the WARN in wake_futex_pi().
1173
		 * rt_waiter.
1157
		 */
1174
		 */
1158
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1175
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1176
1177
		/*
1178
		 * Futex vs rt_mutex waiter state -- if there are no rt_mutex
1179
		 * waiters even though futex thinks there are, then the waiter
1180
		 * is leaving and the uncontended path is safe to take.
1181
		 */
1182
		rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
1183
		if (!rt_waiter) {
1184
			raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1185
			goto do_uncontended;
1186
		}
1187
1188
		get_pi_state(pi_state);
1159
		spin_unlock(&hb->lock);
1189
		spin_unlock(&hb->lock);
1160
1190
1161
		/* drops pi_state->pi_mutex.wait_lock */
1191
		/* drops pi_state->pi_mutex.wait_lock */
1162
		ret = wake_futex_pi(uaddr, uval, pi_state);
1192
		ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);
1163
1193
1164
		put_pi_state(pi_state);
1194
		put_pi_state(pi_state);
1165
1195
Lines 1187-1192 int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) Link Here
1187
		return ret;
1217
		return ret;
1188
	}
1218
	}
1189
1219
1220
do_uncontended:
1190
	/*
1221
	/*
1191
	 * We have no kernel internal state, i.e. no waiters in the
1222
	 * We have no kernel internal state, i.e. no waiters in the
1192
	 * kernel. Waiters which are about to queue themselves are stuck
1223
	 * kernel. Waiters which are about to queue themselves are stuck
(-)a/kernel/futex/requeue.c (-2 / +4 lines)
Lines 850-860 int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, Link Here
850
		pi_mutex = &q.pi_state->pi_mutex;
850
		pi_mutex = &q.pi_state->pi_mutex;
851
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
851
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
852
852
853
		/* Current is not longer pi_blocked_on */
853
		/*
854
		spin_lock(q.lock_ptr);
854
		 * See futex_unlock_pi()'s cleanup: comment.
855
		 */
855
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
856
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
856
			ret = 0;
857
			ret = 0;
857
858
859
		spin_lock(q.lock_ptr);
858
		debug_rt_mutex_free_waiter(&rt_waiter);
860
		debug_rt_mutex_free_waiter(&rt_waiter);
859
		/*
861
		/*
860
		 * Fixup the pi_state owner and possibly acquire the lock if we
862
		 * Fixup the pi_state owner and possibly acquire the lock if we
(-)a/kernel/ksysfs.c (+12 lines)
Lines 179-184 KERNEL_ATTR_RO(crash_elfcorehdr_size); Link Here
179
179
180
#endif /* CONFIG_CRASH_CORE */
180
#endif /* CONFIG_CRASH_CORE */
181
181
182
#if defined(CONFIG_PREEMPT_RT)
183
static ssize_t realtime_show(struct kobject *kobj,
184
			     struct kobj_attribute *attr, char *buf)
185
{
186
	return sprintf(buf, "%d\n", 1);
187
}
188
KERNEL_ATTR_RO(realtime);
189
#endif
190
182
/* whether file capabilities are enabled */
191
/* whether file capabilities are enabled */
183
static ssize_t fscaps_show(struct kobject *kobj,
192
static ssize_t fscaps_show(struct kobject *kobj,
184
				  struct kobj_attribute *attr, char *buf)
193
				  struct kobj_attribute *attr, char *buf)
Lines 274-279 static struct attribute * kernel_attrs[] = { Link Here
274
#ifndef CONFIG_TINY_RCU
283
#ifndef CONFIG_TINY_RCU
275
	&rcu_expedited_attr.attr,
284
	&rcu_expedited_attr.attr,
276
	&rcu_normal_attr.attr,
285
	&rcu_normal_attr.attr,
286
#endif
287
#ifdef CONFIG_PREEMPT_RT
288
	&realtime_attr.attr,
277
#endif
289
#endif
278
	NULL
290
	NULL
279
};
291
};
(-)a/kernel/locking/lockdep.c (+7 lines)
Lines 56-61 Link Here
56
#include <linux/kprobes.h>
56
#include <linux/kprobes.h>
57
#include <linux/lockdep.h>
57
#include <linux/lockdep.h>
58
#include <linux/context_tracking.h>
58
#include <linux/context_tracking.h>
59
#include <linux/console.h>
59
60
60
#include <asm/sections.h>
61
#include <asm/sections.h>
61
62
Lines 3967-3975 static void Link Here
3967
print_usage_bug(struct task_struct *curr, struct held_lock *this,
3968
print_usage_bug(struct task_struct *curr, struct held_lock *this,
3968
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3969
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3969
{
3970
{
3971
	enum nbcon_prio prev_prio;
3972
3970
	if (!debug_locks_off() || debug_locks_silent)
3973
	if (!debug_locks_off() || debug_locks_silent)
3971
		return;
3974
		return;
3972
3975
3976
	prev_prio = nbcon_atomic_enter(NBCON_PRIO_EMERGENCY);
3977
3973
	pr_warn("\n");
3978
	pr_warn("\n");
3974
	pr_warn("================================\n");
3979
	pr_warn("================================\n");
3975
	pr_warn("WARNING: inconsistent lock state\n");
3980
	pr_warn("WARNING: inconsistent lock state\n");
Lines 3998-4003 print_usage_bug(struct task_struct *curr, struct held_lock *this, Link Here
3998
4003
3999
	pr_warn("\nstack backtrace:\n");
4004
	pr_warn("\nstack backtrace:\n");
4000
	dump_stack();
4005
	dump_stack();
4006
4007
	nbcon_atomic_exit(NBCON_PRIO_EMERGENCY, prev_prio);
4001
}
4008
}
4002
4009
4003
/*
4010
/*
(-)a/kernel/locking/rtmutex.c (-3 / +34 lines)
Lines 218-223 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, Link Here
218
	return try_cmpxchg_acquire(&lock->owner, &old, new);
218
	return try_cmpxchg_acquire(&lock->owner, &old, new);
219
}
219
}
220
220
221
static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
222
{
223
	return rt_mutex_cmpxchg_acquire(lock, NULL, current);
224
}
225
221
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
226
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
222
						     struct task_struct *old,
227
						     struct task_struct *old,
223
						     struct task_struct *new)
228
						     struct task_struct *new)
Lines 297-302 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, Link Here
297
302
298
}
303
}
299
304
305
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
306
307
static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
308
{
309
	/*
310
	 * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
311
	 *
312
	 * Avoid unconditionally taking the slow path by using
313
	 * rt_mutex_slow_trylock() which is covered by the debug code and can
314
	 * acquire a non-contended rtmutex.
315
	 */
316
	return rt_mutex_slowtrylock(lock);
317
}
318
300
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
319
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
301
						     struct task_struct *old,
320
						     struct task_struct *old,
302
						     struct task_struct *new)
321
						     struct task_struct *new)
Lines 1613-1619 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, Link Here
1613
		raw_spin_unlock_irq(&lock->wait_lock);
1632
		raw_spin_unlock_irq(&lock->wait_lock);
1614
1633
1615
		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
1634
		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
1616
			schedule();
1635
			rt_mutex_schedule();
1617
1636
1618
		raw_spin_lock_irq(&lock->wait_lock);
1637
		raw_spin_lock_irq(&lock->wait_lock);
1619
		set_current_state(state);
1638
		set_current_state(state);
Lines 1642-1648 static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, Link Here
1642
	WARN(1, "rtmutex deadlock detected\n");
1661
	WARN(1, "rtmutex deadlock detected\n");
1643
	while (1) {
1662
	while (1) {
1644
		set_current_state(TASK_INTERRUPTIBLE);
1663
		set_current_state(TASK_INTERRUPTIBLE);
1645
		schedule();
1664
		rt_mutex_schedule();
1646
	}
1665
	}
1647
}
1666
}
1648
1667
Lines 1737-1742 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, Link Here
1737
	unsigned long flags;
1756
	unsigned long flags;
1738
	int ret;
1757
	int ret;
1739
1758
1759
	/*
1760
	 * Do all pre-schedule work here, before we queue a waiter and invoke
1761
	 * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
1762
	 * otherwise recurse back into task_blocks_on_rt_mutex() through
1763
	 * rtlock_slowlock() and will then enqueue a second waiter for this
1764
	 * same task and things get really confusing real fast.
1765
	 */
1766
	rt_mutex_pre_schedule();
1767
1740
	/*
1768
	/*
1741
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
1769
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
1742
	 * be called in early boot if the cmpxchg() fast path is disabled
1770
	 * be called in early boot if the cmpxchg() fast path is disabled
Lines 1748-1753 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, Link Here
1748
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1776
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1749
	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
1777
	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
1750
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1778
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1779
	rt_mutex_post_schedule();
1751
1780
1752
	return ret;
1781
	return ret;
1753
}
1782
}
Lines 1755-1761 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, Link Here
1755
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
1784
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
1756
					   unsigned int state)
1785
					   unsigned int state)
1757
{
1786
{
1758
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1787
	lockdep_assert(!current->pi_blocked_on);
1788
1789
	if (likely(rt_mutex_try_acquire(lock)))
1759
		return 0;
1790
		return 0;
1760
1791
1761
	return rt_mutex_slowlock(lock, NULL, state);
1792
	return rt_mutex_slowlock(lock, NULL, state);
(-)a/kernel/locking/rwbase_rt.c (+8 lines)
Lines 71-76 static int __sched __rwbase_read_lock(struct rwbase_rt *rwb, Link Here
71
	struct rt_mutex_base *rtm = &rwb->rtmutex;
71
	struct rt_mutex_base *rtm = &rwb->rtmutex;
72
	int ret;
72
	int ret;
73
73
74
	rwbase_pre_schedule();
74
	raw_spin_lock_irq(&rtm->wait_lock);
75
	raw_spin_lock_irq(&rtm->wait_lock);
75
76
76
	/*
77
	/*
Lines 125-136 static int __sched __rwbase_read_lock(struct rwbase_rt *rwb, Link Here
125
		rwbase_rtmutex_unlock(rtm);
126
		rwbase_rtmutex_unlock(rtm);
126
127
127
	trace_contention_end(rwb, ret);
128
	trace_contention_end(rwb, ret);
129
	rwbase_post_schedule();
128
	return ret;
130
	return ret;
129
}
131
}
130
132
131
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
133
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
132
					    unsigned int state)
134
					    unsigned int state)
133
{
135
{
136
	lockdep_assert(!current->pi_blocked_on);
137
134
	if (rwbase_read_trylock(rwb))
138
	if (rwbase_read_trylock(rwb))
135
		return 0;
139
		return 0;
136
140
Lines 237-242 static int __sched rwbase_write_lock(struct rwbase_rt *rwb, Link Here
237
	/* Force readers into slow path */
241
	/* Force readers into slow path */
238
	atomic_sub(READER_BIAS, &rwb->readers);
242
	atomic_sub(READER_BIAS, &rwb->readers);
239
243
244
	rwbase_pre_schedule();
245
240
	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
246
	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
241
	if (__rwbase_write_trylock(rwb))
247
	if (__rwbase_write_trylock(rwb))
242
		goto out_unlock;
248
		goto out_unlock;
Lines 248-253 static int __sched rwbase_write_lock(struct rwbase_rt *rwb, Link Here
248
		if (rwbase_signal_pending_state(state, current)) {
254
		if (rwbase_signal_pending_state(state, current)) {
249
			rwbase_restore_current_state();
255
			rwbase_restore_current_state();
250
			__rwbase_write_unlock(rwb, 0, flags);
256
			__rwbase_write_unlock(rwb, 0, flags);
257
			rwbase_post_schedule();
251
			trace_contention_end(rwb, -EINTR);
258
			trace_contention_end(rwb, -EINTR);
252
			return -EINTR;
259
			return -EINTR;
253
		}
260
		}
Lines 266-271 static int __sched rwbase_write_lock(struct rwbase_rt *rwb, Link Here
266
273
267
out_unlock:
274
out_unlock:
268
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
275
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
276
	rwbase_post_schedule();
269
	return 0;
277
	return 0;
270
}
278
}
271
279
(-)a/kernel/locking/rwsem.c (-1 / +7 lines)
Lines 1427-1434 static inline void __downgrade_write(struct rw_semaphore *sem) Link Here
1427
#define rwbase_signal_pending_state(state, current)	\
1427
#define rwbase_signal_pending_state(state, current)	\
1428
	signal_pending_state(state, current)
1428
	signal_pending_state(state, current)
1429
1429
1430
#define rwbase_pre_schedule()				\
1431
	rt_mutex_pre_schedule()
1432
1430
#define rwbase_schedule()				\
1433
#define rwbase_schedule()				\
1431
	schedule()
1434
	rt_mutex_schedule()
1435
1436
#define rwbase_post_schedule()				\
1437
	rt_mutex_post_schedule()
1432
1438
1433
#include "rwbase_rt.c"
1439
#include "rwbase_rt.c"
1434
1440
(-)a/kernel/locking/spinlock_rt.c (+6 lines)
Lines 37-42 Link Here
37
37
38
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
38
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
39
{
39
{
40
	lockdep_assert(!current->pi_blocked_on);
41
40
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
42
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
41
		rtlock_slowlock(rtm);
43
		rtlock_slowlock(rtm);
42
}
44
}
Lines 184-192 static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm) Link Here
184
186
185
#define rwbase_signal_pending_state(state, current)	(0)
187
#define rwbase_signal_pending_state(state, current)	(0)
186
188
189
#define rwbase_pre_schedule()
190
187
#define rwbase_schedule()				\
191
#define rwbase_schedule()				\
188
	schedule_rtlock()
192
	schedule_rtlock()
189
193
194
#define rwbase_post_schedule()
195
190
#include "rwbase_rt.c"
196
#include "rwbase_rt.c"
191
/*
197
/*
192
 * The common functions which get wrapped into the rwlock API.
198
 * The common functions which get wrapped into the rwlock API.
(-)a/kernel/locking/ww_rt_mutex.c (-1 / +1 lines)
Lines 62-68 __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx, Link Here
62
	}
62
	}
63
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
63
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
64
64
65
	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
65
	if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
66
		if (ww_ctx)
66
		if (ww_ctx)
67
			ww_mutex_set_context_fastpath(lock, ww_ctx);
67
			ww_mutex_set_context_fastpath(lock, ww_ctx);
68
		return 0;
68
		return 0;
(-)a/kernel/panic.c (+73 lines)
Lines 275-280 static void panic_other_cpus_shutdown(bool crash_kexec) Link Here
275
 */
275
 */
276
void panic(const char *fmt, ...)
276
void panic(const char *fmt, ...)
277
{
277
{
278
	enum nbcon_prio prev_prio;
278
	static char buf[1024];
279
	static char buf[1024];
279
	va_list args;
280
	va_list args;
280
	long i, i_next = 0, len;
281
	long i, i_next = 0, len;
Lines 322-327 void panic(const char *fmt, ...) Link Here
322
	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
323
	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
323
		panic_smp_self_stop();
324
		panic_smp_self_stop();
324
325
326
	prev_prio = nbcon_atomic_enter(NBCON_PRIO_PANIC);
327
325
	console_verbose();
328
	console_verbose();
326
	bust_spinlocks(1);
329
	bust_spinlocks(1);
327
	va_start(args, fmt);
330
	va_start(args, fmt);
Lines 382-387 void panic(const char *fmt, ...) Link Here
382
	if (_crash_kexec_post_notifiers)
385
	if (_crash_kexec_post_notifiers)
383
		__crash_kexec(NULL);
386
		__crash_kexec(NULL);
384
387
388
	nbcon_atomic_flush_all();
389
385
	console_unblank();
390
	console_unblank();
386
391
387
	/*
392
	/*
Lines 406-411 void panic(const char *fmt, ...) Link Here
406
		 * We can't use the "normal" timers since we just panicked.
411
		 * We can't use the "normal" timers since we just panicked.
407
		 */
412
		 */
408
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
413
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
414
		nbcon_atomic_flush_all();
409
415
410
		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
416
		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
411
			touch_nmi_watchdog();
417
			touch_nmi_watchdog();
Lines 424-429 void panic(const char *fmt, ...) Link Here
424
		 */
430
		 */
425
		if (panic_reboot_mode != REBOOT_UNDEFINED)
431
		if (panic_reboot_mode != REBOOT_UNDEFINED)
426
			reboot_mode = panic_reboot_mode;
432
			reboot_mode = panic_reboot_mode;
433
		nbcon_atomic_flush_all();
427
		emergency_restart();
434
		emergency_restart();
428
	}
435
	}
429
#ifdef __sparc__
436
#ifdef __sparc__
Lines 436-447 void panic(const char *fmt, ...) Link Here
436
	}
443
	}
437
#endif
444
#endif
438
#if defined(CONFIG_S390)
445
#if defined(CONFIG_S390)
446
	nbcon_atomic_flush_all();
439
	disabled_wait();
447
	disabled_wait();
440
#endif
448
#endif
441
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
449
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
442
450
443
	/* Do not scroll important messages printed above */
451
	/* Do not scroll important messages printed above */
444
	suppress_printk = 1;
452
	suppress_printk = 1;
453
454
	nbcon_atomic_exit(NBCON_PRIO_PANIC, prev_prio);
455
445
	local_irq_enable();
456
	local_irq_enable();
446
	for (i = 0; ; i += PANIC_TIMER_STEP) {
457
	for (i = 0; ; i += PANIC_TIMER_STEP) {
447
		touch_softlockup_watchdog();
458
		touch_softlockup_watchdog();
Lines 603-608 bool oops_may_print(void) Link Here
603
	return pause_on_oops_flag == 0;
614
	return pause_on_oops_flag == 0;
604
}
615
}
605
616
617
static int oops_printing_cpu = -1;
618
static int oops_nesting;
619
static enum nbcon_prio oops_prev_prio;
620
606
/*
621
/*
607
 * Called when the architecture enters its oops handler, before it prints
622
 * Called when the architecture enters its oops handler, before it prints
608
 * anything.  If this is the first CPU to oops, and it's oopsing the first
623
 * anything.  If this is the first CPU to oops, and it's oopsing the first
Lines 619-624 bool oops_may_print(void) Link Here
619
 */
634
 */
620
void oops_enter(void)
635
void oops_enter(void)
621
{
636
{
637
	enum nbcon_prio prev_prio;
638
	int cur_cpu = get_cpu();
639
	int old_cpu = -1;
640
641
	/*
642
	 * If this turns out to be the first CPU in oops, this is the
643
	 * beginning of the outermost atomic printing section. Otherwise
644
	 * it is the beginning of an inner atomic printing section.
645
	 */
646
	prev_prio = nbcon_atomic_enter(NBCON_PRIO_EMERGENCY);
647
648
	old_cpu = cmpxchg(&oops_printing_cpu, old_cpu, cur_cpu);
649
	if (old_cpu == -1) {
650
		/*
651
		 * This is the first CPU in oops so it will be the printer.
652
		 * Save the outermost @prev_prio in order to restore it on the
653
		 * outermost matching oops_exit(), when @oops_nesting == 0.
654
		 */
655
		oops_prev_prio = prev_prio;
656
657
		/*
658
		 * Enter an inner atomic printing section that ends at the end
659
		 * of this function. In this case, the nbcon_atomic_enter()
660
		 * above began the outermost atomic printing section.
661
		 */
662
		prev_prio = nbcon_atomic_enter(NBCON_PRIO_EMERGENCY);
663
	}
664
665
	/* Track nesting when this CPU is the printer. */
666
	if (old_cpu == -1 || old_cpu == cur_cpu)
667
		oops_nesting++;
668
622
	tracing_off();
669
	tracing_off();
623
	/* can't trust the integrity of the kernel anymore: */
670
	/* can't trust the integrity of the kernel anymore: */
624
	debug_locks_off();
671
	debug_locks_off();
Lines 626-631 void oops_enter(void) Link Here
626
673
627
	if (sysctl_oops_all_cpu_backtrace)
674
	if (sysctl_oops_all_cpu_backtrace)
628
		trigger_all_cpu_backtrace();
675
		trigger_all_cpu_backtrace();
676
677
	/* Exit inner atomic printing section. */
678
	nbcon_atomic_exit(NBCON_PRIO_EMERGENCY, prev_prio);
629
}
679
}
630
680
631
static void print_oops_end_marker(void)
681
static void print_oops_end_marker(void)
Lines 641-646 void oops_exit(void) Link Here
641
{
691
{
642
	do_oops_enter_exit();
692
	do_oops_enter_exit();
643
	print_oops_end_marker();
693
	print_oops_end_marker();
694
695
	/*
696
	 * Reading @oops_printing_cpu is a data race if this CPU is not the
697
	 * printer. But that is OK because in that situation the condition
698
	 * will correctly evaluate to false regardless which value was read.
699
	 */
700
	if (oops_printing_cpu == smp_processor_id()) {
701
		oops_nesting--;
702
		if (oops_nesting == 0) {
703
			oops_printing_cpu = -1;
704
705
			/* Exit outermost atomic printing section. */
706
			nbcon_atomic_exit(NBCON_PRIO_EMERGENCY, oops_prev_prio);
707
		}
708
	}
709
	put_cpu();
710
644
	kmsg_dump(KMSG_DUMP_OOPS);
711
	kmsg_dump(KMSG_DUMP_OOPS);
645
}
712
}
646
713
Lines 652-657 struct warn_args { Link Here
652
void __warn(const char *file, int line, void *caller, unsigned taint,
719
void __warn(const char *file, int line, void *caller, unsigned taint,
653
	    struct pt_regs *regs, struct warn_args *args)
720
	    struct pt_regs *regs, struct warn_args *args)
654
{
721
{
722
	enum nbcon_prio prev_prio;
723
724
	prev_prio = nbcon_atomic_enter(NBCON_PRIO_EMERGENCY);
725
655
	disable_trace_on_warning();
726
	disable_trace_on_warning();
656
727
657
	if (file)
728
	if (file)
Lines 682-687 void __warn(const char *file, int line, void *caller, unsigned taint, Link Here
682
753
683
	/* Just a warning, don't kill lockdep. */
754
	/* Just a warning, don't kill lockdep. */
684
	add_taint(taint, LOCKDEP_STILL_OK);
755
	add_taint(taint, LOCKDEP_STILL_OK);
756
757
	nbcon_atomic_exit(NBCON_PRIO_EMERGENCY, prev_prio);
685
}
758
}
686
759
687
#ifdef CONFIG_BUG
760
#ifdef CONFIG_BUG
(-)a/kernel/printk/Makefile (-1 / +1 lines)
Lines 1-6 Link Here
1
# SPDX-License-Identifier: GPL-2.0-only
1
# SPDX-License-Identifier: GPL-2.0-only
2
obj-y	= printk.o
2
obj-y	= printk.o
3
obj-$(CONFIG_PRINTK)	+= printk_safe.o
3
obj-$(CONFIG_PRINTK)	+= printk_safe.o nbcon.o
4
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE)	+= braille.o
4
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE)	+= braille.o
5
obj-$(CONFIG_PRINTK_INDEX)	+= index.o
5
obj-$(CONFIG_PRINTK_INDEX)	+= index.o
6
6
(-)a/kernel/printk/internal.h (+112 lines)
Lines 3-8 Link Here
3
 * internal.h - printk internal definitions
3
 * internal.h - printk internal definitions
4
 */
4
 */
5
#include <linux/percpu.h>
5
#include <linux/percpu.h>
6
#include <linux/console.h>
7
#include "printk_ringbuffer.h"
6
8
7
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
9
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
8
void __init printk_sysctl_init(void);
10
void __init printk_sysctl_init(void);
Lines 12-17 int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, Link Here
12
#define printk_sysctl_init() do { } while (0)
14
#define printk_sysctl_init() do { } while (0)
13
#endif
15
#endif
14
16
17
#define con_printk(lvl, con, fmt, ...)				\
18
	printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt),		\
19
		(con->flags & CON_NBCON) ? "" : "legacy ",	\
20
		(con->flags & CON_BOOT) ? "boot" : "",		\
21
		con->name, con->index, ##__VA_ARGS__)
22
15
#ifdef CONFIG_PRINTK
23
#ifdef CONFIG_PRINTK
16
24
17
#ifdef CONFIG_PRINTK_CALLER
25
#ifdef CONFIG_PRINTK_CALLER
Lines 35-40 enum printk_info_flags { Link Here
35
	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
43
	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
36
};
44
};
37
45
46
extern struct printk_ringbuffer *prb;
47
extern bool printk_threads_enabled;
48
extern bool have_legacy_console;
49
extern bool have_boot_console;
50
51
/*
52
 * Specifies if the console lock/unlock dance is needed for console
53
 * printing. If @have_boot_console is true, the nbcon consoles will
54
 * be printed serially along with the legacy consoles because nbcon
55
 * consoles cannot print simultaneously with boot consoles.
56
 */
57
#define serialized_printing (have_legacy_console || have_boot_console)
58
38
__printf(4, 0)
59
__printf(4, 0)
39
int vprintk_store(int facility, int level,
60
int vprintk_store(int facility, int level,
40
		  const struct dev_printk_info *dev_info,
61
		  const struct dev_printk_info *dev_info,
Lines 61-72 void defer_console_output(void); Link Here
61
82
62
u16 printk_parse_prefix(const char *text, int *level,
83
u16 printk_parse_prefix(const char *text, int *level,
63
			enum printk_info_flags *flags);
84
			enum printk_info_flags *flags);
85
86
u64 nbcon_seq_read(struct console *con);
87
void nbcon_seq_force(struct console *con, u64 seq);
88
bool nbcon_alloc(struct console *con);
89
void nbcon_init(struct console *con);
90
void nbcon_free(struct console *con);
91
bool nbcon_console_emit_next_record(struct console *con);
92
void nbcon_kthread_create(struct console *con);
93
void nbcon_wake_threads(void);
94
void nbcon_legacy_kthread_create(void);
95
96
/*
97
 * Check if the given console is currently capable and allowed to print
98
 * records. Note that this function does not consider the current context,
99
 * which can also play a role in deciding if @con can be used to print
100
 * records.
101
 */
102
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
103
{
104
	if (!(flags & CON_ENABLED))
105
		return false;
106
107
	if ((flags & CON_SUSPENDED))
108
		return false;
109
110
	if (flags & CON_NBCON) {
111
		if (use_atomic) {
112
			if (!con->write_atomic)
113
				return false;
114
		} else {
115
			if (!con->write_thread || !con->kthread)
116
				return false;
117
		}
118
	} else {
119
		if (!con->write)
120
			return false;
121
	}
122
123
	/*
124
	 * Console drivers may assume that per-cpu resources have been
125
	 * allocated. So unless they're explicitly marked as being able to
126
	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
127
	 */
128
	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
129
		return false;
130
131
	return true;
132
}
133
134
/**
135
 * nbcon_kthread_wake - Wake up a printk thread
136
 * @con:        Console to operate on
137
 */
138
static inline void nbcon_kthread_wake(struct console *con)
139
{
140
	/*
141
	 * Guarantee any new records can be seen by tasks preparing to wait
142
	 * before this context checks if the rcuwait is empty.
143
	 *
144
	 * The full memory barrier in rcuwait_wake_up()  pairs with the full
145
	 * memory barrier within set_current_state() of
146
	 * ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
147
	 * adds the waiter but before it has checked the wait condition.
148
	 *
149
	 * This pairs with nbcon_kthread_func:A.
150
	 */
151
	rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
152
}
153
64
#else
154
#else
65
155
66
#define PRINTK_PREFIX_MAX	0
156
#define PRINTK_PREFIX_MAX	0
67
#define PRINTK_MESSAGE_MAX	0
157
#define PRINTK_MESSAGE_MAX	0
68
#define PRINTKRB_RECORD_MAX	0
158
#define PRINTKRB_RECORD_MAX	0
69
159
160
static inline void nbcon_kthread_wake(struct console *con) { }
161
static inline void nbcon_kthread_create(struct console *con) { }
162
#define printk_threads_enabled (false)
163
#define serialized_printing (false)
164
70
/*
165
/*
71
 * In !PRINTK builds we still export console_sem
166
 * In !PRINTK builds we still export console_sem
72
 * semaphore and some of console functions (console_unlock()/etc.), so
167
 * semaphore and some of console functions (console_unlock()/etc.), so
Lines 76-83 u16 printk_parse_prefix(const char *text, int *level, Link Here
76
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
171
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
77
172
78
static inline bool printk_percpu_data_ready(void) { return false; }
173
static inline bool printk_percpu_data_ready(void) { return false; }
174
static inline u64 nbcon_seq_read(struct console *con) { return 0; }
175
static inline void nbcon_seq_force(struct console *con, u64 seq) { }
176
static inline bool nbcon_alloc(struct console *con) { return false; }
177
static inline void nbcon_init(struct console *con) { }
178
static inline void nbcon_free(struct console *con) { }
179
static bool nbcon_console_emit_next_record(struct console *con) { return false; }
180
181
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic) { return false; }
182
79
#endif /* CONFIG_PRINTK */
183
#endif /* CONFIG_PRINTK */
80
184
185
extern struct printk_buffers printk_shared_pbufs;
186
81
/**
187
/**
82
 * struct printk_buffers - Buffers to read/format/output printk messages.
188
 * struct printk_buffers - Buffers to read/format/output printk messages.
83
 * @outbuf:	After formatting, contains text to output.
189
 * @outbuf:	After formatting, contains text to output.
Lines 105-107 struct printk_message { Link Here
105
};
211
};
106
212
107
bool other_cpu_in_panic(void);
213
bool other_cpu_in_panic(void);
214
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
215
			     bool is_extended, bool may_supress);
216
217
#ifdef CONFIG_PRINTK
218
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
219
#endif
(-)a/kernel/printk/nbcon.c (+1677 lines)
Line 0 Link Here
1
// SPDX-License-Identifier: GPL-2.0-only
2
// Copyright (C) 2022 Linutronix GmbH, John Ogness
3
// Copyright (C) 2022 Intel, Thomas Gleixner
4
5
#include <linux/kernel.h>
6
#include <linux/console.h>
7
#include <linux/delay.h>
8
#include <linux/kthread.h>
9
#include <linux/slab.h>
10
#include <linux/serial_core.h>
11
#include <linux/syscore_ops.h>
12
#include "printk_ringbuffer.h"
13
#include "internal.h"
14
/*
15
 * Printk console printing implementation for consoles which does not depend
16
 * on the legacy style console_lock mechanism.
17
 *
18
 * The state of the console is maintained in the "nbcon_state" atomic
19
 * variable.
20
 *
21
 * The console is locked when:
22
 *
23
 *   - The 'prio' field contains the priority of the context that owns the
24
 *     console. Only higher priority contexts are allowed to take over the
25
 *     lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
26
 *
27
 *   - The 'cpu' field denotes on which CPU the console is locked. It is used
28
 *     to prevent busy waiting on the same CPU. Also it informs the lock owner
29
 *     that it has lost the lock in a more complex scenario when the lock was
30
 *     taken over by a higher priority context, released, and taken on another
31
 *     CPU with the same priority as the interrupted owner.
32
 *
33
 * The acquire mechanism uses a few more fields:
34
 *
35
 *   - The 'req_prio' field is used by the handover approach to make the
36
 *     current owner aware that there is a context with a higher priority
37
 *     waiting for the friendly handover.
38
 *
39
 *   - The 'unsafe' field allows to take over the console in a safe way in the
40
 *     middle of emitting a message. The field is set only when accessing some
41
 *     shared resources or when the console device is manipulated. It can be
42
 *     cleared, for example, after emitting one character when the console
43
 *     device is in a consistent state.
44
 *
45
 *   - The 'unsafe_takeover' field is set when a hostile takeover took the
46
 *     console in an unsafe state. The console will stay in the unsafe state
47
 *     until re-initialized.
48
 *
49
 * The acquire mechanism uses three approaches:
50
 *
51
 *   1) Direct acquire when the console is not owned or is owned by a lower
52
 *      priority context and is in a safe state.
53
 *
54
 *   2) Friendly handover mechanism uses a request/grant handshake. It is used
55
 *      when the current owner has lower priority and the console is in an
56
 *      unsafe state.
57
 *
58
 *      The requesting context:
59
 *
60
 *        a) Sets its priority into the 'req_prio' field.
61
 *
62
 *        b) Waits (with a timeout) for the owning context to unlock the
63
 *           console.
64
 *
65
 *        c) Takes the lock and clears the 'req_prio' field.
66
 *
67
 *      The owning context:
68
 *
69
 *        a) Observes the 'req_prio' field set on exit from the unsafe
70
 *           console state.
71
 *
72
 *        b) Gives up console ownership by clearing the 'prio' field.
73
 *
74
 *   3) Unsafe hostile takeover allows to take over the lock even when the
75
 *      console is an unsafe state. It is used only in panic() by the final
76
 *      attempt to flush consoles in a try and hope mode.
77
 *
78
 *      Note that separate record buffers are used in panic(). As a result,
79
 *      the messages can be read and formatted without any risk even after
80
 *      using the hostile takeover in unsafe state.
81
 *
82
 * The release function simply clears the 'prio' field.
83
 *
84
 * All operations on @console::nbcon_state are atomic cmpxchg based to
85
 * handle concurrency.
86
 *
87
 * The acquire/release functions implement only minimal policies:
88
 *
89
 *   - Preference for higher priority contexts.
90
 *   - Protection of the panic CPU.
91
 *
92
 * All other policy decisions must be made at the call sites:
93
 *
94
 *   - What is marked as an unsafe section.
95
 *   - Whether to spin-wait if there is already an owner and the console is
96
 *     in an unsafe state.
97
 *   - Whether to attempt an unsafe hostile takeover.
98
 *
99
 * The design allows to implement the well known:
100
 *
101
 *     acquire()
102
 *     output_one_printk_record()
103
 *     release()
104
 *
105
 * The output of one printk record might be interrupted with a higher priority
106
 * context. The new owner is supposed to reprint the entire interrupted record
107
 * from scratch.
108
 */
109
110
/**
111
 * nbcon_state_set - Helper function to set the console state
112
 * @con:	Console to update
113
 * @new:	The new state to write
114
 *
115
 * Only to be used when the console is not yet or no longer visible in the
116
 * system. Otherwise use nbcon_state_try_cmpxchg().
117
 */
118
static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
119
{
120
	atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
121
}
122
123
/**
124
 * nbcon_state_read - Helper function to read the console state
125
 * @con:	Console to read
126
 * @state:	The state to store the result
127
 */
128
static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
129
{
130
	state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
131
}
132
133
/**
134
 * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
135
 * @con:	Console to update
136
 * @cur:	Old/expected state
137
 * @new:	New state
138
 *
139
 * Return: True on success. False on fail and @cur is updated.
140
 */
141
static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
142
					   struct nbcon_state *new)
143
{
144
	return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
145
}
146
147
#ifdef CONFIG_64BIT
148
149
#define __seq_to_nbcon_seq(seq) (seq)
150
#define __nbcon_seq_to_seq(seq) (seq)
151
152
#else /* CONFIG_64BIT */
153
154
#define __seq_to_nbcon_seq(seq) ((u32)seq)
155
156
static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq)
{
	u64 rb_next_seq;

	/*
	 * The provided sequence is only the lower 32 bits of the ringbuffer
	 * sequence. It needs to be expanded to 64bit. Get the next sequence
	 * number from the ringbuffer and fold it.
	 *
	 * Having a 32bit representation in the console is sufficient.
	 * If a console ever gets more than 2^31 records behind
	 * the ringbuffer then this is the least of the problems.
	 *
	 * Also the access to the ring buffer is always safe.
	 */
	rb_next_seq = prb_next_seq(prb);

	return rb_next_seq - ((u32)rb_next_seq - nbcon_seq);
}
177
178
#endif /* CONFIG_64BIT */
179
180
/**
181
 * nbcon_seq_read - Read the current console sequence
182
 * @con:	Console to read the sequence of
183
 *
184
 * Return:	Sequence number of the next record to print on @con.
185
 */
186
u64 nbcon_seq_read(struct console *con)
187
{
188
	unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
189
190
	return __nbcon_seq_to_seq(nbcon_seq);
191
}
192
193
/**
194
 * nbcon_seq_force - Force console sequence to a specific value
195
 * @con:	Console to work on
196
 * @seq:	Sequence number value to set
197
 *
198
 * Only to be used during init (before registration) or in extreme situations
199
 * (such as panic with CONSOLE_REPLAY_ALL).
200
 */
201
void nbcon_seq_force(struct console *con, u64 seq)
202
{
203
	/*
204
	 * If the specified record no longer exists, the oldest available record
205
	 * is chosen. This is especially important on 32bit systems because only
206
	 * the lower 32 bits of the sequence number are stored. The upper 32 bits
207
	 * are derived from the sequence numbers available in the ringbuffer.
208
	 */
209
	u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
210
211
	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq));
212
213
	/* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
214
	con->seq = 0;
215
}
216
217
static void nbcon_context_seq_set(struct nbcon_context *ctxt)
218
{
219
	ctxt->seq = nbcon_seq_read(ctxt->console);
220
}
221
222
/**
223
 * nbcon_seq_try_update - Try to update the console sequence number
224
 * @ctxt:	Pointer to an acquire context that contains
225
 *		all information about the acquire mode
226
 * @new_seq:	The new sequence number to set
227
 *
228
 * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
229
 * the 64bit value). This could be a different value than @new_seq if
230
 * nbcon_seq_force() was used or the current context no longer owns the
231
 * console. In the later case, it will stop printing anyway.
232
 */
233
static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
234
{
235
	unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
236
	struct console *con = ctxt->console;
237
238
	if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
239
				    __seq_to_nbcon_seq(new_seq))) {
240
		ctxt->seq = new_seq;
241
	} else {
242
		ctxt->seq = nbcon_seq_read(con);
243
	}
244
}
245
246
bool printk_threads_enabled __ro_after_init;
247
248
/**
249
 * nbcon_context_try_acquire_direct - Try to acquire directly
250
 * @ctxt:	The context of the caller
251
 * @cur:	The current console state
252
 *
253
 * Acquire the console when it is released. Also acquire the console when
254
 * the current owner has a lower priority and the console is in a safe state.
255
 *
256
 * Return:	0 on success. Otherwise, an error code on failure. Also @cur
257
 *		is updated to the latest state when failed to modify it.
258
 *
259
 * Errors:
260
 *
261
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
262
 *			Or the current owner or waiter has the same or higher
263
 *			priority. No acquire method can be successful in
264
 *			this case.
265
 *
266
 *	-EBUSY:		The current owner has a lower priority but the console
267
 *			in an unsafe state. The caller should try using
268
 *			the handover acquire method.
269
 */
270
static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
271
					    struct nbcon_state *cur)
272
{
273
	unsigned int cpu = smp_processor_id();
274
	struct console *con = ctxt->console;
275
	struct nbcon_state new;
276
277
	do {
278
		if (other_cpu_in_panic())
279
			return -EPERM;
280
281
		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
282
			return -EPERM;
283
284
		if (cur->unsafe)
285
			return -EBUSY;
286
287
		/*
288
		 * The console should never be safe for a direct acquire
289
		 * if an unsafe hostile takeover has ever happened.
290
		 */
291
		WARN_ON_ONCE(cur->unsafe_takeover);
292
293
		new.atom = cur->atom;
294
		new.prio	= ctxt->prio;
295
		new.req_prio	= NBCON_PRIO_NONE;
296
		new.unsafe	= cur->unsafe_takeover;
297
		new.cpu		= cpu;
298
299
	} while (!nbcon_state_try_cmpxchg(con, cur, &new));
300
301
	return 0;
302
}
303
304
static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
305
{
306
	/*
307
	 * The request context is well defined by the @req_prio because:
308
	 *
309
	 * - Only a context with a higher priority can take over the request.
310
	 * - There are only three priorities.
311
	 * - Only one CPU is allowed to request PANIC priority.
312
	 * - Lower priorities are ignored during panic() until reboot.
313
	 *
314
	 * As a result, the following scenario is *not* possible:
315
	 *
316
	 * 1. Another context with a higher priority directly takes ownership.
317
	 * 2. The higher priority context releases the ownership.
318
	 * 3. A lower priority context takes the ownership.
319
	 * 4. Another context with the same priority as this context
320
	 *    creates a request and starts waiting.
321
	 */
322
323
	return (cur->req_prio == expected_prio);
324
}
325
326
/**
327
 * nbcon_context_try_acquire_requested - Try to acquire after having
328
 *					 requested a handover
329
 * @ctxt:	The context of the caller
330
 * @cur:	The current console state
331
 *
332
 * This is a helper function for nbcon_context_try_acquire_handover().
333
 * It is called when the console is in an unsafe state. The current
334
 * owner will release the console on exit from the unsafe region.
335
 *
336
 * Return:	0 on success and @cur is updated to the new console state.
337
 *		Otherwise an error code on failure.
338
 *
339
 * Errors:
340
 *
341
 *	-EPERM:		A panic is in progress and this is not the panic CPU
342
 *			or this context is no longer the waiter.
343
 *
344
 *	-EBUSY:		The console is still locked. The caller should
345
 *			continue waiting.
346
 *
347
 * Note: The caller must still remove the request when an error has occurred
348
 *       except when this context is no longer the waiter.
349
 */
350
static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
351
					       struct nbcon_state *cur)
352
{
353
	unsigned int cpu = smp_processor_id();
354
	struct console *con = ctxt->console;
355
	struct nbcon_state new;
356
357
	/* Note that the caller must still remove the request! */
358
	if (other_cpu_in_panic())
359
		return -EPERM;
360
361
	/*
362
	 * Note that the waiter will also change if there was an unsafe
363
	 * hostile takeover.
364
	 */
365
	if (!nbcon_waiter_matches(cur, ctxt->prio))
366
		return -EPERM;
367
368
	/* If still locked, caller should continue waiting. */
369
	if (cur->prio != NBCON_PRIO_NONE)
370
		return -EBUSY;
371
372
	/*
373
	 * The previous owner should have never released ownership
374
	 * in an unsafe region.
375
	 */
376
	WARN_ON_ONCE(cur->unsafe);
377
378
	new.atom = cur->atom;
379
	new.prio	= ctxt->prio;
380
	new.req_prio	= NBCON_PRIO_NONE;
381
	new.unsafe	= cur->unsafe_takeover;
382
	new.cpu		= cpu;
383
384
	if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
385
		/*
386
		 * The acquire could fail only when it has been taken
387
		 * over by a higher priority context.
388
		 */
389
		WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
390
		return -EPERM;
391
	}
392
393
	/* Handover success. This context now owns the console. */
394
	return 0;
395
}
396
397
/**
398
 * nbcon_context_try_acquire_handover - Try to acquire via handover
399
 * @ctxt:	The context of the caller
400
 * @cur:	The current console state
401
 *
402
 * The function must be called only when the context has higher priority
403
 * than the current owner and the console is in an unsafe state.
404
 * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
405
 *
406
 * The function sets "req_prio" field to make the current owner aware of
407
 * the request. Then it waits until the current owner releases the console,
408
 * or an even higher context takes over the request, or timeout expires.
409
 *
410
 * The current owner checks the "req_prio" field on exit from the unsafe
411
 * region and releases the console. It does not touch the "req_prio" field
412
 * so that the console stays reserved for the waiter.
413
 *
414
 * Return:	0 on success. Otherwise, an error code on failure. Also @cur
415
 *		is updated to the latest state when failed to modify it.
416
 *
417
 * Errors:
418
 *
419
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
420
 *			Or a higher priority context has taken over the
421
 *			console or the handover request.
422
 *
423
 *	-EBUSY:		The current owner is on the same CPU so that the hand
424
 *			shake could not work. Or the current owner is not
425
 *			willing to wait (zero timeout). Or the console does
426
 *			not enter the safe state before timeout passed. The
427
 *			caller might still use the unsafe hostile takeover
428
 *			when allowed.
429
 *
430
 *	-EAGAIN:	@cur has changed when creating the handover request.
431
 *			The caller should retry with direct acquire.
432
 */
433
static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
434
					      struct nbcon_state *cur)
435
{
436
	unsigned int cpu = smp_processor_id();
437
	struct console *con = ctxt->console;
438
	struct nbcon_state new;
439
	int timeout;
440
	int request_err = -EBUSY;
441
442
	/*
443
	 * Check that the handover is called when the direct acquire failed
444
	 * with -EBUSY.
445
	 */
446
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
447
	WARN_ON_ONCE(!cur->unsafe);
448
449
	/* Handover is not possible on the same CPU. */
450
	if (cur->cpu == cpu)
451
		return -EBUSY;
452
453
	/*
454
	 * Console stays unsafe after an unsafe takeover until re-initialized.
455
	 * Waiting is not going to help in this case.
456
	 */
457
	if (cur->unsafe_takeover)
458
		return -EBUSY;
459
460
	/* Is the caller willing to wait? */
461
	if (ctxt->spinwait_max_us == 0)
462
		return -EBUSY;
463
464
	/*
465
	 * Setup a request for the handover. The caller should try to acquire
466
	 * the console directly when the current state has been modified.
467
	 */
468
	new.atom = cur->atom;
469
	new.req_prio = ctxt->prio;
470
	if (!nbcon_state_try_cmpxchg(con, cur, &new))
471
		return -EAGAIN;
472
473
	cur->atom = new.atom;
474
475
	/* Wait until there is no owner and then acquire the console. */
476
	for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
477
		/* On successful acquire, this request is cleared. */
478
		request_err = nbcon_context_try_acquire_requested(ctxt, cur);
479
		if (!request_err)
480
			return 0;
481
482
		/*
483
		 * If the acquire should be aborted, it must be ensured
484
		 * that the request is removed before returning to caller.
485
		 */
486
		if (request_err == -EPERM)
487
			break;
488
489
		udelay(1);
490
491
		/* Re-read the state because some time has passed. */
492
		nbcon_state_read(con, cur);
493
	}
494
495
	/* Timed out or aborted. Carefully remove handover request. */
496
	do {
497
		/*
498
		 * No need to remove request if there is a new waiter. This
499
		 * can only happen if a higher priority context has taken over
500
		 * the console or the handover request.
501
		 */
502
		if (!nbcon_waiter_matches(cur, ctxt->prio))
503
			return -EPERM;
504
505
		/* Unset request for handover. */
506
		new.atom = cur->atom;
507
		new.req_prio = NBCON_PRIO_NONE;
508
		if (nbcon_state_try_cmpxchg(con, cur, &new)) {
509
			/*
510
			 * Request successfully unset. Report failure of
511
			 * acquiring via handover.
512
			 */
513
			cur->atom = new.atom;
514
			return request_err;
515
		}
516
517
		/*
518
		 * Unable to remove request. Try to acquire in case
519
		 * the owner has released the lock.
520
		 */
521
	} while (nbcon_context_try_acquire_requested(ctxt, cur));
522
523
	/* Lucky timing. The acquire succeeded while removing the request. */
524
	return 0;
525
}
526
527
/**
528
 * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
529
 * @ctxt:	The context of the caller
530
 * @cur:	The current console state
531
 *
532
 * Acquire the console even in the unsafe state.
533
 *
534
 * It can be permitted by setting the 'allow_unsafe_takeover' field only
535
 * by the final attempt to flush messages in panic().
536
 *
537
 * Return:	0 on success. -EPERM when not allowed by the context.
538
 */
539
static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
540
					     struct nbcon_state *cur)
541
{
542
	unsigned int cpu = smp_processor_id();
543
	struct console *con = ctxt->console;
544
	struct nbcon_state new;
545
546
	if (!ctxt->allow_unsafe_takeover)
547
		return -EPERM;
548
549
	/* Ensure caller is allowed to perform unsafe hostile takeovers. */
550
	if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
551
		return -EPERM;
552
553
	/*
554
	 * Check that try_acquire_direct() and try_acquire_handover() returned
555
	 * -EBUSY in the right situation.
556
	 */
557
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
558
	WARN_ON_ONCE(cur->unsafe != true);
559
560
	do {
561
		new.atom = cur->atom;
562
		new.cpu			= cpu;
563
		new.prio		= ctxt->prio;
564
		new.unsafe		|= cur->unsafe_takeover;
565
		new.unsafe_takeover	|= cur->unsafe;
566
567
	} while (!nbcon_state_try_cmpxchg(con, cur, &new));
568
569
	return 0;
570
}
571
572
static struct printk_buffers panic_nbcon_pbufs;
573
574
/**
575
 * nbcon_context_try_acquire - Try to acquire nbcon console
576
 * @ctxt:	The context of the caller
577
 *
578
 * Return:	True if the console was acquired. False otherwise.
579
 *
580
 * If the caller allowed an unsafe hostile takeover, on success the
581
 * caller should check the current console state to see if it is
582
 * in an unsafe state. Otherwise, on success the caller may assume
583
 * the console is not in an unsafe state.
584
 */
585
static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
586
{
587
	unsigned int cpu = smp_processor_id();
588
	struct console *con = ctxt->console;
589
	struct nbcon_state cur;
590
	int err;
591
592
	nbcon_state_read(con, &cur);
593
try_again:
594
	err = nbcon_context_try_acquire_direct(ctxt, &cur);
595
	if (err != -EBUSY)
596
		goto out;
597
598
	err = nbcon_context_try_acquire_handover(ctxt, &cur);
599
	if (err == -EAGAIN)
600
		goto try_again;
601
	if (err != -EBUSY)
602
		goto out;
603
604
	err = nbcon_context_try_acquire_hostile(ctxt, &cur);
605
out:
606
	if (err)
607
		return false;
608
609
	/* Acquire succeeded. */
610
611
	/* Assign the appropriate buffer for this context. */
612
	if (atomic_read(&panic_cpu) == cpu)
613
		ctxt->pbufs = &panic_nbcon_pbufs;
614
	else
615
		ctxt->pbufs = con->pbufs;
616
617
	/* Set the record sequence for this context to print. */
618
	ctxt->seq = nbcon_seq_read(ctxt->console);
619
620
	return true;
621
}
622
623
static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
624
				int expected_prio)
625
{
626
	/*
627
	 * Since consoles can only be acquired by higher priorities,
628
	 * owning contexts are uniquely identified by @prio. However,
629
	 * since contexts can unexpectedly lose ownership, it is
630
	 * possible that later another owner appears with the same
631
	 * priority. For this reason @cpu is also needed.
632
	 */
633
634
	if (cur->prio != expected_prio)
635
		return false;
636
637
	if (cur->cpu != expected_cpu)
638
		return false;
639
640
	return true;
641
}
642
643
/**
644
 * nbcon_context_release - Release the console
645
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
646
 */
647
static void nbcon_context_release(struct nbcon_context *ctxt)
648
{
649
	unsigned int cpu = smp_processor_id();
650
	struct console *con = ctxt->console;
651
	struct nbcon_state cur;
652
	struct nbcon_state new;
653
654
	nbcon_state_read(con, &cur);
655
656
	do {
657
		if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
658
			break;
659
660
		new.atom = cur.atom;
661
		new.prio = NBCON_PRIO_NONE;
662
663
		/*
664
		 * If @unsafe_takeover is set, it is kept set so that
665
		 * the state remains permanently unsafe.
666
		 */
667
		new.unsafe |= cur.unsafe_takeover;
668
669
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
670
671
	ctxt->pbufs = NULL;
672
}
673
674
/**
675
 * nbcon_context_can_proceed - Check whether ownership can proceed
676
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
677
 * @cur:	The current console state
678
 *
679
 * Return:	True if this context still owns the console. False if
680
 *		ownership was handed over or taken.
681
 *
682
 * Must be invoked when entering the unsafe state to make sure that it still
683
 * owns the lock. Also must be invoked when exiting the unsafe context
684
 * to eventually free the lock for a higher priority context which asked
685
 * for the friendly handover.
686
 *
687
 * It can be called inside an unsafe section when the console is just
688
 * temporary in safe state instead of exiting and entering the unsafe
689
 * state.
690
 *
691
 * Also it can be called in the safe context before doing an expensive
692
 * safe operation. It does not make sense to do the operation when
693
 * a higher priority context took the lock.
694
 *
695
 * When this function returns false then the calling context no longer owns
696
 * the console and is no longer allowed to go forward. In this case it must
697
 * back out immediately and carefully. The buffer content is also no longer
698
 * trusted since it no longer belongs to the calling context.
699
 */
700
static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
701
{
702
	unsigned int cpu = smp_processor_id();
703
704
	/* Make sure this context still owns the console. */
705
	if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
706
		return false;
707
708
	/* The console owner can proceed if there is no waiter. */
709
	if (cur->req_prio == NBCON_PRIO_NONE)
710
		return true;
711
712
	/*
713
	 * A console owner within an unsafe region is always allowed to
714
	 * proceed, even if there are waiters. It can perform a handover
715
	 * when exiting the unsafe region. Otherwise the waiter will
716
	 * need to perform an unsafe hostile takeover.
717
	 */
718
	if (cur->unsafe)
719
		return true;
720
721
	/* Waiters always have higher priorities than owners. */
722
	WARN_ON_ONCE(cur->req_prio <= cur->prio);
723
724
	/*
725
	 * Having a safe point for take over and eventually a few
726
	 * duplicated characters or a full line is way better than a
727
	 * hostile takeover. Post processing can take care of the garbage.
728
	 * Release and hand over.
729
	 */
730
	nbcon_context_release(ctxt);
731
732
	/*
733
	 * It is not clear whether the waiter really took over ownership. The
734
	 * outermost callsite must make the final decision whether console
735
	 * ownership is needed for it to proceed. If yes, it must reacquire
736
	 * ownership (possibly hostile) before carefully proceeding.
737
	 *
738
	 * The calling context no longer owns the console so go back all the
739
	 * way instead of trying to implement reacquire heuristics in tons of
740
	 * places.
741
	 */
742
	return false;
743
}
744
745
/**
746
 * nbcon_can_proceed - Check whether ownership can proceed
747
 * @wctxt:	The write context that was handed to the write function
748
 *
749
 * Return:	True if this context still owns the console. False if
750
 *		ownership was handed over or taken.
751
 *
752
 * It is used in nbcon_enter_unsafe() to make sure that it still owns the
753
 * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
754
 * for a higher priority context which asked for the friendly handover.
755
 *
756
 * It can be called inside an unsafe section when the console is just
757
 * temporary in safe state instead of exiting and entering the unsafe state.
758
 *
759
 * Also it can be called in the safe context before doing an expensive safe
760
 * operation. It does not make sense to do the operation when a higher
761
 * priority context took the lock.
762
 *
763
 * When this function returns false then the calling context no longer owns
764
 * the console and is no longer allowed to go forward. In this case it must
765
 * back out immediately and carefully. The buffer content is also no longer
766
 * trusted since it no longer belongs to the calling context.
767
 */
768
bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
769
{
770
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
771
	struct console *con = ctxt->console;
772
	struct nbcon_state cur;
773
774
	nbcon_state_read(con, &cur);
775
776
	return nbcon_context_can_proceed(ctxt, &cur);
777
}
778
EXPORT_SYMBOL_GPL(nbcon_can_proceed);
779
780
#define nbcon_context_enter_unsafe(c)	__nbcon_context_update_unsafe(c, true)
781
#define nbcon_context_exit_unsafe(c)	__nbcon_context_update_unsafe(c, false)
782
783
/**
784
 * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
785
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
786
 * @unsafe:	The new value for the unsafe bit
787
 *
788
 * Return:	True if the unsafe state was updated and this context still
789
 *		owns the console. Otherwise false if ownership was handed
790
 *		over or taken.
791
 *
792
 * This function allows console owners to modify the unsafe status of the
793
 * console.
794
 *
795
 * When this function returns false then the calling context no longer owns
796
 * the console and is no longer allowed to go forward. In this case it must
797
 * back out immediately and carefully. The buffer content is also no longer
798
 * trusted since it no longer belongs to the calling context.
799
 *
800
 * Internal helper to avoid duplicated code.
801
 */
802
static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
803
{
804
	struct console *con = ctxt->console;
805
	struct nbcon_state cur;
806
	struct nbcon_state new;
807
808
	nbcon_state_read(con, &cur);
809
810
	do {
811
		/*
812
		 * The unsafe bit must not be cleared if an
813
		 * unsafe hostile takeover has occurred.
814
		 */
815
		if (!unsafe && cur.unsafe_takeover)
816
			goto out;
817
818
		if (!nbcon_context_can_proceed(ctxt, &cur))
819
			return false;
820
821
		new.atom = cur.atom;
822
		new.unsafe = unsafe;
823
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
824
825
	cur.atom = new.atom;
826
out:
827
	return nbcon_context_can_proceed(ctxt, &cur);
828
}
829
830
/**
831
 * nbcon_enter_unsafe - Enter an unsafe region in the driver
832
 * @wctxt:	The write context that was handed to the write function
833
 *
834
 * Return:	True if this context still owns the console. False if
835
 *		ownership was handed over or taken.
836
 *
837
 * When this function returns false then the calling context no longer owns
838
 * the console and is no longer allowed to go forward. In this case it must
839
 * back out immediately and carefully. The buffer content is also no longer
840
 * trusted since it no longer belongs to the calling context.
841
 */
842
bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
843
{
844
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
845
846
	return nbcon_context_enter_unsafe(ctxt);
847
}
848
EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
849
850
/**
851
 * nbcon_exit_unsafe - Exit an unsafe region in the driver
852
 * @wctxt:	The write context that was handed to the write function
853
 *
854
 * Return:	True if this context still owns the console. False if
855
 *		ownership was handed over or taken.
856
 *
857
 * When this function returns false then the calling context no longer owns
858
 * the console and is no longer allowed to go forward. In this case it must
859
 * back out immediately and carefully. The buffer content is also no longer
860
 * trusted since it no longer belongs to the calling context.
861
 */
862
bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
863
{
864
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
865
866
	return nbcon_context_exit_unsafe(ctxt);
867
}
868
EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
869
870
/**
871
 * nbcon_emit_next_record - Emit a record in the acquired context
872
 * @wctxt:	The write context that will be handed to the write function
873
 * @in_kthread:	True if called from kthread printer context.
874
 *
875
 * Return:	True if this context still owns the console. False if
876
 *		ownership was handed over or taken.
877
 *
878
 * When this function returns false then the calling context no longer owns
879
 * the console and is no longer allowed to go forward. In this case it must
880
 * back out immediately and carefully. The buffer content is also no longer
881
 * trusted since it no longer belongs to the calling context. If the caller
882
 * wants to do more it must reacquire the console first.
883
 *
884
 * When true is returned, @wctxt->ctxt.backlog indicates whether there are
885
 * still records pending in the ringbuffer,
886
 */
887
static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool in_kthread)
888
{
889
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
890
	struct console *con = ctxt->console;
891
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
892
	struct printk_message pmsg = {
893
		.pbufs = ctxt->pbufs,
894
	};
895
	unsigned long con_dropped;
896
	struct nbcon_state cur;
897
	unsigned long dropped;
898
	bool done;
899
900
	/*
901
	 * The printk buffers are filled within an unsafe section. This
902
	 * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
903
	 * clobbering each other.
904
	 */
905
906
	if (!nbcon_context_enter_unsafe(ctxt))
907
		return false;
908
909
	ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
910
	if (!ctxt->backlog)
911
		return nbcon_context_exit_unsafe(ctxt);
912
913
	/*
914
	 * @con->dropped is not protected in case of an unsafe hostile
915
	 * takeover. In that situation the update can be racy so
916
	 * annotate it accordingly.
917
	 */
918
	con_dropped = data_race(READ_ONCE(con->dropped));
919
920
	dropped = con_dropped + pmsg.dropped;
921
	if (dropped && !is_extended)
922
		console_prepend_dropped(&pmsg, dropped);
923
924
	if (!nbcon_context_exit_unsafe(ctxt))
925
		return false;
926
927
	/* For skipped records just update seq/dropped in @con. */
928
	if (pmsg.outbuf_len == 0)
929
		goto update_con;
930
931
	/* Initialize the write context for driver callbacks. */
932
	wctxt->outbuf = &pmsg.pbufs->outbuf[0];
933
	wctxt->len = pmsg.outbuf_len;
934
	nbcon_state_read(con, &cur);
935
	wctxt->unsafe_takeover = cur.unsafe_takeover;
936
937
	if (!in_kthread && con->write_atomic) {
938
		done = con->write_atomic(con, wctxt);
939
	} else if (in_kthread && con->write_thread && con->kthread) {
940
		done = con->write_thread(con, wctxt);
941
	} else {
942
		nbcon_context_release(ctxt);
943
		WARN_ON_ONCE(1);
944
		done = false;
945
	}
946
947
	/* If not done, the emit was aborted. */
948
	if (!done)
949
		return false;
950
951
	/*
952
	 * Since any dropped message was successfully output, reset the
953
	 * dropped count for the console.
954
	 */
955
	dropped = 0;
956
update_con:
957
	/*
958
	 * The dropped count and the sequence number are updated within an
959
	 * unsafe section. This limits update races to the panic context and
960
	 * allows the panic context to win.
961
	 */
962
963
	if (!nbcon_context_enter_unsafe(ctxt))
964
		return false;
965
966
	if (dropped != con_dropped) {
967
		/* Counterpart to the READ_ONCE() above. */
968
		WRITE_ONCE(con->dropped, dropped);
969
	}
970
971
	nbcon_seq_try_update(ctxt, pmsg.seq + 1);
972
973
	return nbcon_context_exit_unsafe(ctxt);
974
}
975
976
/**
977
 * nbcon_kthread_should_wakeup - Check whether the printk thread should wakeup
978
 * @con:	Console to operate on
979
 * @ctxt:	The acquire context that contains the state
980
 *		at console_acquire()
981
 *
982
 * Returns: True if the thread should shutdown or if the console is allowed to
983
 * print and a record is available. False otherwise
984
 *
985
 * After the thread wakes up, it must first check if it should shutdown before
986
 * attempting any printing.
987
 */
988
static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
989
{
990
	struct nbcon_state cur;
991
	bool is_usable;
992
	short flags;
993
	int cookie;
994
995
	if (kthread_should_stop())
996
		return true;
997
998
	cookie = console_srcu_read_lock();
999
	flags = console_srcu_read_flags(con);
1000
	is_usable = console_is_usable(con, flags, false);
1001
	console_srcu_read_unlock(cookie);
1002
1003
	if (!is_usable)
1004
		return false;
1005
1006
	nbcon_state_read(con, &cur);
1007
1008
	/*
1009
	 * Atomic printing is running on some other CPU. The owner
1010
	 * will wake the console thread on unlock if necessary.
1011
	 */
1012
	if (cur.prio != NBCON_PRIO_NONE)
1013
		return false;
1014
1015
	/* Bring the sequence in @ctxt up to date */
1016
	nbcon_context_seq_set(ctxt);
1017
1018
	return prb_read_valid(prb, ctxt->seq, NULL);
1019
}
1020
1021
/**
1022
 * nbcon_kthread_func - The printk thread function
1023
 * @__console:	Console to operate on
1024
 */
1025
static int nbcon_kthread_func(void *__console)
1026
{
1027
	struct console *con = __console;
1028
	struct nbcon_write_context wctxt = {
1029
		.ctxt.console	= con,
1030
		.ctxt.prio	= NBCON_PRIO_NORMAL,
1031
	};
1032
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
1033
	struct uart_port *port = NULL;
1034
	unsigned long flags;
1035
	short con_flags;
1036
	bool backlog;
1037
	int cookie;
1038
	int ret;
1039
1040
	if (con->uart_port)
1041
		port = con->uart_port(con);
1042
1043
wait_for_event:
1044
	/*
1045
	 * Guarantee this task is visible on the rcuwait before
1046
	 * checking the wake condition.
1047
	 *
1048
	 * The full memory barrier within set_current_state() of
1049
	 * ___rcuwait_wait_event() pairs with the full memory
1050
	 * barrier within rcuwait_has_sleeper().
1051
	 *
1052
	 * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
1053
	 */
1054
	ret = rcuwait_wait_event(&con->rcuwait,
1055
				 nbcon_kthread_should_wakeup(con, ctxt),
1056
				 TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
1057
1058
	if (kthread_should_stop())
1059
		return 0;
1060
1061
	/* Wait was interrupted by a spurious signal, go back to sleep. */
1062
	if (ret)
1063
		goto wait_for_event;
1064
1065
	do {
1066
		backlog = false;
1067
1068
		cookie = console_srcu_read_lock();
1069
1070
		con_flags = console_srcu_read_flags(con);
1071
1072
		if (console_is_usable(con, con_flags, false)) {
1073
			/*
1074
			 * Ensure this stays on the CPU to make handover and
1075
			 * takeover possible.
1076
			 */
1077
			if (port)
1078
				spin_lock_irqsave(&port->lock, flags);
1079
			else
1080
				migrate_disable();
1081
1082
			if (nbcon_context_try_acquire(ctxt)) {
1083
				/*
1084
				 * If the emit fails, this context is no
1085
				 * longer the owner.
1086
				 */
1087
				if (nbcon_emit_next_record(&wctxt, true)) {
1088
					nbcon_context_release(ctxt);
1089
					backlog = ctxt->backlog;
1090
				}
1091
			}
1092
1093
			if (port)
1094
				spin_unlock_irqrestore(&port->lock, flags);
1095
			else
1096
				migrate_enable();
1097
		}
1098
1099
		console_srcu_read_unlock(cookie);
1100
1101
		cond_resched();
1102
1103
	} while (backlog);
1104
1105
	goto wait_for_event;
1106
}
1107
1108
/**
1109
 * nbcon_irq_work - irq work to wake printk thread
1110
 * @irq_work:	The irq work to operate on
1111
 */
1112
static void nbcon_irq_work(struct irq_work *irq_work)
1113
{
1114
	struct console *con = container_of(irq_work, struct console, irq_work);
1115
1116
	nbcon_kthread_wake(con);
1117
}
1118
1119
static inline bool rcuwait_has_sleeper(struct rcuwait *w)
1120
{
1121
	bool has_sleeper;
1122
1123
	rcu_read_lock();
1124
	/*
1125
	 * Guarantee any new records can be seen by tasks preparing to wait
1126
	 * before this context checks if the rcuwait is empty.
1127
	 *
1128
	 * This full memory barrier pairs with the full memory barrier within
1129
	 * set_current_state() of ___rcuwait_wait_event(), which is called
1130
	 * after prepare_to_rcuwait() adds the waiter but before it has
1131
	 * checked the wait condition.
1132
	 *
1133
	 * This pairs with nbcon_kthread_func:A.
1134
	 */
1135
	smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
1136
	has_sleeper = !!rcu_dereference(w->task);
1137
	rcu_read_unlock();
1138
1139
	return has_sleeper;
1140
}
1141
1142
/**
1143
 * nbcon_wake_threads - Wake up printing threads using irq_work
1144
 */
1145
void nbcon_wake_threads(void)
1146
{
1147
	struct console *con;
1148
	int cookie;
1149
1150
	cookie = console_srcu_read_lock();
1151
	for_each_console_srcu(con) {
1152
		/*
1153
		 * Only schedule irq_work if the printing thread is
1154
		 * actively waiting. If not waiting, the thread will
1155
		 * notice by itself that it has work to do.
1156
		 */
1157
		if (con->kthread && rcuwait_has_sleeper(&con->rcuwait))
1158
			irq_work_queue(&con->irq_work);
1159
	}
1160
	console_srcu_read_unlock(cookie);
1161
}
1162
1163
/**
1164
 * struct nbcon_cpu_state - Per CPU printk context state
1165
 * @prio:	The current context priority level
1166
 * @nesting:	Per priority nest counter
1167
 */
1168
struct nbcon_cpu_state {
1169
	enum nbcon_prio		prio;
1170
	int			nesting[NBCON_PRIO_MAX];
1171
};
1172
1173
static DEFINE_PER_CPU(struct nbcon_cpu_state, nbcon_pcpu_state);
1174
static struct nbcon_cpu_state early_nbcon_pcpu_state __initdata;
1175
1176
/**
1177
 * nbcon_get_cpu_state - Get the per CPU console state pointer
1178
 *
1179
 * Returns either a pointer to the per CPU state of the current CPU or to
1180
 * the init data state during early boot.
1181
 */
1182
static __ref struct nbcon_cpu_state *nbcon_get_cpu_state(void)
1183
{
1184
	if (!printk_percpu_data_ready())
1185
		return &early_nbcon_pcpu_state;
1186
1187
	return this_cpu_ptr(&nbcon_pcpu_state);
1188
}
1189
1190
/**
1191
 * nbcon_atomic_emit_one - Print one record for a console in atomic mode
1192
 * @wctxt:			An initialized write context struct to use
1193
 *				for this context
1194
 *
1195
 * Returns false if the given console could not print a record or there are
1196
 * no more records to print, otherwise true.
1197
 *
1198
 * This is an internal helper to handle the locking of the console before
1199
 * calling nbcon_emit_next_record().
1200
 */
1201
static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
1202
{
1203
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
1204
1205
	if (!nbcon_context_try_acquire(ctxt))
1206
		return false;
1207
1208
	/*
1209
	 * nbcon_emit_next_record() returns false when the console was
1210
	 * handed over or taken over. In both cases the context is no
1211
	 * longer valid.
1212
	 */
1213
	if (!nbcon_emit_next_record(wctxt, false))
1214
		return false;
1215
1216
	nbcon_context_release(ctxt);
1217
1218
	return prb_read_valid(prb, ctxt->seq, NULL);
1219
}
1220
1221
/**
1222
 * nbcon_console_emit_next_record - Print one record for an nbcon console
1223
 *					in atomic mode
1224
 * @con:	The console to print on
1225
 *
1226
 * Return:	True if a record could be printed, otherwise false.
1227
 * Context:	Any context where migration is disabled.
1228
 *
1229
 * This function is meant to be called by console_flush_all() to atomically
1230
 * print records on nbcon consoles. Essentially it is the nbcon version of
1231
 * console_emit_next_record().
1232
 *
1233
 * This function also returns false if the current CPU is in an elevated
1234
 * atomic priority state in order to allow the CPU to get all of the
1235
 * emergency messages into the ringbuffer first.
1236
 */
1237
bool nbcon_console_emit_next_record(struct console *con)
1238
{
1239
	struct uart_port *port = con->uart_port(con);
1240
	static DEFINE_SPINLOCK(shared_spinlock);
1241
	struct nbcon_cpu_state *cpu_state;
1242
	bool progress = false;
1243
	unsigned long flags;
1244
1245
	/*
1246
	 * If there is no port lock available, fallback to a shared
1247
	 * spinlock. This serves to provide the necessary type of
1248
	 * migration/preemption disabling while printing.
1249
	 */
1250
	if (port)
1251
		spin_lock_irqsave(&port->lock, flags);
1252
	else
1253
		spin_lock_irqsave(&shared_spinlock, flags);
1254
1255
	cpu_state = nbcon_get_cpu_state();
1256
1257
	/*
1258
	 * Atomic printing from console_flush_all() only occurs if this
1259
	 * CPU is not in an elevated atomic priority state. If it is, the
1260
	 * atomic printing will occur when this CPU exits that state. This
1261
	 * allows a set of emergency messages to be completely stored in
1262
	 * the ringbuffer before this CPU begins flushing.
1263
	 */
1264
	if (cpu_state->prio <= NBCON_PRIO_NORMAL) {
1265
		struct nbcon_write_context wctxt = { };
1266
		struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
1267
1268
		ctxt->console	= con;
1269
		ctxt->prio	= NBCON_PRIO_NORMAL;
1270
1271
		progress = nbcon_atomic_emit_one(&wctxt);
1272
	}
1273
1274
	if (port)
1275
		spin_unlock_irqrestore(&port->lock, flags);
1276
	else
1277
		spin_unlock_irqrestore(&shared_spinlock, flags);
1278
1279
	return progress;
1280
}
1281
1282
/**
1283
 * __nbcon_atomic_flush_all - Flush all nbcon consoles in atomic mode
1284
 * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
1285
 */
1286
static void __nbcon_atomic_flush_all(bool allow_unsafe_takeover)
1287
{
1288
	struct nbcon_write_context wctxt = { };
1289
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
1290
	struct nbcon_cpu_state *cpu_state;
1291
	struct console *con;
1292
	bool any_progress;
1293
	int cookie;
1294
1295
	cpu_state = nbcon_get_cpu_state();
1296
1297
	/*
1298
	 * Let the outermost flush of this priority print. This avoids
1299
	 * nasty hackery for nested WARN() where the printing itself
1300
	 * generates one and ensures such nested messages are stored to
1301
	 * the ringbuffer before any printing resumes.
1302
	 *
1303
	 * cpu_state->prio <= NBCON_PRIO_NORMAL is not subject to nesting
1304
	 * and can proceed in order to allow any atomic printing for
1305
	 * regular kernel messages.
1306
	 */
1307
	if (cpu_state->prio > NBCON_PRIO_NORMAL &&
1308
	    cpu_state->nesting[cpu_state->prio] != 1)
1309
		return;
1310
1311
	do {
1312
		any_progress = false;
1313
1314
		cookie = console_srcu_read_lock();
1315
		for_each_console_srcu(con) {
1316
			short flags = console_srcu_read_flags(con);
1317
			bool progress;
1318
1319
			if (!(flags & CON_NBCON))
1320
				continue;
1321
1322
			if (!console_is_usable(con, flags, true))
1323
				continue;
1324
1325
			memset(ctxt, 0, sizeof(*ctxt));
1326
			ctxt->console			= con;
1327
			ctxt->spinwait_max_us		= 2000;
1328
			ctxt->prio			= cpu_state->prio;
1329
			ctxt->allow_unsafe_takeover	= allow_unsafe_takeover;
1330
1331
			progress = nbcon_atomic_emit_one(&wctxt);
1332
			if (!progress)
1333
				continue;
1334
			any_progress = true;
1335
		}
1336
		console_srcu_read_unlock(cookie);
1337
	} while (any_progress);
1338
}
1339
1340
/**
1341
 * nbcon_atomic_flush_all - Flush all nbcon consoles in atomic mode
1342
 *
1343
 * Context:	Any context where migration is disabled.
1344
 */
1345
void nbcon_atomic_flush_all(void)
1346
{
1347
	__nbcon_atomic_flush_all(false);
1348
}
1349
1350
/**
1351
 * nbcon_atomic_enter - Enter a context that enforces atomic printing
1352
 * @prio:	Priority of the context
1353
 *
1354
 * Return:	The previous priority that needs to be fed into
1355
 *		the corresponding nbcon_atomic_exit()
1356
 * Context:	Any context. Disables preemption.
1357
 *
1358
 * When within an atomic printing section, no atomic printing occurs. This
1359
 * is to allow all emergency messages to be dumped into the ringbuffer before
1360
 * flushing the ringbuffer. The actual atomic printing occurs when exiting
1361
 * the outermost atomic printing section.
1362
 */
1363
enum nbcon_prio nbcon_atomic_enter(enum nbcon_prio prio)
1364
{
1365
	struct nbcon_cpu_state *cpu_state;
1366
	enum nbcon_prio prev_prio;
1367
1368
	preempt_disable();
1369
1370
	cpu_state = nbcon_get_cpu_state();
1371
1372
	prev_prio = cpu_state->prio;
1373
	if (prio > prev_prio)
1374
		cpu_state->prio = prio;
1375
1376
	/*
1377
	 * Increment the nesting on @cpu_state->prio (instead of
1378
	 * @prio) so that a WARN() nested within a panic printout
1379
	 * does not attempt to scribble state.
1380
	 */
1381
	cpu_state->nesting[cpu_state->prio]++;
1382
1383
	return prev_prio;
1384
}
1385
1386
/**
1387
 * nbcon_atomic_exit - Exit a context that enforces atomic printing
1388
 * @prio:	Priority of the context to leave
1389
 * @prev_prio:	Priority of the previous context for restore
1390
 *
1391
 * Context:	Any context. Enables preemption.
1392
 *
1393
 * @prev_prio is the priority returned by the corresponding
1394
 * nbcon_atomic_enter().
1395
 */
1396
void nbcon_atomic_exit(enum nbcon_prio prio, enum nbcon_prio prev_prio)
1397
{
1398
	struct nbcon_cpu_state *cpu_state;
1399
	u64 next_seq = prb_next_seq(prb);
1400
1401
	__nbcon_atomic_flush_all(false);
1402
1403
	cpu_state = nbcon_get_cpu_state();
1404
1405
	if (cpu_state->prio == NBCON_PRIO_PANIC)
1406
		__nbcon_atomic_flush_all(true);
1407
1408
	/*
1409
	 * Undo the nesting of nbcon_atomic_enter() at the CPU state
1410
	 * priority.
1411
	 */
1412
	cpu_state->nesting[cpu_state->prio]--;
1413
1414
	/*
1415
	 * Restore the previous priority, which was returned by
1416
	 * nbcon_atomic_enter().
1417
	 */
1418
	cpu_state->prio = prev_prio;
1419
1420
	if (cpu_state->nesting[cpu_state->prio] == 0 &&
1421
	    prb_read_valid(prb, next_seq, NULL)) {
1422
		nbcon_wake_threads();
1423
	}
1424
1425
	preempt_enable();
1426
}
1427
1428
/**
1429
 * nbcon_kthread_stop - Stop a printk thread
1430
 * @con:	Console to operate on
1431
 */
1432
static void nbcon_kthread_stop(struct console *con)
1433
{
1434
	lockdep_assert_console_list_lock_held();
1435
1436
	if (!con->kthread)
1437
		return;
1438
1439
	kthread_stop(con->kthread);
1440
	con->kthread = NULL;
1441
}
1442
1443
/**
1444
 * nbcon_kthread_create - Create a printk thread
1445
 * @con:	Console to operate on
1446
 *
1447
 * If it fails, let the console proceed. The atomic part might
1448
 * be usable and useful.
1449
 */
1450
void nbcon_kthread_create(struct console *con)
1451
{
1452
	struct task_struct *kt;
1453
1454
	lockdep_assert_console_list_lock_held();
1455
1456
	if (!(con->flags & CON_NBCON) || !con->write_thread)
1457
		return;
1458
1459
	if (!printk_threads_enabled || con->kthread)
1460
		return;
1461
1462
	/*
1463
	 * Printer threads cannot be started as long as any boot console is
1464
	 * registered because there is no way to synchronize the hardware
1465
	 * registers between boot console code and regular console code.
1466
	 */
1467
	if (have_boot_console)
1468
		return;
1469
1470
	kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
1471
	if (IS_ERR(kt)) {
1472
		con_printk(KERN_ERR, con, "failed to start printing thread\n");
1473
		return;
1474
	}
1475
1476
	con->kthread = kt;
1477
1478
	/*
1479
	 * It is important that console printing threads are scheduled
1480
	 * shortly after a printk call and with generous runtime budgets.
1481
	 */
1482
	sched_set_normal(con->kthread, -20);
1483
}
1484
1485
static int __init printk_setup_threads(void)
1486
{
1487
	struct console *con;
1488
1489
	console_list_lock();
1490
	printk_threads_enabled = true;
1491
	for_each_console(con)
1492
		nbcon_kthread_create(con);
1493
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && serialized_printing)
1494
		nbcon_legacy_kthread_create();
1495
	console_list_unlock();
1496
	return 0;
1497
}
1498
early_initcall(printk_setup_threads);
1499
1500
/**
1501
 * nbcon_alloc - Allocate buffers needed by the nbcon console
1502
 * @con:	Console to allocate buffers for
1503
 *
1504
 * Return:	True on success. False otherwise and the console cannot
1505
 *		be used.
1506
 *
1507
 * This is not part of nbcon_init() because buffer allocation must
1508
 * be performed earlier in the console registration process.
1509
 */
1510
bool nbcon_alloc(struct console *con)
1511
{
1512
	if (con->flags & CON_BOOT) {
1513
		/*
1514
		 * Boot console printing is synchronized with legacy console
1515
		 * printing, so boot consoles can share the same global printk
1516
		 * buffers.
1517
		 */
1518
		con->pbufs = &printk_shared_pbufs;
1519
	} else {
1520
		con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
1521
		if (!con->pbufs) {
1522
			con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
1523
			return false;
1524
		}
1525
	}
1526
1527
	return true;
1528
}
1529
1530
/**
1531
 * nbcon_init - Initialize the nbcon console specific data
1532
 * @con:	Console to initialize
1533
 *
1534
 * nbcon_alloc() *must* be called and succeed before this function
1535
 * is called.
1536
 *
1537
 * This function expects that the legacy @con->seq has been set.
1538
 */
1539
void nbcon_init(struct console *con)
1540
{
1541
	struct nbcon_state state = { };
1542
1543
	/* nbcon_alloc() must have been called and successful! */
1544
	BUG_ON(!con->pbufs);
1545
1546
	rcuwait_init(&con->rcuwait);
1547
	init_irq_work(&con->irq_work, nbcon_irq_work);
1548
	nbcon_seq_force(con, con->seq);
1549
	nbcon_state_set(con, &state);
1550
	nbcon_kthread_create(con);
1551
}
1552
1553
/**
1554
 * nbcon_free - Free and cleanup the nbcon console specific data
1555
 * @con:	Console to free/cleanup nbcon data
1556
 */
1557
void nbcon_free(struct console *con)
1558
{
1559
	struct nbcon_state state = { };
1560
1561
	nbcon_kthread_stop(con);
1562
	nbcon_state_set(con, &state);
1563
1564
	/* Boot consoles share global printk buffers. */
1565
	if (!(con->flags & CON_BOOT))
1566
		kfree(con->pbufs);
1567
1568
	con->pbufs = NULL;
1569
}
1570
1571
static inline bool uart_is_nbcon(struct uart_port *up)
1572
{
1573
	int cookie;
1574
	bool ret;
1575
1576
	if (!uart_console(up))
1577
		return false;
1578
1579
	cookie = console_srcu_read_lock();
1580
	ret = (console_srcu_read_flags(up->cons) & CON_NBCON);
1581
	console_srcu_read_unlock(cookie);
1582
	return ret;
1583
}
1584
1585
/**
1586
 * nbcon_handle_port_lock - The second half of the port locking wrapper
1587
 * @up:		The uart port whose @lock was locked
1588
 *
1589
 * The uart_port_lock() wrappers will first lock the spin_lock @up->lock.
1590
 * Then this function is called to implement nbcon-specific processing.
1591
 *
1592
 * If @up is an nbcon console, this console will be acquired and marked as
1593
 * unsafe. Otherwise this function does nothing.
1594
 */
1595
void nbcon_handle_port_lock(struct uart_port *up)
1596
{
1597
	struct console *con = up->cons;
1598
	struct nbcon_context ctxt;
1599
1600
	if (!uart_is_nbcon(up))
1601
		return;
1602
1603
	WARN_ON_ONCE(con->locked_port);
1604
1605
	do {
1606
		do {
1607
			memset(&ctxt, 0, sizeof(ctxt));
1608
			ctxt.console	= con;
1609
			ctxt.prio	= NBCON_PRIO_NORMAL;
1610
		} while (!nbcon_context_try_acquire(&ctxt));
1611
1612
	} while (!nbcon_context_enter_unsafe(&ctxt));
1613
1614
	con->locked_port = true;
1615
}
1616
EXPORT_SYMBOL_GPL(nbcon_handle_port_lock);
1617
1618
/**
1619
 * nbcon_handle_port_unlock - The first half of the port unlocking wrapper
1620
 * @up:		The uart port whose @lock is about to be unlocked
1621
 *
1622
 * The uart_port_unlock() wrappers will first call this function to implement
1623
 * nbcon-specific processing. Then afterwards the uart_port_unlock() wrappers
1624
 * will unlock the spin_lock @up->lock.
1625
 *
1626
 * If @up is an nbcon console, the console will be marked as safe and
1627
 * released. Otherwise this function does nothing.
1628
 */
1629
void nbcon_handle_port_unlock(struct uart_port *up)
1630
{
1631
	struct console *con = up->cons;
1632
	struct nbcon_context ctxt = {
1633
		.console	= con,
1634
		.prio		= NBCON_PRIO_NORMAL,
1635
	};
1636
1637
	if (!uart_is_nbcon(up))
1638
		return;
1639
1640
	WARN_ON_ONCE(!con->locked_port);
1641
1642
	if (nbcon_context_exit_unsafe(&ctxt))
1643
		nbcon_context_release(&ctxt);
1644
1645
	con->locked_port = false;
1646
}
1647
EXPORT_SYMBOL_GPL(nbcon_handle_port_unlock);
1648
1649
/**
1650
 * printk_kthread_shutdown - shutdown all threaded printers
1651
 *
1652
 * On system shutdown all threaded printers are stopped. This allows printk
1653
 * to transition back to atomic printing, thus providing a robust mechanism
1654
 * for the final shutdown/reboot messages to be output.
1655
 */
1656
static void printk_kthread_shutdown(void)
1657
{
1658
	struct console *con;
1659
1660
	console_list_lock();
1661
	for_each_console(con) {
1662
		if (con->flags & CON_NBCON)
1663
			nbcon_kthread_stop(con);
1664
	}
1665
	console_list_unlock();
1666
}
1667
1668
static struct syscore_ops printk_syscore_ops = {
1669
	.shutdown = printk_kthread_shutdown,
1670
};
1671
1672
static int __init printk_init_ops(void)
1673
{
1674
	register_syscore_ops(&printk_syscore_ops);
1675
	return 0;
1676
}
1677
device_initcall(printk_init_ops);
(-)a/kernel/printk/printk.c (-157 / +405 lines)
Lines 102-113 DEFINE_STATIC_SRCU(console_srcu); Link Here
102
 */
102
 */
103
int __read_mostly suppress_printk;
103
int __read_mostly suppress_printk;
104
104
105
/*
106
 * During panic, heavy printk by other CPUs can delay the
107
 * panic and risk deadlock on console resources.
108
 */
109
static int __read_mostly suppress_panic_printk;
110
111
#ifdef CONFIG_LOCKDEP
105
#ifdef CONFIG_LOCKDEP
112
static struct lockdep_map console_lock_dep_map = {
106
static struct lockdep_map console_lock_dep_map = {
113
	.name = "console_lock"
107
	.name = "console_lock"
Lines 288-293 EXPORT_SYMBOL(console_list_unlock); Link Here
288
 * Return: A cookie to pass to console_srcu_read_unlock().
282
 * Return: A cookie to pass to console_srcu_read_unlock().
289
 */
283
 */
290
int console_srcu_read_lock(void)
284
int console_srcu_read_lock(void)
285
	__acquires(&console_srcu)
291
{
286
{
292
	return srcu_read_lock_nmisafe(&console_srcu);
287
	return srcu_read_lock_nmisafe(&console_srcu);
293
}
288
}
Lines 301-306 EXPORT_SYMBOL(console_srcu_read_lock); Link Here
301
 * Counterpart to console_srcu_read_lock()
296
 * Counterpart to console_srcu_read_lock()
302
 */
297
 */
303
void console_srcu_read_unlock(int cookie)
298
void console_srcu_read_unlock(int cookie)
299
	__releases(&console_srcu)
304
{
300
{
305
	srcu_read_unlock_nmisafe(&console_srcu, cookie);
301
	srcu_read_unlock_nmisafe(&console_srcu, cookie);
306
}
302
}
Lines 444-451 static int console_msg_format = MSG_FORMAT_DEFAULT; Link Here
444
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
440
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
445
static DEFINE_MUTEX(syslog_lock);
441
static DEFINE_MUTEX(syslog_lock);
446
442
443
/*
444
 * Specifies if a legacy console is registered. See serialized_printing
445
 * for details.
446
 */
447
bool have_legacy_console;
448
449
/*
450
 * Specifies if a boot console is registered. See serialized_printing
451
 * for details.
452
 */
453
bool have_boot_console;
454
447
#ifdef CONFIG_PRINTK
455
#ifdef CONFIG_PRINTK
456
/*
457
 * During panic, heavy printk by other CPUs can delay the
458
 * panic and risk deadlock on console resources.
459
 */
460
static int __read_mostly suppress_panic_printk;
461
448
DECLARE_WAIT_QUEUE_HEAD(log_wait);
462
DECLARE_WAIT_QUEUE_HEAD(log_wait);
463
464
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
465
449
/* All 3 protected by @syslog_lock. */
466
/* All 3 protected by @syslog_lock. */
450
/* the next printk record to read by syslog(READ) or /proc/kmsg */
467
/* the next printk record to read by syslog(READ) or /proc/kmsg */
451
static u64 syslog_seq;
468
static u64 syslog_seq;
Lines 494-500 _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS, Link Here
494
511
495
static struct printk_ringbuffer printk_rb_dynamic;
512
static struct printk_ringbuffer printk_rb_dynamic;
496
513
497
static struct printk_ringbuffer *prb = &printk_rb_static;
514
struct printk_ringbuffer *prb = &printk_rb_static;
498
515
499
/*
516
/*
500
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
517
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
Lines 698-706 static ssize_t msg_print_ext_body(char *buf, size_t size, Link Here
698
	return len;
715
	return len;
699
}
716
}
700
717
701
static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
702
				    bool is_extended, bool may_supress);
703
704
/* /dev/kmsg - userspace message inject/listen interface */
718
/* /dev/kmsg - userspace message inject/listen interface */
705
struct devkmsg_user {
719
struct devkmsg_user {
706
	atomic64_t seq;
720
	atomic64_t seq;
Lines 2267-2274 asmlinkage int vprintk_emit(int facility, int level, Link Here
2267
			    const struct dev_printk_info *dev_info,
2281
			    const struct dev_printk_info *dev_info,
2268
			    const char *fmt, va_list args)
2282
			    const char *fmt, va_list args)
2269
{
2283
{
2284
	bool print_direct = serialized_printing && !IS_ENABLED(CONFIG_PREEMPT_RT);
2270
	int printed_len;
2285
	int printed_len;
2271
	bool in_sched = false;
2272
2286
2273
	/* Suppress unimportant messages after panic happens */
2287
	/* Suppress unimportant messages after panic happens */
2274
	if (unlikely(suppress_printk))
2288
	if (unlikely(suppress_printk))
Lines 2280-2294 asmlinkage int vprintk_emit(int facility, int level, Link Here
2280
2294
2281
	if (level == LOGLEVEL_SCHED) {
2295
	if (level == LOGLEVEL_SCHED) {
2282
		level = LOGLEVEL_DEFAULT;
2296
		level = LOGLEVEL_DEFAULT;
2283
		in_sched = true;
2297
		/* If called from the scheduler, we can not call up(). */
2298
		print_direct = false;
2284
	}
2299
	}
2285
2300
2286
	printk_delay(level);
2301
	printk_delay(level);
2287
2302
2288
	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2303
	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2289
2304
2290
	/* If called from the scheduler, we can not call up(). */
2305
	nbcon_wake_threads();
2291
	if (!in_sched) {
2306
2307
	if (print_direct) {
2292
		/*
2308
		/*
2293
		 * The caller may be holding system-critical or
2309
		 * The caller may be holding system-critical or
2294
		 * timing-sensitive locks. Disable preemption during
2310
		 * timing-sensitive locks. Disable preemption during
Lines 2306-2317 asmlinkage int vprintk_emit(int facility, int level, Link Here
2306
		if (console_trylock_spinning())
2322
		if (console_trylock_spinning())
2307
			console_unlock();
2323
			console_unlock();
2308
		preempt_enable();
2324
		preempt_enable();
2309
	}
2310
2325
2311
	if (in_sched)
2326
	} else {
2312
		defer_console_output();
2327
		defer_console_output();
2313
	else
2328
	}
2314
		wake_up_klogd();
2315
2329
2316
	return printed_len;
2330
	return printed_len;
2317
}
2331
}
Lines 2339-2344 EXPORT_SYMBOL(_printk); Link Here
2339
static bool pr_flush(int timeout_ms, bool reset_on_progress);
2353
static bool pr_flush(int timeout_ms, bool reset_on_progress);
2340
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2354
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2341
2355
2356
static struct task_struct *nbcon_legacy_kthread;
2357
2358
static inline void wake_up_legacy_kthread(void)
2359
{
2360
	if (nbcon_legacy_kthread)
2361
		wake_up_interruptible(&legacy_wait);
2362
}
2363
2342
#else /* CONFIG_PRINTK */
2364
#else /* CONFIG_PRINTK */
2343
2365
2344
#define printk_time		false
2366
#define printk_time		false
Lines 2349-2373 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre Link Here
2349
2371
2350
static u64 syslog_seq;
2372
static u64 syslog_seq;
2351
2373
2352
static size_t record_print_text(const struct printk_record *r,
2353
				bool syslog, bool time)
2354
{
2355
	return 0;
2356
}
2357
static ssize_t info_print_ext_header(char *buf, size_t size,
2358
				     struct printk_info *info)
2359
{
2360
	return 0;
2361
}
2362
static ssize_t msg_print_ext_body(char *buf, size_t size,
2363
				  char *text, size_t text_len,
2364
				  struct dev_printk_info *dev_info) { return 0; }
2365
static void console_lock_spinning_enable(void) { }
2366
static int console_lock_spinning_disable_and_check(int cookie) { return 0; }
2367
static bool suppress_message_printing(int level) { return false; }
2368
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
2374
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
2369
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2375
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2370
2376
static inline void nbcon_legacy_kthread_create(void) { }
2377
static inline void wake_up_legacy_kthread(void) { }
2371
#endif /* CONFIG_PRINTK */
2378
#endif /* CONFIG_PRINTK */
2372
2379
2373
#ifdef CONFIG_EARLY_PRINTK
2380
#ifdef CONFIG_EARLY_PRINTK
Lines 2575-2580 void suspend_console(void) Link Here
2575
void resume_console(void)
2582
void resume_console(void)
2576
{
2583
{
2577
	struct console *con;
2584
	struct console *con;
2585
	short flags;
2586
	int cookie;
2578
2587
2579
	if (!console_suspend_enabled)
2588
	if (!console_suspend_enabled)
2580
		return;
2589
		return;
Lines 2591-2596 void resume_console(void) Link Here
2591
	 */
2600
	 */
2592
	synchronize_srcu(&console_srcu);
2601
	synchronize_srcu(&console_srcu);
2593
2602
2603
	/*
2604
	 * Since this runs in task context, wake the threaded printers
2605
	 * directly rather than scheduling irq_work to do it.
2606
	 */
2607
2608
	cookie = console_srcu_read_lock();
2609
	for_each_console_srcu(con) {
2610
		flags = console_srcu_read_flags(con);
2611
		if (flags & CON_NBCON)
2612
			nbcon_kthread_wake(con);
2613
	}
2614
	console_srcu_read_unlock(cookie);
2615
2616
	wake_up_legacy_kthread();
2617
2594
	pr_flush(1000, true);
2618
	pr_flush(1000, true);
2595
}
2619
}
2596
2620
Lines 2605-2611 void resume_console(void) Link Here
2605
 */
2629
 */
2606
static int console_cpu_notify(unsigned int cpu)
2630
static int console_cpu_notify(unsigned int cpu)
2607
{
2631
{
2608
	if (!cpuhp_tasks_frozen) {
2632
	if (!cpuhp_tasks_frozen && serialized_printing &&
2633
	    !IS_ENABLED(CONFIG_PREEMPT_RT)) {
2609
		/* If trylock fails, someone else is doing the printing */
2634
		/* If trylock fails, someone else is doing the printing */
2610
		if (console_trylock())
2635
		if (console_trylock())
2611
			console_unlock();
2636
			console_unlock();
Lines 2682-2723 int is_console_locked(void) Link Here
2682
}
2707
}
2683
EXPORT_SYMBOL(is_console_locked);
2708
EXPORT_SYMBOL(is_console_locked);
2684
2709
2685
/*
2686
 * Check if the given console is currently capable and allowed to print
2687
 * records.
2688
 *
2689
 * Requires the console_srcu_read_lock.
2690
 */
2691
static inline bool console_is_usable(struct console *con)
2692
{
2693
	short flags = console_srcu_read_flags(con);
2694
2695
	if (!(flags & CON_ENABLED))
2696
		return false;
2697
2698
	if ((flags & CON_SUSPENDED))
2699
		return false;
2700
2701
	if (!con->write)
2702
		return false;
2703
2704
	/*
2705
	 * Console drivers may assume that per-cpu resources have been
2706
	 * allocated. So unless they're explicitly marked as being able to
2707
	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
2708
	 */
2709
	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
2710
		return false;
2711
2712
	return true;
2713
}
2714
2715
static void __console_unlock(void)
2710
static void __console_unlock(void)
2716
{
2711
{
2717
	console_locked = 0;
2712
	console_locked = 0;
2718
	up_console_sem();
2713
	up_console_sem();
2719
}
2714
}
2720
2715
2716
#ifdef CONFIG_PRINTK
2717
2721
/*
2718
/*
2722
 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
2719
 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
2723
 * is achieved by shifting the existing message over and inserting the dropped
2720
 * is achieved by shifting the existing message over and inserting the dropped
Lines 2732-2739 static void __console_unlock(void) Link Here
2732
 *
2729
 *
2733
 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2730
 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2734
 */
2731
 */
2735
#ifdef CONFIG_PRINTK
2732
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2736
static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2737
{
2733
{
2738
	struct printk_buffers *pbufs = pmsg->pbufs;
2734
	struct printk_buffers *pbufs = pmsg->pbufs;
2739
	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2735
	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
Lines 2764-2772 static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d Link Here
2764
	memcpy(outbuf, scratchbuf, len);
2760
	memcpy(outbuf, scratchbuf, len);
2765
	pmsg->outbuf_len += len;
2761
	pmsg->outbuf_len += len;
2766
}
2762
}
2767
#else
2768
#define console_prepend_dropped(pmsg, dropped)
2769
#endif /* CONFIG_PRINTK */
2770
2763
2771
/*
2764
/*
2772
 * Read and format the specified record (or a later record if the specified
2765
 * Read and format the specified record (or a later record if the specified
Lines 2787-2794 static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d Link Here
2787
 * of @pmsg are valid. (See the documentation of struct printk_message
2780
 * of @pmsg are valid. (See the documentation of struct printk_message
2788
 * for information about the @pmsg fields.)
2781
 * for information about the @pmsg fields.)
2789
 */
2782
 */
2790
static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
2783
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
2791
				    bool is_extended, bool may_suppress)
2784
			     bool is_extended, bool may_suppress)
2792
{
2785
{
2793
	static int panic_console_dropped;
2786
	static int panic_console_dropped;
2794
2787
Lines 2846-2851 static bool printk_get_next_message(struct printk_message *pmsg, u64 seq, Link Here
2846
	return true;
2839
	return true;
2847
}
2840
}
2848
2841
2842
/*
2843
 * Used as the printk buffers for non-panic, serialized console printing.
2844
 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
2845
 * Its usage requires the console_lock held.
2846
 */
2847
struct printk_buffers printk_shared_pbufs;
2848
2849
/*
2849
/*
2850
 * Print one record for the given console. The record printed is whatever
2850
 * Print one record for the given console. The record printed is whatever
2851
 * record is the next available record for the given console.
2851
 * record is the next available record for the given console.
Lines 2863-2874 static bool printk_get_next_message(struct printk_message *pmsg, u64 seq, Link Here
2863
 */
2863
 */
2864
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
2864
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
2865
{
2865
{
2866
	static struct printk_buffers pbufs;
2867
2868
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
2866
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
2869
	char *outbuf = &pbufs.outbuf[0];
2867
	char *outbuf = &printk_shared_pbufs.outbuf[0];
2870
	struct printk_message pmsg = {
2868
	struct printk_message pmsg = {
2871
		.pbufs = &pbufs,
2869
		.pbufs = &printk_shared_pbufs,
2872
	};
2870
	};
2873
	unsigned long flags;
2871
	unsigned long flags;
2874
2872
Lines 2890-2924 static bool console_emit_next_record(struct console *con, bool *handover, int co Link Here
2890
		con->dropped = 0;
2888
		con->dropped = 0;
2891
	}
2889
	}
2892
2890
2893
	/*
2891
	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
2894
	 * While actively printing out messages, if another printk()
2892
		/*
2895
	 * were to occur on another CPU, it may wait for this one to
2893
		 * While actively printing out messages, if another printk()
2896
	 * finish. This task can not be preempted if there is a
2894
		 * were to occur on another CPU, it may wait for this one to
2897
	 * waiter waiting to take over.
2895
		 * finish. This task can not be preempted if there is a
2898
	 *
2896
		 * waiter waiting to take over.
2899
	 * Interrupts are disabled because the hand over to a waiter
2897
		 *
2900
	 * must not be interrupted until the hand over is completed
2898
		 * Interrupts are disabled because the hand over to a waiter
2901
	 * (@console_waiter is cleared).
2899
		 * must not be interrupted until the hand over is completed
2902
	 */
2900
		 * (@console_waiter is cleared).
2903
	printk_safe_enter_irqsave(flags);
2901
		 */
2904
	console_lock_spinning_enable();
2902
		printk_safe_enter_irqsave(flags);
2903
		console_lock_spinning_enable();
2905
2904
2906
	/* Do not trace print latency. */
2905
		/* Do not trace print latency. */
2907
	stop_critical_timings();
2906
		stop_critical_timings();
2907
	}
2908
2908
2909
	/* Write everything out to the hardware. */
2909
	/* Write everything out to the hardware. */
2910
	con->write(con, outbuf, pmsg.outbuf_len);
2910
	con->write(con, outbuf, pmsg.outbuf_len);
2911
2911
2912
	start_critical_timings();
2913
2914
	con->seq = pmsg.seq + 1;
2912
	con->seq = pmsg.seq + 1;
2915
2913
2916
	*handover = console_lock_spinning_disable_and_check(cookie);
2914
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
2917
	printk_safe_exit_irqrestore(flags);
2915
		*handover = false;
2916
	} else {
2917
		start_critical_timings();
2918
2919
		*handover = console_lock_spinning_disable_and_check(cookie);
2920
2921
		printk_safe_exit_irqrestore(flags);
2922
	}
2918
skip:
2923
skip:
2919
	return true;
2924
	return true;
2920
}
2925
}
2921
2926
2927
#else
2928
2929
static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
2930
{
2931
	*handover = false;
2932
	return false;
2933
}
2934
2935
#endif /* CONFIG_PRINTK */
2936
2922
/*
2937
/*
2923
 * Print out all remaining records to all consoles.
2938
 * Print out all remaining records to all consoles.
2924
 *
2939
 *
Lines 2957-2980 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove Link Here
2957
2972
2958
		cookie = console_srcu_read_lock();
2973
		cookie = console_srcu_read_lock();
2959
		for_each_console_srcu(con) {
2974
		for_each_console_srcu(con) {
2975
			short flags = console_srcu_read_flags(con);
2976
			u64 printk_seq;
2960
			bool progress;
2977
			bool progress;
2961
2978
2962
			if (!console_is_usable(con))
2979
			/*
2980
			 * console_flush_all() is only for legacy consoles,
2981
			 * unless the nbcon console has no kthread printer.
2982
			 */
2983
			if ((flags & CON_NBCON) && con->kthread)
2984
				continue;
2985
2986
			if (!console_is_usable(con, flags, true))
2963
				continue;
2987
				continue;
2964
			any_usable = true;
2988
			any_usable = true;
2965
2989
2966
			progress = console_emit_next_record(con, handover, cookie);
2990
			if (flags & CON_NBCON) {
2991
				progress = nbcon_console_emit_next_record(con);
2992
				printk_seq = nbcon_seq_read(con);
2993
			} else {
2994
				progress = console_emit_next_record(con, handover, cookie);
2967
2995
2968
			/*
2996
				/*
2969
			 * If a handover has occurred, the SRCU read lock
2997
				 * If a handover has occurred, the SRCU read
2970
			 * is already released.
2998
				 * lock is already released.
2971
			 */
2999
				 */
2972
			if (*handover)
3000
				if (*handover)
2973
				return false;
3001
					return false;
3002
3003
				printk_seq = con->seq;
3004
			}
2974
3005
2975
			/* Track the next of the highest seq flushed. */
3006
			/* Track the next of the highest seq flushed. */
2976
			if (con->seq > *next_seq)
3007
			if (printk_seq > *next_seq)
2977
				*next_seq = con->seq;
3008
				*next_seq = printk_seq;
2978
3009
2979
			if (!progress)
3010
			if (!progress)
2980
				continue;
3011
				continue;
Lines 2997-3015 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove Link Here
2997
	return false;
3028
	return false;
2998
}
3029
}
2999
3030
3000
/**
3031
static u64 console_flush_and_unlock(void)
3001
 * console_unlock - unblock the console subsystem from printing
3002
 *
3003
 * Releases the console_lock which the caller holds to block printing of
3004
 * the console subsystem.
3005
 *
3006
 * While the console_lock was held, console output may have been buffered
3007
 * by printk().  If this is the case, console_unlock(); emits
3008
 * the output prior to releasing the lock.
3009
 *
3010
 * console_unlock(); may be called from any context.
3011
 */
3012
void console_unlock(void)
3013
{
3032
{
3014
	bool do_cond_resched;
3033
	bool do_cond_resched;
3015
	bool handover;
3034
	bool handover;
Lines 3052-3057 void console_unlock(void) Link Here
3052
		 * fails, another context is already handling the printing.
3071
		 * fails, another context is already handling the printing.
3053
		 */
3072
		 */
3054
	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3073
	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3074
3075
	return next_seq;
3076
}
3077
3078
/**
3079
 * console_unlock - unblock the console subsystem from printing
3080
 *
3081
 * Releases the console_lock which the caller holds to block printing of
3082
 * the console subsystem.
3083
 *
3084
 * While the console_lock was held, console output may have been buffered
3085
 * by printk().  If this is the case, console_unlock(); emits
3086
 * the output prior to releasing the lock.
3087
 *
3088
 * console_unlock(); may be called from any context.
3089
 */
3090
void console_unlock(void)
3091
{
3092
	/*
3093
	 * PREEMPT_RT relies on kthread and atomic consoles for printing.
3094
	 * It never attempts to print from console_unlock().
3095
	 */
3096
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3097
		__console_unlock();
3098
		return;
3099
	}
3100
3101
	console_flush_and_unlock();
3055
}
3102
}
3056
EXPORT_SYMBOL(console_unlock);
3103
EXPORT_SYMBOL(console_unlock);
3057
3104
Lines 3163-3168 void console_flush_on_panic(enum con_flush_mode mode) Link Here
3163
3210
3164
	if (mode == CONSOLE_REPLAY_ALL) {
3211
	if (mode == CONSOLE_REPLAY_ALL) {
3165
		struct console *c;
3212
		struct console *c;
3213
		short flags;
3166
		int cookie;
3214
		int cookie;
3167
		u64 seq;
3215
		u64 seq;
3168
3216
Lines 3170-3185 void console_flush_on_panic(enum con_flush_mode mode) Link Here
3170
3218
3171
		cookie = console_srcu_read_lock();
3219
		cookie = console_srcu_read_lock();
3172
		for_each_console_srcu(c) {
3220
		for_each_console_srcu(c) {
3173
			/*
3221
			flags = console_srcu_read_flags(c);
3174
			 * This is an unsynchronized assignment, but the
3222
3175
			 * kernel is in "hope and pray" mode anyway.
3223
			if (flags & CON_NBCON) {
3176
			 */
3224
				nbcon_seq_force(c, seq);
3177
			c->seq = seq;
3225
			} else {
3226
				/*
3227
				 * This is an unsynchronized assignment. On
3228
				 * panic legacy consoles are only best effort.
3229
				 */
3230
				c->seq = seq;
3231
			}
3178
		}
3232
		}
3179
		console_srcu_read_unlock(cookie);
3233
		console_srcu_read_unlock(cookie);
3180
	}
3234
	}
3181
3235
3182
	console_flush_all(false, &next_seq, &handover);
3236
	nbcon_atomic_flush_all();
3237
3238
	if (serialized_printing)
3239
		console_flush_all(false, &next_seq, &handover);
3183
}
3240
}
3184
3241
3185
/*
3242
/*
Lines 3236-3248 EXPORT_SYMBOL(console_stop); Link Here
3236
3293
3237
void console_start(struct console *console)
3294
void console_start(struct console *console)
3238
{
3295
{
3296
	short flags;
3297
3239
	console_list_lock();
3298
	console_list_lock();
3240
	console_srcu_write_flags(console, console->flags | CON_ENABLED);
3299
	console_srcu_write_flags(console, console->flags | CON_ENABLED);
3300
	flags = console->flags;
3241
	console_list_unlock();
3301
	console_list_unlock();
3302
3303
	/*
3304
	 * Ensure that all SRCU list walks have completed. The related
3305
	 * printing context must be able to see it is enabled so that
3306
	 * it is guaranteed to wake up and resume printing.
3307
	 */
3308
	synchronize_srcu(&console_srcu);
3309
3310
	if (flags & CON_NBCON)
3311
		nbcon_kthread_wake(console);
3312
	else
3313
		wake_up_legacy_kthread();
3314
3242
	__pr_flush(console, 1000, true);
3315
	__pr_flush(console, 1000, true);
3243
}
3316
}
3244
EXPORT_SYMBOL(console_start);
3317
EXPORT_SYMBOL(console_start);
3245
3318
3319
#ifdef CONFIG_PRINTK
3320
static bool printer_should_wake(u64 seq)
3321
{
3322
	bool available = false;
3323
	struct console *con;
3324
	int cookie;
3325
3326
	if (kthread_should_stop())
3327
		return true;
3328
3329
	cookie = console_srcu_read_lock();
3330
	for_each_console_srcu(con) {
3331
		short flags = console_srcu_read_flags(con);
3332
3333
		if (flags & CON_NBCON)
3334
			continue;
3335
		if (!console_is_usable(con, flags, true))
3336
			continue;
3337
		/*
3338
		 * It is safe to read @seq because only this
3339
		 * thread context updates @seq.
3340
		 */
3341
		if (prb_read_valid(prb, con->seq, NULL)) {
3342
			available = true;
3343
			break;
3344
		}
3345
	}
3346
	console_srcu_read_unlock(cookie);
3347
3348
	return available;
3349
}
3350
3351
static int nbcon_legacy_kthread_func(void *unused)
3352
{
3353
	u64 seq = 0;
3354
	int error;
3355
3356
	for (;;) {
3357
		error = wait_event_interruptible(legacy_wait, printer_should_wake(seq));
3358
3359
		if (kthread_should_stop())
3360
			break;
3361
3362
		if (error)
3363
			continue;
3364
3365
		console_lock();
3366
		seq = console_flush_and_unlock();
3367
	}
3368
	return 0;
3369
}
3370
3371
void nbcon_legacy_kthread_create(void)
3372
{
3373
	struct task_struct *kt;
3374
3375
	lockdep_assert_held(&console_mutex);
3376
3377
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
3378
		return;
3379
3380
	if (!printk_threads_enabled || nbcon_legacy_kthread)
3381
		return;
3382
3383
	kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy");
3384
	if (IS_ERR(kt)) {
3385
		pr_err("unable to start legacy printing thread\n");
3386
		return;
3387
	}
3388
3389
	nbcon_legacy_kthread = kt;
3390
3391
	/*
3392
	 * It is important that console printing threads are scheduled
3393
	 * shortly after a printk call and with generous runtime budgets.
3394
	 */
3395
	sched_set_normal(nbcon_legacy_kthread, -20);
3396
}
3397
#endif /* CONFIG_PRINTK */
3398
3246
static int __read_mostly keep_bootcon;
3399
static int __read_mostly keep_bootcon;
3247
3400
3248
static int __init keep_bootcon_setup(char *str)
3401
static int __init keep_bootcon_setup(char *str)
Lines 3326-3336 static void try_enable_default_console(struct console *newcon) Link Here
3326
		newcon->flags |= CON_CONSDEV;
3479
		newcon->flags |= CON_CONSDEV;
3327
}
3480
}
3328
3481
3329
#define con_printk(lvl, con, fmt, ...)			\
3330
	printk(lvl pr_fmt("%sconsole [%s%d] " fmt),	\
3331
	       (con->flags & CON_BOOT) ? "boot" : "",	\
3332
	       con->name, con->index, ##__VA_ARGS__)
3333
3334
static void console_init_seq(struct console *newcon, bool bootcon_registered)
3482
static void console_init_seq(struct console *newcon, bool bootcon_registered)
3335
{
3483
{
3336
	struct console *con;
3484
	struct console *con;
Lines 3444-3449 void register_console(struct console *newcon) Link Here
3444
		goto unlock;
3592
		goto unlock;
3445
	}
3593
	}
3446
3594
3595
	if (newcon->flags & CON_NBCON) {
3596
		/*
3597
		 * Ensure the nbcon console buffers can be allocated
3598
		 * before modifying any global data.
3599
		 */
3600
		if (!nbcon_alloc(newcon))
3601
			goto unlock;
3602
	}
3603
3447
	/*
3604
	/*
3448
	 * See if we want to enable this console driver by default.
3605
	 * See if we want to enable this console driver by default.
3449
	 *
3606
	 *
Lines 3471-3478 void register_console(struct console *newcon) Link Here
3471
		err = try_enable_preferred_console(newcon, false);
3628
		err = try_enable_preferred_console(newcon, false);
3472
3629
3473
	/* printk() messages are not printed to the Braille console. */
3630
	/* printk() messages are not printed to the Braille console. */
3474
	if (err || newcon->flags & CON_BRL)
3631
	if (err || newcon->flags & CON_BRL) {
3632
		if (newcon->flags & CON_NBCON)
3633
			nbcon_free(newcon);
3475
		goto unlock;
3634
		goto unlock;
3635
	}
3476
3636
3477
	/*
3637
	/*
3478
	 * If we have a bootconsole, and are switching to a real console,
3638
	 * If we have a bootconsole, and are switching to a real console,
Lines 3488-3493 void register_console(struct console *newcon) Link Here
3488
	newcon->dropped = 0;
3648
	newcon->dropped = 0;
3489
	console_init_seq(newcon, bootcon_registered);
3649
	console_init_seq(newcon, bootcon_registered);
3490
3650
3651
	if (newcon->flags & CON_NBCON) {
3652
		nbcon_init(newcon);
3653
	} else {
3654
		have_legacy_console = true;
3655
		nbcon_legacy_kthread_create();
3656
	}
3657
3658
	if (newcon->flags & CON_BOOT)
3659
		have_boot_console = true;
3660
3491
	/*
3661
	/*
3492
	 * Put this console in the list - keep the
3662
	 * Put this console in the list - keep the
3493
	 * preferred driver at the head of the list.
3663
	 * preferred driver at the head of the list.
Lines 3540-3545 EXPORT_SYMBOL(register_console); Link Here
3540
/* Must be called under console_list_lock(). */
3710
/* Must be called under console_list_lock(). */
3541
static int unregister_console_locked(struct console *console)
3711
static int unregister_console_locked(struct console *console)
3542
{
3712
{
3713
	bool is_legacy_con = !(console->flags & CON_NBCON);
3714
	bool is_boot_con = (console->flags & CON_BOOT);
3715
	struct console *c;
3543
	int res;
3716
	int res;
3544
3717
3545
	lockdep_assert_console_list_lock_held();
3718
	lockdep_assert_console_list_lock_held();
Lines 3579-3589 static int unregister_console_locked(struct console *console) Link Here
3579
	 */
3752
	 */
3580
	synchronize_srcu(&console_srcu);
3753
	synchronize_srcu(&console_srcu);
3581
3754
3755
	if (console->flags & CON_NBCON)
3756
		nbcon_free(console);
3757
3582
	console_sysfs_notify();
3758
	console_sysfs_notify();
3583
3759
3584
	if (console->exit)
3760
	if (console->exit)
3585
		res = console->exit(console);
3761
		res = console->exit(console);
3586
3762
3763
	/*
3764
	 * If the current console was a boot and/or legacy console, the
3765
	 * related global flags might need to be updated.
3766
	 */
3767
	if (is_boot_con || is_legacy_con) {
3768
		bool found_boot_con = false;
3769
		bool found_legacy_con = false;
3770
3771
		for_each_console(c) {
3772
			if (c->flags & CON_BOOT)
3773
				found_boot_con = true;
3774
			if (!(c->flags & CON_NBCON))
3775
				found_legacy_con = true;
3776
		}
3777
		if (!found_boot_con)
3778
			have_boot_console = false;
3779
		if (!found_legacy_con)
3780
			have_legacy_console = false;
3781
	}
3782
3783
	/*
3784
	 * When the last boot console unregisters, start up the
3785
	 * printing threads.
3786
	 */
3787
	if (is_boot_con && !have_boot_console) {
3788
		for_each_console(c)
3789
			nbcon_kthread_create(c);
3790
	}
3791
3792
#ifdef CONFIG_PRINTK
3793
	if (!serialized_printing && nbcon_legacy_kthread) {
3794
		kthread_stop(nbcon_legacy_kthread);
3795
		nbcon_legacy_kthread = NULL;
3796
	}
3797
#endif
3798
3587
	return res;
3799
	return res;
3588
}
3800
}
3589
3801
Lines 3732-3737 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre Link Here
3732
	struct console *c;
3944
	struct console *c;
3733
	u64 last_diff = 0;
3945
	u64 last_diff = 0;
3734
	u64 printk_seq;
3946
	u64 printk_seq;
3947
	short flags;
3948
	bool locked;
3735
	int cookie;
3949
	int cookie;
3736
	u64 diff;
3950
	u64 diff;
3737
	u64 seq;
3951
	u64 seq;
Lines 3740-3772 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre Link Here
3740
3954
3741
	seq = prb_next_seq(prb);
3955
	seq = prb_next_seq(prb);
3742
3956
3743
	/* Flush the consoles so that records up to @seq are printed. */
3957
	/*
3744
	console_lock();
3958
	 * Flush the consoles so that records up to @seq are printed.
3745
	console_unlock();
3959
	 * Otherwise this function will just wait for the threaded printers
3960
	 * to print up to @seq.
3961
	 */
3962
	if (serialized_printing && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
3963
		console_lock();
3964
		console_unlock();
3965
	}
3746
3966
3747
	for (;;) {
3967
	for (;;) {
3968
		locked = false;
3748
		diff = 0;
3969
		diff = 0;
3749
3970
3750
		/*
3971
		if (serialized_printing) {
3751
		 * Hold the console_lock to guarantee safe access to
3972
			/*
3752
		 * console->seq. Releasing console_lock flushes more
3973
			 * Hold the console_lock to guarantee safe access to
3753
		 * records in case @seq is still not printed on all
3974
			 * console->seq. Releasing console_lock flushes more
3754
		 * usable consoles.
3975
			 * records in case @seq is still not printed on all
3755
		 */
3976
			 * usable consoles.
3756
		console_lock();
3977
			 */
3978
			console_lock();
3979
			locked = true;
3980
		}
3757
3981
3758
		cookie = console_srcu_read_lock();
3982
		cookie = console_srcu_read_lock();
3759
		for_each_console_srcu(c) {
3983
		for_each_console_srcu(c) {
3760
			if (con && con != c)
3984
			if (con && con != c)
3761
				continue;
3985
				continue;
3986
3987
			flags = console_srcu_read_flags(c);
3988
3762
			/*
3989
			/*
3763
			 * If consoles are not usable, it cannot be expected
3990
			 * If consoles are not usable, it cannot be expected
3764
			 * that they make forward progress, so only increment
3991
			 * that they make forward progress, so only increment
3765
			 * @diff for usable consoles.
3992
			 * @diff for usable consoles.
3766
			 */
3993
			 */
3767
			if (!console_is_usable(c))
3994
			if (!console_is_usable(c, flags, true) &&
3995
			    !console_is_usable(c, flags, false)) {
3768
				continue;
3996
				continue;
3769
			printk_seq = c->seq;
3997
			}
3998
3999
			if (flags & CON_NBCON) {
4000
				printk_seq = nbcon_seq_read(c);
4001
			} else {
4002
				WARN_ON_ONCE(!locked);
4003
				printk_seq = c->seq;
4004
			}
4005
3770
			if (printk_seq < seq)
4006
			if (printk_seq < seq)
3771
				diff += seq - printk_seq;
4007
				diff += seq - printk_seq;
3772
		}
4008
		}
Lines 3775-3796 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre Link Here
3775
		if (diff != last_diff && reset_on_progress)
4011
		if (diff != last_diff && reset_on_progress)
3776
			remaining = timeout_ms;
4012
			remaining = timeout_ms;
3777
4013
3778
		console_unlock();
4014
		if (locked)
4015
			console_unlock();
3779
4016
3780
		/* Note: @diff is 0 if there are no usable consoles. */
4017
		/* Note: @diff is 0 if there are no usable consoles. */
3781
		if (diff == 0 || remaining == 0)
4018
		if (diff == 0 || remaining == 0)
3782
			break;
4019
			break;
3783
4020
3784
		if (remaining < 0) {
4021
		msleep(1);
3785
			/* no timeout limit */
4022
3786
			msleep(100);
4023
		/* If @remaining < 0, there is no timeout limit. */
3787
		} else if (remaining < 100) {
4024
		if (remaining > 0)
3788
			msleep(remaining);
4025
			remaining--;
3789
			remaining = 0;
3790
		} else {
3791
			msleep(100);
3792
			remaining -= 100;
3793
		}
3794
4026
3795
		last_diff = diff;
4027
		last_diff = diff;
3796
	}
4028
	}
Lines 3831-3839 static void wake_up_klogd_work_func(struct irq_work *irq_work) Link Here
3831
	int pending = this_cpu_xchg(printk_pending, 0);
4063
	int pending = this_cpu_xchg(printk_pending, 0);
3832
4064
3833
	if (pending & PRINTK_PENDING_OUTPUT) {
4065
	if (pending & PRINTK_PENDING_OUTPUT) {
3834
		/* If trylock fails, someone else is doing the printing */
4066
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3835
		if (console_trylock())
4067
			wake_up_interruptible(&legacy_wait);
3836
			console_unlock();
4068
		} else {
4069
			/*
4070
			 * If trylock fails, some other context
4071
			 * will do the printing.
4072
			 */
4073
			if (console_trylock())
4074
				console_unlock();
4075
		}
3837
	}
4076
	}
3838
4077
3839
	if (pending & PRINTK_PENDING_WAKEUP)
4078
	if (pending & PRINTK_PENDING_WAKEUP)
Lines 3901-3911 void defer_console_output(void) Link Here
3901
	 * New messages may have been added directly to the ringbuffer
4140
	 * New messages may have been added directly to the ringbuffer
3902
	 * using vprintk_store(), so wake any waiters as well.
4141
	 * using vprintk_store(), so wake any waiters as well.
3903
	 */
4142
	 */
3904
	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
4143
	int val = PRINTK_PENDING_WAKEUP;
4144
4145
	if (serialized_printing)
4146
		val |= PRINTK_PENDING_OUTPUT;
4147
	__wake_up_klogd(val);
3905
}
4148
}
3906
4149
3907
void printk_trigger_flush(void)
4150
void printk_trigger_flush(void)
3908
{
4151
{
4152
	preempt_disable();
4153
	nbcon_atomic_flush_all();
4154
	preempt_enable();
4155
4156
	nbcon_wake_threads();
3909
	defer_console_output();
4157
	defer_console_output();
3910
}
4158
}
3911
4159
(-)a/kernel/printk/printk_safe.c (+12 lines)
Lines 26-31 void __printk_safe_exit(void) Link Here
26
	this_cpu_dec(printk_context);
26
	this_cpu_dec(printk_context);
27
}
27
}
28
28
29
void __printk_deferred_enter(void)
30
{
31
	cant_migrate();
32
	this_cpu_inc(printk_context);
33
}
34
35
void __printk_deferred_exit(void)
36
{
37
	cant_migrate();
38
	this_cpu_dec(printk_context);
39
}
40
29
asmlinkage int vprintk(const char *fmt, va_list args)
41
asmlinkage int vprintk(const char *fmt, va_list args)
30
{
42
{
31
#ifdef CONFIG_KGDB_KDB
43
#ifdef CONFIG_KGDB_KDB
(-)a/kernel/rcu/rcutorture.c (+6 lines)
Lines 2408-2413 static int rcutorture_booster_init(unsigned int cpu) Link Here
2408
		WARN_ON_ONCE(!t);
2408
		WARN_ON_ONCE(!t);
2409
		sp.sched_priority = 2;
2409
		sp.sched_priority = 2;
2410
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2410
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2411
#ifdef CONFIG_PREEMPT_RT
2412
		t = per_cpu(timersd, cpu);
2413
		WARN_ON_ONCE(!t);
2414
		sp.sched_priority = 2;
2415
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2416
#endif
2411
	}
2417
	}
2412
2418
2413
	/* Don't allow time recalculation while creating a new task. */
2419
	/* Don't allow time recalculation while creating a new task. */
(-)a/kernel/rcu/tree_stall.h (+6 lines)
Lines 8-13 Link Here
8
 */
8
 */
9
9
10
#include <linux/kvm_para.h>
10
#include <linux/kvm_para.h>
11
#include <linux/console.h>
11
12
12
//////////////////////////////////////////////////////////////////////////////
13
//////////////////////////////////////////////////////////////////////////////
13
//
14
//
Lines 582-587 static void rcu_check_gp_kthread_expired_fqs_timer(void) Link Here
582
583
583
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
584
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
584
{
585
{
586
	enum nbcon_prio prev_prio;
585
	int cpu;
587
	int cpu;
586
	unsigned long flags;
588
	unsigned long flags;
587
	unsigned long gpa;
589
	unsigned long gpa;
Lines 597-602 static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) Link Here
597
	if (rcu_stall_is_suppressed())
599
	if (rcu_stall_is_suppressed())
598
		return;
600
		return;
599
601
602
	prev_prio = nbcon_atomic_enter(NBCON_PRIO_EMERGENCY);
603
600
	/*
604
	/*
601
	 * OK, time to rat on our buddy...
605
	 * OK, time to rat on our buddy...
602
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
606
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
Lines 651-656 static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) Link Here
651
	panic_on_rcu_stall();
655
	panic_on_rcu_stall();
652
656
653
	rcu_force_quiescent_state();  /* Kick them all. */
657
	rcu_force_quiescent_state();  /* Kick them all. */
658
659
	nbcon_atomic_exit(NBCON_PRIO_EMERGENCY, prev_prio);
654
}
660
}
655
661
656
static void print_cpu_stall(unsigned long gps)
662
static void print_cpu_stall(unsigned long gps)
(-)a/kernel/sched/core.c (-29 / +101 lines)
Lines 898-911 static inline void hrtick_rq_init(struct rq *rq) Link Here
898
898
899
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
899
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
900
/*
900
/*
901
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
901
 * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
902
 * this avoids any races wrt polling state changes and thereby avoids
902
 * this avoids any races wrt polling state changes and thereby avoids
903
 * spurious IPIs.
903
 * spurious IPIs.
904
 */
904
 */
905
static inline bool set_nr_and_not_polling(struct task_struct *p)
905
static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
906
{
906
{
907
	struct thread_info *ti = task_thread_info(p);
907
	struct thread_info *ti = task_thread_info(p);
908
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
908
909
	return !(fetch_or(&ti->flags, 1 << tif_bit) & _TIF_POLLING_NRFLAG);
909
}
910
}
910
911
911
/*
912
/*
Lines 922-928 static bool set_nr_if_polling(struct task_struct *p) Link Here
922
	for (;;) {
923
	for (;;) {
923
		if (!(val & _TIF_POLLING_NRFLAG))
924
		if (!(val & _TIF_POLLING_NRFLAG))
924
			return false;
925
			return false;
925
		if (val & _TIF_NEED_RESCHED)
926
		if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
926
			return true;
927
			return true;
927
		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
928
		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
928
			break;
929
			break;
Lines 931-939 static bool set_nr_if_polling(struct task_struct *p) Link Here
931
}
932
}
932
933
933
#else
934
#else
934
static inline bool set_nr_and_not_polling(struct task_struct *p)
935
static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
935
{
936
{
936
	set_tsk_need_resched(p);
937
	set_tsk_thread_flag(p, tif_bit);
937
	return true;
938
	return true;
938
}
939
}
939
940
Lines 1038-1065 void wake_up_q(struct wake_q_head *head) Link Here
1038
 * might also involve a cross-CPU call to trigger the scheduler on
1039
 * might also involve a cross-CPU call to trigger the scheduler on
1039
 * the target CPU.
1040
 * the target CPU.
1040
 */
1041
 */
1041
void resched_curr(struct rq *rq)
1042
static void __resched_curr(struct rq *rq, int lazy)
1042
{
1043
{
1044
	int cpu, tif_bit = TIF_NEED_RESCHED + lazy;
1043
	struct task_struct *curr = rq->curr;
1045
	struct task_struct *curr = rq->curr;
1044
	int cpu;
1045
1046
1046
	lockdep_assert_rq_held(rq);
1047
	lockdep_assert_rq_held(rq);
1047
1048
1048
	if (test_tsk_need_resched(curr))
1049
	if (unlikely(test_tsk_thread_flag(curr, tif_bit)))
1049
		return;
1050
		return;
1050
1051
1051
	cpu = cpu_of(rq);
1052
	cpu = cpu_of(rq);
1052
1053
1053
	if (cpu == smp_processor_id()) {
1054
	if (cpu == smp_processor_id()) {
1054
		set_tsk_need_resched(curr);
1055
		set_tsk_thread_flag(curr, tif_bit);
1055
		set_preempt_need_resched();
1056
		if (!lazy)
1057
			set_preempt_need_resched();
1056
		return;
1058
		return;
1057
	}
1059
	}
1058
1060
1059
	if (set_nr_and_not_polling(curr))
1061
	if (set_nr_and_not_polling(curr, tif_bit)) {
1060
		smp_send_reschedule(cpu);
1062
		if (!lazy)
1061
	else
1063
			smp_send_reschedule(cpu);
1064
	} else {
1062
		trace_sched_wake_idle_without_ipi(cpu);
1065
		trace_sched_wake_idle_without_ipi(cpu);
1066
	}
1067
}
1068
1069
void resched_curr(struct rq *rq)
1070
{
1071
	__resched_curr(rq, 0);
1072
}
1073
1074
void resched_curr_lazy(struct rq *rq)
1075
{
1076
	int lazy = IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) && !sched_feat(FORCE_NEED_RESCHED) ?
1077
		TIF_NEED_RESCHED_LAZY_OFFSET : 0;
1078
1079
	if (lazy && unlikely(test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED)))
1080
		return;
1081
1082
	__resched_curr(rq, lazy);
1063
}
1083
}
1064
1084
1065
void resched_cpu(int cpu)
1085
void resched_cpu(int cpu)
Lines 1132-1138 static void wake_up_idle_cpu(int cpu) Link Here
1132
	if (cpu == smp_processor_id())
1152
	if (cpu == smp_processor_id())
1133
		return;
1153
		return;
1134
1154
1135
	if (set_nr_and_not_polling(rq->idle))
1155
	if (set_nr_and_not_polling(rq->idle, TIF_NEED_RESCHED))
1136
		smp_send_reschedule(cpu);
1156
		smp_send_reschedule(cpu);
1137
	else
1157
	else
1138
		trace_sched_wake_idle_without_ipi(cpu);
1158
		trace_sched_wake_idle_without_ipi(cpu);
Lines 6720-6729 void __noreturn do_task_dead(void) Link Here
6720
6740
6721
static inline void sched_submit_work(struct task_struct *tsk)
6741
static inline void sched_submit_work(struct task_struct *tsk)
6722
{
6742
{
6743
	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6723
	unsigned int task_flags;
6744
	unsigned int task_flags;
6724
6745
6725
	if (task_is_running(tsk))
6746
	/*
6726
		return;
6747
	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6748
	 * will use a blocking primitive -- which would lead to recursion.
6749
	 */
6750
	lock_map_acquire_try(&sched_map);
6727
6751
6728
	task_flags = tsk->flags;
6752
	task_flags = tsk->flags;
6729
	/*
6753
	/*
Lines 6749-6754 static inline void sched_submit_work(struct task_struct *tsk) Link Here
6749
	 * make sure to submit it to avoid deadlocks.
6773
	 * make sure to submit it to avoid deadlocks.
6750
	 */
6774
	 */
6751
	blk_flush_plug(tsk->plug, true);
6775
	blk_flush_plug(tsk->plug, true);
6776
6777
	lock_map_release(&sched_map);
6752
}
6778
}
6753
6779
6754
static void sched_update_worker(struct task_struct *tsk)
6780
static void sched_update_worker(struct task_struct *tsk)
Lines 6761-6776 static void sched_update_worker(struct task_struct *tsk) Link Here
6761
	}
6787
	}
6762
}
6788
}
6763
6789
6790
static __always_inline void __schedule_loop(unsigned int sched_mode)
6791
{
6792
	do {
6793
		preempt_disable();
6794
		__schedule(sched_mode);
6795
		sched_preempt_enable_no_resched();
6796
	} while (need_resched());
6797
}
6798
6764
asmlinkage __visible void __sched schedule(void)
6799
asmlinkage __visible void __sched schedule(void)
6765
{
6800
{
6766
	struct task_struct *tsk = current;
6801
	struct task_struct *tsk = current;
6767
6802
6768
	sched_submit_work(tsk);
6803
#ifdef CONFIG_RT_MUTEXES
6769
	do {
6804
	lockdep_assert(!tsk->sched_rt_mutex);
6770
		preempt_disable();
6805
#endif
6771
		__schedule(SM_NONE);
6806
6772
		sched_preempt_enable_no_resched();
6807
	if (!task_is_running(tsk))
6773
	} while (need_resched());
6808
		sched_submit_work(tsk);
6809
	__schedule_loop(SM_NONE);
6774
	sched_update_worker(tsk);
6810
	sched_update_worker(tsk);
6775
}
6811
}
6776
EXPORT_SYMBOL(schedule);
6812
EXPORT_SYMBOL(schedule);
Lines 6834-6844 void __sched schedule_preempt_disabled(void) Link Here
6834
#ifdef CONFIG_PREEMPT_RT
6870
#ifdef CONFIG_PREEMPT_RT
6835
void __sched notrace schedule_rtlock(void)
6871
void __sched notrace schedule_rtlock(void)
6836
{
6872
{
6837
	do {
6873
	__schedule_loop(SM_RTLOCK_WAIT);
6838
		preempt_disable();
6839
		__schedule(SM_RTLOCK_WAIT);
6840
		sched_preempt_enable_no_resched();
6841
	} while (need_resched());
6842
}
6874
}
6843
NOKPROBE_SYMBOL(schedule_rtlock);
6875
NOKPROBE_SYMBOL(schedule_rtlock);
6844
#endif
6876
#endif
Lines 7034-7039 static void __setscheduler_prio(struct task_struct *p, int prio) Link Here
7034
7066
7035
#ifdef CONFIG_RT_MUTEXES
7067
#ifdef CONFIG_RT_MUTEXES
7036
7068
7069
/*
7070
 * Would be more useful with typeof()/auto_type but they don't mix with
7071
 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7072
 * name such that if someone were to implement this function we get to compare
7073
 * notes.
7074
 */
7075
#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7076
7077
void rt_mutex_pre_schedule(void)
7078
{
7079
	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7080
	sched_submit_work(current);
7081
}
7082
7083
void rt_mutex_schedule(void)
7084
{
7085
	lockdep_assert(current->sched_rt_mutex);
7086
	__schedule_loop(SM_NONE);
7087
}
7088
7089
void rt_mutex_post_schedule(void)
7090
{
7091
	sched_update_worker(current);
7092
	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7093
}
7094
7037
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
7095
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
7038
{
7096
{
7039
	if (pi_task)
7097
	if (pi_task)
Lines 8872-8878 static void __init preempt_dynamic_init(void) Link Here
8872
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
8930
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
8873
		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
8931
		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
8874
	}									 \
8932
	}									 \
8875
	EXPORT_SYMBOL_GPL(preempt_model_##mode)
8876
8933
8877
PREEMPT_MODEL_ACCESSOR(none);
8934
PREEMPT_MODEL_ACCESSOR(none);
8878
PREEMPT_MODEL_ACCESSOR(voluntary);
8935
PREEMPT_MODEL_ACCESSOR(voluntary);
Lines 8884-8889 static inline void preempt_dynamic_init(void) { } Link Here
8884
8941
8885
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
8942
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
8886
8943
8944
/*
8945
 * task_is_pi_boosted - Check if task has been PI boosted.
8946
 * @p:	Task to check.
8947
 *
8948
 * Return true if task is subject to priority inheritance.
8949
 */
8950
bool task_is_pi_boosted(const struct task_struct *p)
8951
{
8952
	int prio = p->prio;
8953
8954
	if (!rt_prio(prio))
8955
		return false;
8956
	return prio != p->normal_prio;
8957
}
8958
8887
/**
8959
/**
8888
 * yield - yield the current processor to other threads.
8960
 * yield - yield the current processor to other threads.
8889
 *
8961
 *
(-)a/kernel/sched/debug.c (+19 lines)
Lines 333-338 static const struct file_operations sched_debug_fops = { Link Here
333
	.release	= seq_release,
333
	.release	= seq_release,
334
};
334
};
335
335
336
static ssize_t sched_hog_write(struct file *filp, const char __user *ubuf,
337
			       size_t cnt, loff_t *ppos)
338
{
339
	unsigned long end = jiffies + 60 * HZ;
340
341
	for (; time_before(jiffies, end) && !signal_pending(current);)
342
		cpu_relax();
343
344
	return cnt;
345
}
346
347
static const struct file_operations sched_hog_fops = {
348
	.write		= sched_hog_write,
349
	.open		= simple_open,
350
	.llseek		= default_llseek,
351
};
352
336
static struct dentry *debugfs_sched;
353
static struct dentry *debugfs_sched;
337
354
338
static __init int sched_init_debug(void)
355
static __init int sched_init_debug(void)
Lines 374-379 static __init int sched_init_debug(void) Link Here
374
391
375
	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
392
	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
376
393
394
	debugfs_create_file("hog", 0200, debugfs_sched, NULL, &sched_hog_fops);
395
377
	return 0;
396
	return 0;
378
}
397
}
379
late_initcall(sched_init_debug);
398
late_initcall(sched_init_debug);
(-)a/kernel/sched/fair.c (-15 / +31 lines)
Lines 1016-1023 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se); Link Here
1016
 * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
1016
 * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
1017
 * this is probably good enough.
1017
 * this is probably good enough.
1018
 */
1018
 */
1019
static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
1019
static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se, bool tick)
1020
{
1020
{
1021
	struct rq *rq = rq_of(cfs_rq);
1022
1021
	if ((s64)(se->vruntime - se->deadline) < 0)
1023
	if ((s64)(se->vruntime - se->deadline) < 0)
1022
		return;
1024
		return;
1023
1025
Lines 1036-1045 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) Link Here
1036
	/*
1038
	/*
1037
	 * The task has consumed its request, reschedule.
1039
	 * The task has consumed its request, reschedule.
1038
	 */
1040
	 */
1039
	if (cfs_rq->nr_running > 1) {
1041
	if (cfs_rq->nr_running < 2)
1040
		resched_curr(rq_of(cfs_rq));
1042
		return;
1041
		clear_buddies(cfs_rq, se);
1043
1044
	if (!IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) || sched_feat(FORCE_NEED_RESCHED)) {
1045
		resched_curr(rq);
1046
	} else {
1047
		/* Did the task ignore the lazy reschedule request? */
1048
		if (tick && test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY))
1049
			resched_curr(rq);
1050
		else
1051
			resched_curr_lazy(rq);
1042
	}
1052
	}
1053
	clear_buddies(cfs_rq, se);
1043
}
1054
}
1044
1055
1045
#include "pelt.h"
1056
#include "pelt.h"
Lines 1147-1153 static void update_tg_load_avg(struct cfs_rq *cfs_rq) Link Here
1147
/*
1158
/*
1148
 * Update the current task's runtime statistics.
1159
 * Update the current task's runtime statistics.
1149
 */
1160
 */
1150
static void update_curr(struct cfs_rq *cfs_rq)
1161
static void __update_curr(struct cfs_rq *cfs_rq, bool tick)
1151
{
1162
{
1152
	struct sched_entity *curr = cfs_rq->curr;
1163
	struct sched_entity *curr = cfs_rq->curr;
1153
	u64 now = rq_clock_task(rq_of(cfs_rq));
1164
	u64 now = rq_clock_task(rq_of(cfs_rq));
Lines 1322-1328 Link Here
1322
	update_burst_penalty(curr);
1322
	update_burst_penalty(curr);
1323
#endif // CONFIG_SCHED_BORE
1323
#endif // CONFIG_SCHED_BORE
1324
	curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
1324
	curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
1325
	update_deadline(cfs_rq, curr);
1325
	update_deadline(cfs_rq, curr, tick);
1326
	update_min_vruntime(cfs_rq);
1326
	update_min_vruntime(cfs_rq);
1327
1327
1328
	if (entity_is_task(curr)) {
1328
	if (entity_is_task(curr)) {
Lines 1188-1193 static void update_curr(struct cfs_rq *cfs_rq) Link Here
1188
	account_cfs_rq_runtime(cfs_rq, delta_exec);
1199
	account_cfs_rq_runtime(cfs_rq, delta_exec);
1189
}
1200
}
1190
1201
1202
static inline void update_curr(struct cfs_rq *cfs_rq)
1203
{
1204
	__update_curr(cfs_rq, false);
1205
}
1206
1191
static void update_curr_fair(struct rq *rq)
1207
static void update_curr_fair(struct rq *rq)
1192
{
1208
{
1193
	update_curr(cfs_rq_of(&rq->curr->se));
1209
	update_curr(cfs_rq_of(&rq->curr->se));
Lines 5308-5314 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) Link Here
5308
	/*
5324
	/*
5309
	 * Update run-time statistics of the 'current'.
5325
	 * Update run-time statistics of the 'current'.
5310
	 */
5326
	 */
5311
	update_curr(cfs_rq);
5327
	__update_curr(cfs_rq, true);
5312
5328
5313
	/*
5329
	/*
5314
	 * Ensure that runnable average is periodically updated.
5330
	 * Ensure that runnable average is periodically updated.
Lines 5322-5328 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) Link Here
5322
	 * validating it and just reschedule.
5338
	 * validating it and just reschedule.
5323
	 */
5339
	 */
5324
	if (queued) {
5340
	if (queued) {
5325
		resched_curr(rq_of(cfs_rq));
5341
		resched_curr_lazy(rq_of(cfs_rq));
5326
		return;
5342
		return;
5327
	}
5343
	}
5328
	/*
5344
	/*
Lines 5468-5474 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) Link Here
5468
	 * hierarchy can be throttled
5484
	 * hierarchy can be throttled
5469
	 */
5485
	 */
5470
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5486
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5471
		resched_curr(rq_of(cfs_rq));
5487
		resched_curr_lazy(rq_of(cfs_rq));
5472
}
5488
}
5473
5489
5474
static __always_inline
5490
static __always_inline
Lines 5728-5734 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) Link Here
5728
5744
5729
	/* Determine whether we need to wake up potentially idle CPU: */
5745
	/* Determine whether we need to wake up potentially idle CPU: */
5730
	if (rq->curr == rq->idle && rq->cfs.nr_running)
5746
	if (rq->curr == rq->idle && rq->cfs.nr_running)
5731
		resched_curr(rq);
5747
		resched_curr_lazy(rq);
5732
}
5748
}
5733
5749
5734
#ifdef CONFIG_SMP
5750
#ifdef CONFIG_SMP
Lines 6433-6439 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) Link Here
6433
6449
6434
		if (delta < 0) {
6450
		if (delta < 0) {
6435
			if (task_current(rq, p))
6451
			if (task_current(rq, p))
6436
				resched_curr(rq);
6452
				resched_curr_lazy(rq);
6437
			return;
6453
			return;
6438
		}
6454
		}
6439
		hrtick_start(rq, delta);
6455
		hrtick_start(rq, delta);
Lines 8086-8092 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ Link Here
8086
	 * prevents us from potentially nominating it as a false LAST_BUDDY
8102
	 * prevents us from potentially nominating it as a false LAST_BUDDY
8087
	 * below.
8103
	 * below.
8088
	 */
8104
	 */
8089
	if (test_tsk_need_resched(curr))
8105
	if (need_resched())
8090
		return;
8106
		return;
8091
8107
8092
	/* Idle tasks are by definition preempted by non-idle tasks. */
8108
	/* Idle tasks are by definition preempted by non-idle tasks. */
Lines 8128-8134 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ Link Here
8128
	return;
8144
	return;
8129
8145
8130
preempt:
8146
preempt:
8131
	resched_curr(rq);
8147
	resched_curr_lazy(rq);
8132
}
8148
}
8133
8149
8134
#ifdef CONFIG_SMP
8150
#ifdef CONFIG_SMP
Lines 12279-12285 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) Link Here
12279
	 */
12295
	 */
12280
	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
12296
	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
12281
	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
12297
	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
12282
		resched_curr(rq);
12298
		resched_curr_lazy(rq);
12283
}
12299
}
12284
12300
12285
/*
12301
/*
Lines 12444-12450 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) Link Here
12444
	 */
12460
	 */
12445
	if (task_current(rq, p)) {
12461
	if (task_current(rq, p)) {
12446
		if (p->prio > oldprio)
12462
		if (p->prio > oldprio)
12447
			resched_curr(rq);
12463
			resched_curr_lazy(rq);
12448
	} else
12464
	} else
12449
		check_preempt_curr(rq, p, 0);
12465
		check_preempt_curr(rq, p, 0);
12450
}
12466
}
(-)a/kernel/sched/features.h (+2 lines)
Lines 89-91 SCHED_FEAT(UTIL_EST_FASTUP, true) Link Here
89
SCHED_FEAT(LATENCY_WARN, false)
89
SCHED_FEAT(LATENCY_WARN, false)
90
90
91
SCHED_FEAT(HZ_BW, true)
91
SCHED_FEAT(HZ_BW, true)
92
93
SCHED_FEAT(FORCE_NEED_RESCHED, false)
(-)a/kernel/sched/idle.c (-2 / +1 lines)
Lines 57-64 static noinline int __cpuidle cpu_idle_poll(void) Link Here
57
	ct_cpuidle_enter();
57
	ct_cpuidle_enter();
58
58
59
	raw_local_irq_enable();
59
	raw_local_irq_enable();
60
	while (!tif_need_resched() &&
60
	while (!need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired()))
61
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
62
		cpu_relax();
61
		cpu_relax();
63
	raw_local_irq_disable();
62
	raw_local_irq_disable();
64
63
(-)a/kernel/sched/rt.c (-1 / +4 lines)
Lines 2247-2254 static int rto_next_cpu(struct root_domain *rd) Link Here
2247
2247
2248
		rd->rto_cpu = cpu;
2248
		rd->rto_cpu = cpu;
2249
2249
2250
		if (cpu < nr_cpu_ids)
2250
		if (cpu < nr_cpu_ids) {
2251
			if (!has_pushable_tasks(cpu_rq(cpu)))
2252
				continue;
2251
			return cpu;
2253
			return cpu;
2254
		}
2252
2255
2253
		rd->rto_cpu = -1;
2256
		rd->rto_cpu = -1;
2254
2257
(-)a/kernel/sched/sched.h (+1 lines)
Lines 2435-2440 extern void init_sched_fair_class(void); Link Here
2435
extern void reweight_task(struct task_struct *p, int prio);
2435
extern void reweight_task(struct task_struct *p, int prio);
2436
2436
2437
extern void resched_curr(struct rq *rq);
2437
extern void resched_curr(struct rq *rq);
2438
extern void resched_curr_lazy(struct rq *rq);
2438
extern void resched_cpu(int cpu);
2439
extern void resched_cpu(int cpu);
2439
2440
2440
extern struct rt_bandwidth def_rt_bandwidth;
2441
extern struct rt_bandwidth def_rt_bandwidth;
(-)a/kernel/signal.c (-5 / +25 lines)
Lines 2329-2343 static int ptrace_stop(int exit_code, int why, unsigned long message, Link Here
2329
		do_notify_parent_cldstop(current, false, why);
2329
		do_notify_parent_cldstop(current, false, why);
2330
2330
2331
	/*
2331
	/*
2332
	 * Don't want to allow preemption here, because
2332
	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
2333
	 * sys_ptrace() needs this task to be inactive.
2333
	 * One a PREEMPTION kernel this can result in preemption requirement
2334
	 * which will be fulfilled after read_unlock() and the ptracer will be
2335
	 * put on the CPU.
2336
	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2337
	 * this task wait in schedule(). If this task gets preempted then it
2338
	 * remains enqueued on the runqueue. The ptracer will observe this and
2339
	 * then sleep for a delay of one HZ tick. In the meantime this task
2340
	 * gets scheduled, enters schedule() and will wait for the ptracer.
2334
	 *
2341
	 *
2335
	 * XXX: implement read_unlock_no_resched().
2342
	 * This preemption point is not bad from correctness point of view but
2343
	 * extends the runtime by one HZ tick time due to the ptracer's sleep.
2344
	 * The preempt-disable section ensures that there will be no preemption
2345
	 * between unlock and schedule() and so improving the performance since
2346
	 * the ptracer has no reason to sleep.
2347
	 *
2348
	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2349
	 * Therefore the task can be preempted (after
2350
	 * do_notify_parent_cldstop()) before unlocking tasklist_lock so there
2351
	 * is no benefit in doing this. The optimisation is harmful on
2352
	 * PEEMPT_RT because the spinlock_t (in cgroup_enter_frozen()) must not
2353
	 * be acquired with disabled preemption.
2336
	 */
2354
	 */
2337
	preempt_disable();
2355
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2356
		preempt_disable();
2338
	read_unlock(&tasklist_lock);
2357
	read_unlock(&tasklist_lock);
2339
	cgroup_enter_frozen();
2358
	cgroup_enter_frozen();
2340
	preempt_enable_no_resched();
2359
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2360
		preempt_enable_no_resched();
2341
	schedule();
2361
	schedule();
2342
	cgroup_leave_frozen(true);
2362
	cgroup_leave_frozen(true);
2343
2363
(-)a/kernel/softirq.c (-1 / +94 lines)
Lines 247-252 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) Link Here
247
}
247
}
248
EXPORT_SYMBOL(__local_bh_enable_ip);
248
EXPORT_SYMBOL(__local_bh_enable_ip);
249
249
250
void softirq_preempt(void)
251
{
252
	if (WARN_ON_ONCE(!preemptible()))
253
		return;
254
255
	if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
256
		return;
257
258
	__local_bh_enable(SOFTIRQ_OFFSET, true);
259
	/* preemption point */
260
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
261
}
262
250
/*
263
/*
251
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
264
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
252
 * to acquire the per CPU local lock for reentrancy protection.
265
 * to acquire the per CPU local lock for reentrancy protection.
Lines 619-624 static inline void tick_irq_exit(void) Link Here
619
#endif
632
#endif
620
}
633
}
621
634
635
#ifdef CONFIG_PREEMPT_RT
636
DEFINE_PER_CPU(struct task_struct *, timersd);
637
DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
638
639
static void wake_timersd(void)
640
{
641
        struct task_struct *tsk = __this_cpu_read(timersd);
642
643
        if (tsk)
644
                wake_up_process(tsk);
645
}
646
647
#else
648
649
static inline void wake_timersd(void) { }
650
651
#endif
652
622
static inline void __irq_exit_rcu(void)
653
static inline void __irq_exit_rcu(void)
623
{
654
{
624
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
655
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
Lines 631-636 static inline void __irq_exit_rcu(void) Link Here
631
	if (!in_interrupt() && local_softirq_pending())
662
	if (!in_interrupt() && local_softirq_pending())
632
		invoke_softirq();
663
		invoke_softirq();
633
664
665
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() &&
666
	    !(in_nmi() | in_hardirq()))
667
		wake_timersd();
668
634
	tick_irq_exit();
669
	tick_irq_exit();
635
}
670
}
636
671
Lines 963-974 static struct smp_hotplug_thread softirq_threads = { Link Here
963
	.thread_comm		= "ksoftirqd/%u",
998
	.thread_comm		= "ksoftirqd/%u",
964
};
999
};
965
1000
1001
#ifdef CONFIG_PREEMPT_RT
1002
static void timersd_setup(unsigned int cpu)
1003
{
1004
        sched_set_fifo_low(current);
1005
}
1006
1007
static int timersd_should_run(unsigned int cpu)
1008
{
1009
        return local_pending_timers();
1010
}
1011
1012
static void run_timersd(unsigned int cpu)
1013
{
1014
	unsigned int timer_si;
1015
1016
	ksoftirqd_run_begin();
1017
1018
	timer_si = local_pending_timers();
1019
	__this_cpu_write(pending_timer_softirq, 0);
1020
	or_softirq_pending(timer_si);
1021
1022
	__do_softirq();
1023
1024
	ksoftirqd_run_end();
1025
}
1026
1027
static void raise_ktimers_thread(unsigned int nr)
1028
{
1029
	trace_softirq_raise(nr);
1030
	__this_cpu_or(pending_timer_softirq, 1 << nr);
1031
}
1032
1033
void raise_hrtimer_softirq(void)
1034
{
1035
	raise_ktimers_thread(HRTIMER_SOFTIRQ);
1036
}
1037
1038
void raise_timer_softirq(void)
1039
{
1040
	unsigned long flags;
1041
1042
	local_irq_save(flags);
1043
	raise_ktimers_thread(TIMER_SOFTIRQ);
1044
	wake_timersd();
1045
	local_irq_restore(flags);
1046
}
1047
1048
static struct smp_hotplug_thread timer_threads = {
1049
        .store                  = &timersd,
1050
        .setup                  = timersd_setup,
1051
        .thread_should_run      = timersd_should_run,
1052
        .thread_fn              = run_timersd,
1053
        .thread_comm            = "ktimers/%u",
1054
};
1055
#endif
1056
966
static __init int spawn_ksoftirqd(void)
1057
static __init int spawn_ksoftirqd(void)
967
{
1058
{
968
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
1059
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
969
				  takeover_tasklets);
1060
				  takeover_tasklets);
970
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
1061
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
971
1062
#ifdef CONFIG_PREEMPT_RT
1063
	BUG_ON(smpboot_register_percpu_thread(&timer_threads));
1064
#endif
972
	return 0;
1065
	return 0;
973
}
1066
}
974
early_initcall(spawn_ksoftirqd);
1067
early_initcall(spawn_ksoftirqd);
(-)a/kernel/time/hrtimer.c (-2 / +2 lines)
Lines 1808-1814 void hrtimer_interrupt(struct clock_event_device *dev) Link Here
1808
	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1808
	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1809
		cpu_base->softirq_expires_next = KTIME_MAX;
1809
		cpu_base->softirq_expires_next = KTIME_MAX;
1810
		cpu_base->softirq_activated = 1;
1810
		cpu_base->softirq_activated = 1;
1811
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1811
		raise_hrtimer_softirq();
1812
	}
1812
	}
1813
1813
1814
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1814
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
Lines 1921-1927 void hrtimer_run_queues(void) Link Here
1921
	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1921
	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1922
		cpu_base->softirq_expires_next = KTIME_MAX;
1922
		cpu_base->softirq_expires_next = KTIME_MAX;
1923
		cpu_base->softirq_activated = 1;
1923
		cpu_base->softirq_activated = 1;
1924
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1924
		raise_hrtimer_softirq();
1925
	}
1925
	}
1926
1926
1927
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1927
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
(-)a/kernel/time/tick-sched.c (-1 / +1 lines)
Lines 795-801 static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) Link Here
795
795
796
static inline bool local_timer_softirq_pending(void)
796
static inline bool local_timer_softirq_pending(void)
797
{
797
{
798
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
798
	return local_pending_timers() & BIT(TIMER_SOFTIRQ);
799
}
799
}
800
800
801
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
801
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
(-)a/kernel/time/timer.c (-2 / +9 lines)
Lines 1470-1478 static inline void timer_base_unlock_expiry(struct timer_base *base) Link Here
1470
 */
1470
 */
1471
static void timer_sync_wait_running(struct timer_base *base)
1471
static void timer_sync_wait_running(struct timer_base *base)
1472
{
1472
{
1473
	if (atomic_read(&base->timer_waiters)) {
1473
	bool need_preempt;
1474
1475
	need_preempt = task_is_pi_boosted(current);
1476
	if (need_preempt || atomic_read(&base->timer_waiters)) {
1474
		raw_spin_unlock_irq(&base->lock);
1477
		raw_spin_unlock_irq(&base->lock);
1475
		spin_unlock(&base->expiry_lock);
1478
		spin_unlock(&base->expiry_lock);
1479
1480
		if (need_preempt)
1481
			softirq_preempt();
1482
1476
		spin_lock(&base->expiry_lock);
1483
		spin_lock(&base->expiry_lock);
1477
		raw_spin_lock_irq(&base->lock);
1484
		raw_spin_lock_irq(&base->lock);
1478
	}
1485
	}
Lines 2054-2060 static void run_local_timers(void) Link Here
2054
		if (time_before(jiffies, base->next_expiry))
2061
		if (time_before(jiffies, base->next_expiry))
2055
			return;
2062
			return;
2056
	}
2063
	}
2057
	raise_softirq(TIMER_SOFTIRQ);
2064
	raise_timer_softirq();
2058
}
2065
}
2059
2066
2060
/*
2067
/*
(-)a/kernel/trace/trace.c (+2 lines)
Lines 2722-2727 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) Link Here
2722
2722
2723
	if (tif_need_resched())
2723
	if (tif_need_resched())
2724
		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2724
		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2725
	if (tif_need_resched_lazy())
2726
		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
2725
	if (test_preempt_need_resched())
2727
	if (test_preempt_need_resched())
2726
		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2728
		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2727
	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2729
	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
(-)a/kernel/trace/trace_output.c (-2 / +14 lines)
Lines 460-476 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) Link Here
460
		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
460
		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
461
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
461
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
462
		bh_off ? 'b' :
462
		bh_off ? 'b' :
463
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
463
		!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_SUPPORT) ? 'X' :
464
		'.';
464
		'.';
465
465
466
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
466
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
467
				TRACE_FLAG_PREEMPT_RESCHED)) {
467
				TRACE_FLAG_PREEMPT_RESCHED)) {
468
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
469
		need_resched = 'B';
470
		break;
468
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
471
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
469
		need_resched = 'N';
472
		need_resched = 'N';
470
		break;
473
		break;
474
	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
475
		need_resched = 'L';
476
		break;
477
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
478
		need_resched = 'b';
479
		break;
471
	case TRACE_FLAG_NEED_RESCHED:
480
	case TRACE_FLAG_NEED_RESCHED:
472
		need_resched = 'n';
481
		need_resched = 'n';
473
		break;
482
		break;
483
	case TRACE_FLAG_NEED_RESCHED_LAZY:
484
		need_resched = 'l';
485
		break;
474
	case TRACE_FLAG_PREEMPT_RESCHED:
486
	case TRACE_FLAG_PREEMPT_RESCHED:
475
		need_resched = 'p';
487
		need_resched = 'p';
476
		break;
488
		break;
(-)a/localversion-rt (+1 lines)
Line 0 Link Here
1
-rt10
(-)a/net/core/dev.c (-9 / +30 lines)
Lines 4677-4691 static void rps_trigger_softirq(void *data) Link Here
4677
4677
4678
#endif /* CONFIG_RPS */
4678
#endif /* CONFIG_RPS */
4679
4679
4680
/* Called from hardirq (IPI) context */
4681
static void trigger_rx_softirq(void *data)
4682
{
4683
	struct softnet_data *sd = data;
4684
4685
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4686
	smp_store_release(&sd->defer_ipi_scheduled, 0);
4687
}
4688
4689
/*
4680
/*
4690
 * After we queued a packet into sd->input_pkt_queue,
4681
 * After we queued a packet into sd->input_pkt_queue,
4691
 * we need to make sure this queue is serviced soon.
4682
 * we need to make sure this queue is serviced soon.
Lines 6654-6659 static void skb_defer_free_flush(struct softnet_data *sd) Link Here
6654
	}
6645
	}
6655
}
6646
}
6656
6647
6648
#ifndef CONFIG_PREEMPT_RT
6649
6650
/* Called from hardirq (IPI) context */
6651
static void trigger_rx_softirq(void *data)
6652
{
6653
	struct softnet_data *sd = data;
6654
6655
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
6656
	smp_store_release(&sd->defer_ipi_scheduled, 0);
6657
}
6658
6659
#else
6660
6661
static void trigger_rx_softirq(struct work_struct *defer_work)
6662
{
6663
	struct softnet_data *sd;
6664
6665
	sd = container_of(defer_work, struct softnet_data, defer_work);
6666
	smp_store_release(&sd->defer_ipi_scheduled, 0);
6667
	local_bh_disable();
6668
	skb_defer_free_flush(sd);
6669
	local_bh_enable();
6670
}
6671
6672
#endif
6673
6657
static int napi_threaded_poll(void *data)
6674
static int napi_threaded_poll(void *data)
6658
{
6675
{
6659
	struct napi_struct *napi = data;
6676
	struct napi_struct *napi = data;
Lines 11513-11519 static int __init net_dev_init(void) Link Here
11513
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11530
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11514
		sd->cpu = i;
11531
		sd->cpu = i;
11515
#endif
11532
#endif
11533
#ifndef CONFIG_PREEMPT_RT
11516
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11534
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11535
#else
11536
		INIT_WORK(&sd->defer_work, trigger_rx_softirq);
11537
#endif
11517
		spin_lock_init(&sd->defer_lock);
11538
		spin_lock_init(&sd->defer_lock);
11518
11539
11519
		init_gro_hash(&sd->backlog);
11540
		init_gro_hash(&sd->backlog);
(-)a/net/core/skbuff.c (-1 / +6 lines)
Lines 6840-6847 nodefer: __kfree_skb(skb); Link Here
6840
	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
6840
	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
6841
	 * if we are unlucky enough (this seems very unlikely).
6841
	 * if we are unlucky enough (this seems very unlikely).
6842
	 */
6842
	 */
6843
	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
6843
	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
6844
#ifndef CONFIG_PREEMPT_RT
6844
		smp_call_function_single_async(cpu, &sd->defer_csd);
6845
		smp_call_function_single_async(cpu, &sd->defer_csd);
6846
#else
6847
		schedule_work_on(cpu, &sd->defer_work);
6848
#endif
6849
	}
6845
}
6850
}
6846
6851
6847
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
6852
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,

Return to bug 916954