/*
* linux/arch/i386/kernel/irq.c
*
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
* This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*/
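/*
 * An illustrative (hypothetical) driver-side use of these routines - the
 * handler name, IRQ number and device names below are made up:
 *
 *	static void my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		... service the device ...
 *	}
 *
 *	if (request_irq(MY_IRQ, my_handler, SA_SHIRQ, "mydev", my_dev))
 *		return -EBUSY;
 *	...
 *	free_irq(MY_IRQ, my_dev);
 */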
/*
* (mostly architecture independent, will move to kernel/irq.c in 2.5.)
*
* IRQs are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs are assumed to be messaging to Pentium local-APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
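/*
 * A rough sketch (the names are illustrative, not from this file) of how a
 * controller hooks itself up: it fills in a hw_interrupt_type with its
 * startup/shutdown/enable/disable/ack/end callbacks - in the same order as
 * the no_irq_type template further down - and points the descriptor of
 * every interrupt source it owns at that template:
 *
 *	static struct hw_interrupt_type my_pic_type = {
 *		"MY-PIC",
 *		startup_my_pic_irq,
 *		shutdown_my_pic_irq,
 *		enable_my_pic_irq,
 *		disable_my_pic_irq,
 *		ack_my_pic_irq,
 *		end_my_pic_irq
 *	};
 *
 *	irq_desc[irq].handler = &my_pic_type;
 */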
/*
* Controller mappings for all interrupt sources:
*/
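/*
 * The initializer below fills, in order: the status flags (0), the
 * controller template ('handler', pointing at no_irq_type until a real
 * controller claims the line), the 'action' handler list (empty), the
 * disable-nesting depth and the per-descriptor spinlock.
 */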
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
static void register_irq_proc (unsigned int irq);
/*
* Special irq handlers.
*/
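/*
 * no_action() is installed on lines that must be claimed but need no
 * servicing when they trigger - the classical example (elsewhere in the
 * tree, not in this file) is the irq2 cascade between the two 8259 PICs.
 */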
void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
/*
* Generic no controller code
*/
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'What should we do if we get a hw irq event on an illegal vector?'
	 * Each architecture has to answer this itself; it doesn't deserve
	 * a generic callback, I think.
	 */
#if CONFIG_X86
printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Currently unexpected vectors happen only on SMP and APIC.
* We _must_ ack these because every local APIC has only N
* irq slots per priority level, and a 'hanging, unacked' IRQ
* holds up an irq slot - in excessive cases (when multiple
* unexpected vectors occur) that might lock up the APIC
* completely.
*/
ack_APIC_irq();
#endif
#endif
}
/* startup is the same as "enable", shutdown is the same as "disable" */
#define shutdown_none disable_none
#define end_none enable_none
struct hw_interrupt_type no_irq_type = {
"none",
startup_none,
shutdown_none,
enable_none,
disable_none,
ack_none,
end_none
};
volatile unsigned long irq_err_count;
/*
* Generic, controller-independent functions:
*/
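/*
 * get_irq_list() generates the text read from /proc/interrupts: one row
 * per installed irq with a per-CPU count, the controller typename and the
 * name(s) of the installed handler(s), followed by the NMI, (on SMP)
 * local APIC timer and irq_err_count totals. An illustrative (made-up)
 * two-CPU sample:
 *
 *	           CPU0       CPU1
 *	 14:     153284          0   IO-APIC-edge  ide0
 *	NMI:          0          0
 *	LOC:     912847     912842
 *	ERR:          0
 */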
int get_irq_list(char *buf)
{
int i, j;
struct irqaction * action;
char *p = buf;
p += sprintf(p, " ");
for (j=0; j<smp_num_cpus; j++)
p += sprintf(p, "CPU%d ",j);
*p++ = '\n';
for (i = 0 ; i < NR_IRQS ; i++) {
action = irq_desc[i].action;
if (!action)
continue;
p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
p += sprintf(p, "%10u ", kstat_irqs(i));
#else
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
kstat.irqs[cpu_logical_map(j)][i]);
#endif
p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
*p++ = '\n';
}
p += sprintf(p, "NMI: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
nmi_count(cpu_logical_map(j)));
p += sprintf(p, "\n");
#if CONFIG_SMP
p += sprintf(p, "LOC: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
apic_timer_irqs[cpu_logical_map(j)]);
p += sprintf(p, "\n");
#endif
p += sprintf(p, "ERR: %10lu\n", irq_err_count);
return p - buf;
}
/*
* Global interrupt locks for SMP. Allow interrupts to come in on any
* CPU, yet make cli/sti act globally to protect critical regions..
*/
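/*
 * In other words, a driver-style critical section like the illustrative
 * one below (the protected data is hypothetical) keeps working on SMP:
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);
 *	cli();
 *	... touch data that interrupt handlers also touch ...
 *	restore_flags(flags);
 *
 * On UP, cli() just disables local interrupts; on SMP it also has to take
 * the global irq lock, which is what the code below implements.
 */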
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
extern void show_stack(unsigned long* esp);
static void show(char * str)
{
int i;
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [",irqs_running());
for(i=0;i < smp_num_cpus;i++)
printk(" %d",local_irq_count(i));
printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
for(i=0;i < smp_num_cpus;i++)
printk(" %d",local_bh_count(i));
printk(" ]\nStack dumps:");
for(i = 0; i < smp_num_cpus; i++) {
unsigned long esp;
if (i == cpu)
continue;
printk("\nCPU %d:",i);
esp = init_tss[i].esp0;
if (!esp) {
/* tss->esp0 is set to NULL in cpu_init(),
* it's initialized when the cpu returns to user
* space. -- manfreds
*/
printk(" <unknown> ");
continue;
}
esp &= ~(THREAD_SIZE-1);
esp += sizeof(struct task_struct);
show_stack((void*)esp);
}
printk("\nCPU %d:",cpu);
show_stack(NULL);
printk("\n");
}
#define MAXCOUNT 100000000
/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty. Is this by design? Do we have to limit
 * the 'memory update oscillation frequency' artificially, like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs, like spinlocks, use similar techniques;
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is correlated with the 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
* We have to allow irqs to arrive between __sti and __cli
*/
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif
static inline void wait_on_irq(int cpu)
{
int count = MAXCOUNT;
for (;;) {
/*
* Wait until all interrupts are gone. Wait
* for bottom half handlers unless we're
* already executing in one..
*/
if (!irqs_running())
if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
break;
/* Duh, we have to loop. Release the lock to avoid deadlocks */
clear_bit(0,&global_irq_lock);
for (;;) {
if (!--count) {
show("wait_on_irq");
count = ~0;
}
__sti();
SYNC_OTHER_CORES(cpu);
__cli();
if (irqs_running())
continue;
if (global_irq_lock)
continue;
if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
break;
}
}
}
/*
* This is called