@@ -674,115 +674,3 @@ void __init init_IRQ(void)
 	/* Initialize EPOLL Loop */
 	os_setup_epoll();
 }
-
-/*
- * IRQ stack entry and exit:
- *
- * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
- * and switch over to the IRQ stack after some preparation.  We use
- * sigaltstack to receive signals on a separate stack from the start.
- * These two functions make sure the rest of the kernel won't be too
- * upset by being on a different stack.  The IRQ stack has a
- * thread_info structure at the bottom so that current et al continue
- * to work.
- *
- * to_irq_stack copies the current task's thread_info to the IRQ stack
- * thread_info and sets the task's stack to point to the IRQ stack.
- *
- * from_irq_stack copies the thread_info struct back (flags may have
- * been modified) and resets the task's stack pointer.
- *
- * Tricky bits -
- *
- * What happens when two signals race each other?  UML doesn't block
- * signals with sigprocmask or sa_mask, nor defer them during their
- * own handler, so a second signal could arrive while a previous one
- * is still setting up the thread_info.
- *
- * There are three cases -
- *	The first interrupt on the stack - sets up the thread_info and
- * handles the interrupt
- *	A nested interrupt interrupting the copying of the thread_info -
- * can't handle the interrupt, as the stack is in an unknown state
- *	A nested interrupt not interrupting the copying of the
- * thread_info - doesn't do any setup, just handles the interrupt
- *
- * The first job is to figure out whether we interrupted stack setup.
- * This is done by xchging the signal mask with pending_mask.  If the
- * value that comes back is zero, then there is no setup in progress,
- * and the interrupt can be handled.  If the value is non-zero, then
- * there is stack setup in progress.  In order to have the interrupt
- * handled, we leave our signal in the mask, and it will be handled by
- * the outer handler after it has set up the stack.
- *
- * Next is to figure out whether we are the outer handler or a nested
- * one.  As part of setting up the stack, thread_info->real_thread is
- * set to non-NULL (and is reset to NULL on exit).  This is the
- * nesting indicator.  If it is non-NULL, then the stack is already
- * set up and the handler can run.
- */
-
-static unsigned long pending_mask;
-
-unsigned long to_irq_stack(unsigned long *mask_out)
-{
-	struct thread_info *ti;
-	unsigned long mask, old;
-	int nested;
-
-	mask = xchg(&pending_mask, *mask_out);
-	if (mask != 0) {
-		/*
-		 * If any interrupts come in at this point, we want to
-		 * make sure that their bits aren't lost by our
-		 * putting our bit in.  So, this loop accumulates bits
-		 * until xchg returns the same value that we put in.
-		 * When that happens, there were no new interrupts,
-		 * and pending_mask contains a bit for each interrupt
-		 * that came in.
-		 */
-		old = *mask_out;
-		do {
-			old |= mask;
-			mask = xchg(&pending_mask, old);
-		} while (mask != old);
-		return 1;
-	}
-
-	ti = current_thread_info();
-	nested = (ti->real_thread != NULL);
-	if (!nested) {
-		struct task_struct *task;
-		struct thread_info *tti;
-
-		task = cpu_tasks[ti->cpu].task;
-		tti = task_thread_info(task);
-
-		*ti = *tti;
-		ti->real_thread = tti;
-		task->stack = ti;
-	}
-
-	mask = xchg(&pending_mask, 0);
-	*mask_out |= mask | nested;
-	return 0;
-}
-
-unsigned long from_irq_stack(int nested)
-{
-	struct thread_info *ti, *to;
-	unsigned long mask;
-
-	ti = current_thread_info();
-
-	pending_mask = 1;
-
-	to = ti->real_thread;
-	current->stack = to;
-	ti->real_thread = NULL;
-	*to = *ti;
-
-	mask = xchg(&pending_mask, 0);
-	return mask & ~1;
-}
-
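
The removed comment leans on two mechanisms that are easier to see in isolation. The first is sigaltstack(2): signals are delivered on a dedicated stack from the very first instruction of the handler, instead of arriving on the current stack and migrating later. Below is a minimal user-space sketch of that setup, assuming nothing from UML itself; the handler, the probe variable, and the stack size are illustrative, and SA_NODEFER mirrors the no-deferral behavior the comment describes:

```c
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *volatile handler_frame;	/* set by the signal handler */

static void handler(int sig)
{
	char probe;

	(void)sig;
	handler_frame = &probe;	/* record where the handler's frame lives */
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	/* Carve out a dedicated signal stack, analogous to UML's IRQ stack */
	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return 1;
	}

	/*
	 * SA_ONSTACK delivers the signal on the alternate stack from the
	 * start; SA_NODEFER leaves it unblocked inside its own handler.
	 */
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK | SA_NODEFER;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);

	printf("handler frame %p is %s [%p, %p)\n", (void *)handler_frame,
	       (handler_frame >= (char *)ss.ss_sp &&
		handler_frame < (char *)ss.ss_sp + ss.ss_size) ?
			"inside" : "outside",
	       ss.ss_sp, (void *)((char *)ss.ss_sp + ss.ss_size));
	free(ss.ss_sp);
	return 0;
}
```

UML arranges the same thing when it installs its signal handlers; the IRQ stack then only needs a thread_info copied to its base for current and friends to keep resolving.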
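The second mechanism is the race-resolution protocol in to_irq_stack: every handler xchg's its signal bit into pending_mask, and a handler that lost the race keeps folding bits back in until xchg hands back exactly what it last stored, meaning no new bits arrived in between. The sketch below exercises that accumulation loop single-threaded, with __atomic_exchange_n standing in for the kernel's xchg(); the name accumulate_pending and the bit values are made up for illustration:

```c
#include <stdio.h>

static unsigned long pending_mask;

/* Stand-in for the kernel's xchg(), built on a GCC/Clang builtin */
static unsigned long xchg_ul(unsigned long *p, unsigned long v)
{
	return __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST);
}

/*
 * Returns 0 if we won the race (our bit is parked in pending_mask and
 * we may set up the stack), 1 if setup was already in progress and our
 * bit was merged in for the outer handler to find.
 */
static int accumulate_pending(unsigned long *mask_out)
{
	unsigned long mask, old;

	mask = xchg_ul(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * Retry until xchg returns what we last wrote, i.e. no
		 * new bits slipped in while we were merging.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg_ul(&pending_mask, old);
		} while (mask != old);
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long first = 1UL << 3, second = 1UL << 5;

	/* First "signal" finds pending_mask == 0 and claims the setup */
	printf("first deferred?  %d\n", accumulate_pending(&first));
	/* Second one sees the bit left behind and defers instead */
	printf("second deferred? %d\n", accumulate_pending(&second));
	/* The winner drains everything, as to_irq_stack does on exit */
	printf("pending_mask = %#lx\n", xchg_ul(&pending_mask, 0));
	return 0;
}
```

from_irq_stack plays the sentinel variant of the same trick: it parks the value 1 in pending_mask while the thread_info is copied back, so any nested signal sees a non-zero mask and defers, and the final mask & ~1 strips that sentinel before reporting which interrupts arrived meanwhile.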