Hi,
On Mon, 2005-04-04 at 13:28, Nathan Lynch wrote:
> On Mon, Apr 04, 2005 at 10:07:02AM +0800, Li Shaohua wrote:
> > Clean up all CPU states including its runqueue and idle thread, 
> > so we can use boot time code without any changes.
> > Note this makes /sys/devices/system/cpu/cpux/online unworkable.
> 
> In what sense does it make the online attribute unworkable?
I removed the idle thread and other CPU states, and made the dead CPU
enter a 'halt' busy loop. 

> 
> > diff -puN kernel/exit.c~cpu_state_clean kernel/exit.c
> > --- linux-2.6.11/kernel/exit.c~cpu_state_clean      2005-03-31 
> > 10:50:27.000000000 +0800
> > +++ linux-2.6.11-root/kernel/exit.c 2005-03-31 10:50:27.000000000 +0800
> > @@ -845,6 +845,65 @@ fastcall NORET_TYPE void do_exit(long co
> >     for (;;) ;
> >  }
> >  
> > +#ifdef CONFIG_STR_SMP
> > +void do_exit_idle(void)
> > +{
> > +   struct task_struct *tsk = current;
> > +   int group_dead;
> > +
> > +   BUG_ON(tsk->pid);
> > +   BUG_ON(tsk->mm);
> > +
> > +   if (tsk->io_context)
> > +           exit_io_context();
> > +   tsk->flags |= PF_EXITING;
> > +   tsk->it_virt_expires = cputime_zero;
> > +   tsk->it_prof_expires = cputime_zero;
> > +   tsk->it_sched_expires = 0;
> > +
> > +   acct_update_integrals(tsk);
> > +   update_mem_hiwater(tsk);
> > +   group_dead = atomic_dec_and_test(&tsk->signal->live);
> > +   if (group_dead) {
> > +           del_timer_sync(&tsk->signal->real_timer);
> > +           acct_process(-1);
> > +   }
> > +   exit_mm(tsk);
> > +
> > +   exit_sem(tsk);
> > +   __exit_files(tsk);
> > +   __exit_fs(tsk);
> > +   exit_namespace(tsk);
> > +   exit_thread();
> > +   exit_keys(tsk);
> > +
> > +   if (group_dead && tsk->signal->leader)
> > +           disassociate_ctty(1);
> > +
> > +   module_put(tsk->thread_info->exec_domain->module);
> > +   if (tsk->binfmt)
> > +           module_put(tsk->binfmt->module);
> > +
> > +   tsk->exit_code = -1;
> > +   tsk->exit_state = EXIT_DEAD;
> > +
> > +   /* in release_task */
> > +   atomic_dec(&tsk->user->processes);
> > +   write_lock_irq(&tasklist_lock);
> > +   __exit_signal(tsk);
> > +   __exit_sighand(tsk);
> > +   write_unlock_irq(&tasklist_lock);
> > +   release_thread(tsk);
> > +   put_task_struct(tsk);
> > +
> > +   tsk->flags |= PF_DEAD;
> > +#ifdef CONFIG_NUMA
> > +   mpol_free(tsk->mempolicy);
> > +   tsk->mempolicy = NULL;
> > +#endif
> > +}
> > +#endif
> 
> I don't understand why this is needed at all.  It looks like a fair
> amount of code from do_exit is being duplicated here.  
Yes, exactly. Someone who understands do_exit, please help clean up the
code. I'd like to remove the idle thread, since the smpboot code will
create a new idle thread.

> We've been
> doing cpu removal on ppc64 logical partitions for a while and never
> needed to do anything like this. 
Did it remove the idle thread, or is the dead CPU in an idle busy loop?

>  Maybe idle_task_exit would suffice?
idle_task_exit seems to just drop the mm. We need to destroy the idle
task for physical CPU hotplug, right?

> 
> 
> > diff -puN kernel/sched.c~cpu_state_clean kernel/sched.c
> > --- linux-2.6.11/kernel/sched.c~cpu_state_clean     2005-03-31 
> > 10:50:27.000000000 +0800
> > +++ linux-2.6.11-root/kernel/sched.c        2005-04-04 09:06:40.362357104 
> > +0800
> > @@ -4028,6 +4028,58 @@ void __devinit init_idle(task_t *idle, i
> >  }
> >  
> >  /*
> > + * Initial dummy domain for early boot and for hotplug cpu. Being static,
> > + * it is initialized to zero, so all balancing flags are cleared which is
> > + * what we want.
> > + */
> > +static struct sched_domain sched_domain_dummy;
> > +
> > +#ifdef CONFIG_STR_SMP
> > +static void __devinit exit_idle(int cpu)
> > +{
> > +   runqueue_t *rq = cpu_rq(cpu);
> > +   struct task_struct *p = rq->idle;
> > +   int j, k;
> > +   prio_array_t *array;
> > +
> > +   /* init runqueue */
> > +   spin_lock_init(&rq->lock);
> > +   rq->active = rq->arrays;
> > +   rq->expired = rq->arrays + 1;
> > +   rq->best_expired_prio = MAX_PRIO;
> > +
> > +   rq->prev_mm = NULL;
> > +   rq->curr = rq->idle = NULL;
> > +   rq->expired_timestamp = 0;
> > +
> > +   rq->sd = &sched_domain_dummy;
> > +   rq->cpu_load = 0;
> > +   rq->active_balance = 0;
> > +   rq->push_cpu = 0;
> > +   rq->migration_thread = NULL;
> > +   INIT_LIST_HEAD(&rq->migration_queue);
> > +   atomic_set(&rq->nr_iowait, 0);
> > +
> > +   for (j = 0; j < 2; j++) {
> > +           array = rq->arrays + j;
> > +           for (k = 0; k < MAX_PRIO; k++) {
> > +                   INIT_LIST_HEAD(array->queue + k);
> > +                   __clear_bit(k, array->bitmap);
> > +           }
> > +           // delimiter for bitsearch
> > +           __set_bit(MAX_PRIO, array->bitmap);
> > +   }
> > +   /* Destroy IDLE thread.
> > +    * it's safe now, the CPU is in busy loop
> > +    */
> > +   if (p->active_mm)
> > +           mmdrop(p->active_mm);
> > +   p->active_mm = NULL;
> > +   put_task_struct(p);
> > +}
> > +#endif
> > +
> > +/*
> >   * In a system that switches off the HZ timer nohz_cpu_mask
> >   * indicates which cpus entered this state. This is used
> >   * in the rcu update to wait only for active cpus. For system
> > @@ -4432,6 +4484,9 @@ static int migration_call(struct notifie
> >                     complete(&req->done);
> >             }
> >             spin_unlock_irq(&rq->lock);
> > +#ifdef CONFIG_STR_SMP
> > +           exit_idle(cpu);
> > +#endif
> 
> I don't understand the need for this, either.  The existing cpu
> hotplug notifier in the scheduler takes care of initializing the sched
> domains and groups appropriately for online/offline events; why do you
> need to touch the runqueue structures?
If a CPU is physically hot-removed from the system, shouldn't we clean
up its runqueue?

Thanks,
Shaohua

-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to