Diff for /src/sys/kern/lwkt_ipiq.c between versions 1.1 and 1.2

--- /src/sys/kern/lwkt_ipiq.c   version 1.1   2004/02/15 02:14:41
+++ /src/sys/kern/lwkt_ipiq.c   version 1.2   2004/02/15 05:15:25

Hunk starting at line 306 (in both versions), in lwkt_process_ipiq1(lwkt_ipiq_t ip, struc...):
   
 /*
  * CPU Synchronization Support
+ *
+ * lwkt_cpusync_simple()
+ *
+ *      The function is executed synchronously before return on remote cpus.
+ *      A lwkt_cpusync_t pointer is passed as an argument.  The data can
+ *      be accessed via arg->cs_data.
+ *
+ *      XXX should I just pass the data as an argument to be consistent?
  */
 
 void
-lwkt_cpusync_simple(cpumask_t mask, cpusync_func2_t func, void *data)
+lwkt_cpusync_simple(cpumask_t mask, cpusync_func_t func, void *data)
+{
+    struct lwkt_cpusync cmd;
+
+    cmd.cs_run_func = NULL;
+    cmd.cs_fin1_func = func;
+    cmd.cs_fin2_func = NULL;
+    cmd.cs_data = data;
+    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
+    if (mask & (1 << mycpu->gd_cpuid))
+        func(&cmd);
+    lwkt_cpusync_finish(&cmd);
+}
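The sketch below is not part of the diff; it only illustrates the calling
convention of the new lwkt_cpusync_simple(), using the names visible in this
change.  The callback, the flag variable, and the header list are
illustrative assumptions, not taken from the source tree.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/thread.h>     /* assumed home of the lwkt/cpusync declarations */

    /* Callback following the cpusync_func_t convention: it receives the */
    /* lwkt_cpusync_t pointer and fetches the caller's argument from cs_data. */
    static void
    example_sync_cb(lwkt_cpusync_t poll)
    {
        int *flag = poll->cs_data;

        *flag = 1;      /* idempotent side effect, purely for illustration */
    }

    static void
    example_sync_all_cpus(void)
    {
        static int flag;
        cpumask_t mask;

        /* Target every cpu, including the calling cpu. */
        mask = mycpu->gd_other_cpus | (1 << mycpu->gd_cpuid);
        lwkt_cpusync_simple(mask, example_sync_cb, &flag);
    }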
 
+/*
+ * lwkt_cpusync_fastdata()
+ *
+ *      The function is executed in tandem with return on remote cpus.
+ *      The data is directly passed as an argument.  Do not pass pointers to
+ *      temporary storage as the storage might have gone poof by the time
+ *      the target cpu executes the function.
+ *
+ *      At the moment lwkt_cpusync is declared on the stack and we must wait
+ *      for all remote cpus to ack in lwkt_cpusync_finish(), but as a future
+ *      optimization we should be able to put a counter in the globaldata
+ *      structure (if it is not otherwise being used) and just poke it and
+ *      return without waiting. XXX
+ */
+void
+lwkt_cpusync_fastdata(cpumask_t mask, cpusync_func2_t func, void *data)
 {
     struct lwkt_cpusync cmd;
-    int count;
 
     cmd.cs_run_func = NULL;
     cmd.cs_fin1_func = NULL;
     cmd.cs_fin2_func = func;
-    cmd.cs_data = data;
-    count = lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
+    cmd.cs_data = NULL;
+    lwkt_cpusync_start(mask & mycpu->gd_other_cpus, &cmd);
     if (mask & (1 << mycpu->gd_cpuid))
         func(data);
-    lwkt_cpusync_finish(&cmd, count);
+    lwkt_cpusync_finish(&cmd);
 }
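Again as an aside from the diff itself, a minimal lwkt_cpusync_fastdata()
sketch follows, assuming the same headers as the previous sketch.  The point
being illustrated is that the argument travels by value in the data pointer,
so nothing on the initiator's stack has to stay valid.  The callback, the
global, and the token encoding are illustrative assumptions.

    /* Callback following the cpusync_func2_t convention: it receives the */
    /* data argument directly rather than a lwkt_cpusync_t pointer.       */
    static int example_last_token;

    static void
    example_fast_cb(void *arg)
    {
        /* The value itself was passed; no pointer into the initiator's  */
        /* stack is dereferenced, so the initiator may already have       */
        /* returned.  All cpus store the same value, so the race is benign. */
        example_last_token = (int)(intptr_t)arg;
    }

    static void
    example_fastdata_broadcast(int token)
    {
        cpumask_t mask = mycpu->gd_other_cpus | (1 << mycpu->gd_cpuid);

        lwkt_cpusync_fastdata(mask, example_fast_cb, (void *)(intptr_t)token);
    }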
   
 /*
- * Start synchronization with a set of target cpus, return once they are
- * known to be in a synchronization loop.  The target cpus will execute
- * poll->cs_run_func() IN TANDEM WITH THE RETURN.
+ * lwkt_cpusync_start()
+ *
+ *      Start synchronization with a set of target cpus, return once they are
+ *      known to be in a synchronization loop.  The target cpus will execute
+ *      poll->cs_run_func() IN TANDEM WITH THE RETURN.
+ *
+ *      XXX future: add lwkt_cpusync_start_quick() and require a call to
+ *      lwkt_cpusync_add() or lwkt_cpusync_wait(), allowing the caller to
+ *      potentially absorb the IPI latency doing something useful.
  */
-int
+void
 lwkt_cpusync_start(cpumask_t mask, lwkt_cpusync_t poll)
 {
-    int count;
-
     poll->cs_count = 0;
-    count = lwkt_send_ipiq_mask(mask, (ipifunc_t)lwkt_cpusync_remote1, poll);
-    while (poll->cs_count != count) {
+    poll->cs_mask = mask;
+    poll->cs_maxcount = lwkt_send_ipiq_mask(mask & mycpu->gd_other_cpus,
+                                (ipifunc_t)lwkt_cpusync_remote1, poll);
+    if (mask & (1 << mycpu->gd_cpuid)) {
+        if (poll->cs_run_func)
+            poll->cs_run_func(poll);
+    }
+    while (poll->cs_count != poll->cs_maxcount) {
+        crit_enter();
+        lwkt_process_ipiq();
+        crit_exit();
+    }
+}
+
+void
+lwkt_cpusync_add(cpumask_t mask, lwkt_cpusync_t poll)
+{
+    mask &= ~poll->cs_mask;
+    poll->cs_mask |= mask;
+    poll->cs_maxcount += lwkt_send_ipiq_mask(mask & mycpu->gd_other_cpus,
+                                (ipifunc_t)lwkt_cpusync_remote1, poll);
+    if (mask & (1 << mycpu->gd_cpuid)) {
+        if (poll->cs_run_func)
+            poll->cs_run_func(poll);
+    }
+    while (poll->cs_count != poll->cs_maxcount) {
         crit_enter();
         lwkt_process_ipiq();
         crit_exit();
     }
-    return(count);
 }
 
 /*
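Before the second hunk, a hedged sketch of how the lower-level start/finish
pair reworked in this revision might be used directly: capture a set of cpus,
modify shared state while they spin in the synchronization loop, then release
them.  Only names visible in the diff are used for the API; the callback, the
global variable, and the surrounding functions are illustrative assumptions.

    static int example_global_setting;

    /* cs_fin1_func-style callback (cpusync_func_t): runs on each cpu in the */
    /* sync set before lwkt_cpusync_finish() releases it.                    */
    static void
    example_release_cb(lwkt_cpusync_t poll)
    {
        /* e.g. reload per-cpu state derived from example_global_setting */
        (void)poll;
    }

    static void
    example_update_global(int new_value)
    {
        struct lwkt_cpusync cmd;
        cpumask_t mask = mycpu->gd_other_cpus | (1 << mycpu->gd_cpuid);

        cmd.cs_run_func = NULL;                 /* nothing to run at capture time */
        cmd.cs_fin1_func = example_release_cb;  /* run synchronously before release */
        cmd.cs_fin2_func = NULL;
        cmd.cs_data = NULL;

        lwkt_cpusync_start(mask, &cmd);         /* returns once the targets are spinning */
        example_global_setting = new_value;     /* targets are held in the sync loop */
        lwkt_cpusync_finish(&cmd);              /* release; fin1/fin2 run as documented */
    }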
Hunk starting at line 350 (v1.1) / line 414 (v1.2), in lwkt_cpusync_start(cpumask_t mask, lwkt_...:

  * execute cs_fin2_func(data) IN TANDEM WITH THIS FUNCTION'S RETURN.
  */
 void
-lwkt_cpusync_finish(lwkt_cpusync_t poll, int count)
+lwkt_cpusync_finish(lwkt_cpusync_t poll)
 {
-    count = -(count + 1);
+    int count;
+
+    count = -(poll->cs_maxcount + 1);
     poll->cs_count = -1;
+    if (poll->cs_mask & (1 << mycpu->gd_cpuid)) {
+        if (poll->cs_fin1_func)
+            poll->cs_fin1_func(poll);
+        if (poll->cs_fin2_func)
+            poll->cs_fin2_func(poll->cs_data);
+    }
     while (poll->cs_count != count) {
         crit_enter();
         lwkt_process_ipiq();
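The rest of this hunk is not shown above, but the counting handshake is
already visible and a small worked example may help: if lwkt_cpusync_start()
sent IPIs to three remote cpus, cs_maxcount is 3 and start() spins until
cs_count climbs from 0 to 3 as each target checks in.  lwkt_cpusync_finish()
then sets cs_count to -1 and waits for it to reach -(3 + 1) = -4, i.e. each
of the three targets acknowledges the release by decrementing cs_count once
more (presumably in the lwkt_cpusync_remote* handlers, which fall outside the
lines shown in this diff).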
