Diff for /src/sys/vm/vm_object.c between versions 1.15 and 1.16

--- version 1.15, 2004/05/10 11:05:13
+++ version 1.16, 2004/05/13 17:40:19
@@ -503 +503 @@ vm_object_terminate(vm_object_t object)
  *      synchronous clustering mode implementation.
  *
  *      Odd semantics: if start == end, we clean everything.
- *
- *      The object must be locked.
  */
 
 void
@@ -519 +517 @@ vm_object_page_clean(vm_object_t object,
         int pagerflags;
         int curgeneration;
         lwkt_tokref vlock;
+        int s;
 
         if (object->type != OBJT_VNODE ||
                 (object->flags & OBJ_MIGHTBEDIRTY) == 0)
@@ -560 +559 @@ vm_object_page_clean(vm_object_t object,
 
                 scanlimit = scanreset;
                 tscan = tstart;
+
+                /*
+                 * spl protection is required despite the obj generation
+                 * tracking because we cannot safely call vm_page_test_dirty()
+                 * or avoid page field tests against an interrupt unbusy/free
+                 * race that might occur prior to the busy check in
+                 * vm_object_page_collect_flush().
+                 */
+                s = splvm();
                 while (tscan < tend) {
                         curgeneration = object->generation;
                         p = vm_page_lookup(object, tscan);
@@ -593 +601 @@ vm_object_page_clean(vm_object_t object,
                          * This returns 0 if it was unable to busy the first
                          * page (i.e. had to sleep).
                          */
-                        tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
+                        tscan += vm_object_page_collect_flush(object, p,
+                                                curgeneration, pagerflags);
                 }
+                splx(s);
 
                 /*
                  * If everything was dirty and we flushed it successfully,
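The race this hunk closes sits in the window between vm_page_lookup() and the busy check that vm_object_page_collect_flush() performs later: an interrupt arriving in that window can unbusy and free the page, and the object generation number is only checked afterwards. A minimal sketch of the rule the new comment states, that the lookup and any page field tests must share one splvm() section (example_clean_one is a hypothetical name; the other identifiers appear in this diff):

static void
example_clean_one(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t p;
        int s;

        s = splvm();            /* blocks the interrupt unbusy/free */
        p = vm_page_lookup(object, pindex);
        if (p != NULL) {
                /*
                 * Safe only while at splvm(): the generation check
                 * alone cannot detect a page freed in this window.
                 */
                vm_page_test_dirty(p);
        }
        splx(s);
}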
@@ -616 +626 @@ vm_object_page_clean(vm_object_t object,
          * However, if this is a nosync mmap then the object is likely to
          * stay dirty so do not mess with the page and do not clear the
          * object flags.
+         *
+         * spl protection is required because an interrupt can remove a page
+         * from the object.
          */
 
         clearobjflags = 1;
 
-        for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
+        s = splvm();
+        for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
                 vm_page_flag_set(p, PG_CLEANCHK);
                 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
                         clearobjflags = 0;
                 else
                         vm_page_protect(p, VM_PROT_READ);
         }
+        splx(s);
 
         if (clearobjflags && (tstart == 0) && (tend == object->size)) {
                 struct vnode *vp;
@@ -642 +656 @@ vm_object_page_clean(vm_object_t object,
                 }
         }
 
+        /*
+         * spl protection is required both to avoid an interrupt unbusy/free
+         * race against a vm_page_lookup(), and also to ensure that the
+         * memq is consistent.  We do not want a busy page to be ripped out
+         * from under us.
+         */
+        s = splvm();
 rescan:
+        splx(s);        /* give interrupts a chance */
+        s = splvm();
         curgeneration = object->generation;
 
-        for(p = TAILQ_FIRST(&object->memq); p; p = np) {
+        for (p = TAILQ_FIRST(&object->memq); p; p = np) {
                 int n;
 
                 np = TAILQ_NEXT(p, listq);
@@ -692 +715 @@ again:
                                 goto again;
                 }
         }
+        splx(s);
 
 #if 0
         VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
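The oddest-looking addition here is the splx()/splvm() pair placed immediately after the rescan label: each restart deliberately drops the spl section for an instant so interrupts held off during the previous pass can run, then re-enters it before touching the memq again. Reduced to its control flow, a sketch (example_rescan is a hypothetical name):

static void
example_rescan(vm_object_t object)
{
        vm_page_t p, np;
        int curgeneration;
        int s;

        s = splvm();
rescan:
        splx(s);                /* give interrupts a chance */
        s = splvm();
        curgeneration = object->generation;
        for (p = TAILQ_FIRST(&object->memq); p; p = np) {
                np = TAILQ_NEXT(p, listq);
                /*
                 * Any operation that blocked invalidates both the saved
                 * next pointer and the generation; restart the scan.
                 */
                if (object->generation != curgeneration)
                        goto rescan;
        }
        splx(s);
}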
@@ -701 +725 @@ again:
         return;
 }
 
+/*
+ * This routine must be called at splvm() to properly avoid an interrupt
+ * unbusy/free race that can occur prior to the busy check.
+ *
+ * Using the object generation number here to detect page ripout is not
+ * the best idea in the world. XXX
+ *
+ * NOTE: we operate under the assumption that a page found to not be busy
+ * will not be ripped out from under us by an interrupt.  XXX we should
+ * recode this to explicitly busy the pages.
+ */
 static int
 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
 {
         int runlen;
-        int s;
         int maxf;
         int chkb;
         int maxb;
@@ -715 +749 @@ vm_object_page_collect_flush(vm_object_t
         vm_page_t mab[vm_pageout_page_count];
         vm_page_t ma[vm_pageout_page_count];
 
-        s = splvm();
         pi = p->pindex;
         while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
                 if (object->generation != curgeneration) {
-                        splx(s);
                         return(0);
                 }
         }
@@ -793 +825 @@ vm_object_page_collect_flush(vm_object_t
         }
         runlen = maxb + maxf + 1;
 
-        splx(s);
         vm_pageout_flush(ma, runlen, pagerflags);
         for (i = 0; i < runlen; i++) {
                 if (ma[i]->valid & ma[i]->dirty) {
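Taken together, the last three hunks move the spl bracket out of vm_object_page_collect_flush() entirely: instead of opening its own section around the busy check, the routine now documents that the caller must already be at splvm(), so the caller's lookup and the callee's busy check share one critical section. A sketch of the resulting contract, with hypothetical names caller and callee (the routines they invoke are from this diff):

/* callee: opens no spl section; relies on the caller's */
static int
callee(vm_object_t object, vm_page_t p, int curgeneration)
{
        while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
                /* sleeping broke the section; detect ripout and bail */
                if (object->generation != curgeneration)
                        return (0);
        }
        return (1);
}

/* caller: one section covers both the lookup and the busy check */
static void
caller(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t p;
        int s;

        s = splvm();
        p = vm_page_lookup(object, pindex);
        if (p != NULL)
                (void)callee(object, p, object->generation);
        splx(s);
}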
@@ -826 +857 @@ static void
 vm_object_deactivate_pages(vm_object_t object)
 {
         vm_page_t p, next;
+        int s;
 
+        s = splvm();
         for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
                 next = TAILQ_NEXT(p, listq);
                 vm_page_deactivate(p);
         }
+        splx(s);
 }
 #endif
 
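This small hunk also illustrates the standing idiom for mutating scans of the memq: capture the successor before acting on the current page, and hold splvm() so that neither page can be freed by an interrupt mid-iteration. An annotated sketch (example_deactivate_all is a hypothetical name):

static void
example_deactivate_all(vm_object_t object)
{
        vm_page_t p, next;
        int s;

        s = splvm();
        for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
                /*
                 * Save the successor before acting on 'p'; at splvm()
                 * no interrupt can free 'p' or 'next', so the saved
                 * pointer is still valid when the loop advances.
                 */
                next = TAILQ_NEXT(p, listq);
                vm_page_deactivate(p);
        }
        splx(s);
}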
@@ -845 +879 @@ vm_object_deactivate_pages(vm_object_t o
  * NOTE: If the page is already at VM_PROT_NONE, calling
  * vm_page_protect will have no effect.
  */
 
 void
 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
         vm_pindex_t idx;
         vm_page_t p;
+        int s;
 
         if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
                 return;
 
+        /*
+         * spl protection needed to prevent races between the lookup,
+         * an interrupt unbusy/free, and our protect call.
+         */
+        s = splvm();
         for (idx = start; idx < end; idx++) {
                 p = vm_page_lookup(object, idx);
                 if (p == NULL)
                         continue;
                 vm_page_protect(p, VM_PROT_READ);
         }
+        splx(s);
 }
 
 /*
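The failure mode both this hunk and the next one guard against is the same, and it is easiest to see drawn as an interleaving. A sketch in comment form (the calls are the ones in the hunk above; without splvm() nothing orders the two columns):

/*
 *   kernel thread                        interrupt
 *   -------------                        ---------
 *   p = vm_page_lookup(object, idx);
 *                                        unbusy and free the page
 *   vm_page_protect(p, VM_PROT_READ);    <- acts on a freed page
 *
 * With the lookup and the protect call inside a single splvm()
 * section, the interrupt is deferred until splx() and the window
 * between the two calls disappears.
 */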
@@ -875 +915 @@ void
 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
         vm_page_t p;
+        int s;
 
         if (object == NULL)
                 return;
 
+        /*
+         * spl protection is required because an interrupt can unbusy/free
+         * a page.
+         */
+        s = splvm();
         for (p = TAILQ_FIRST(&object->memq);
-               p != NULL;
-               p = TAILQ_NEXT(p, listq)) {
+            p != NULL;
+            p = TAILQ_NEXT(p, listq)
+        ) {
                 if (p->pindex >= start && p->pindex < end)
                         vm_page_protect(p, VM_PROT_NONE);
         }
+        splx(s);
         if ((start == 0) && (object->size == end))
                 vm_object_clear_flag(object, OBJ_WRITEABLE);
 }
@@ -915 +964 @@ vm_object_madvise(vm_object_t object, vm
         vm_pindex_t end, tpindex;
         vm_object_t tobject;
         vm_page_t m;
+        int s;
 
         if (object == NULL)
                 return;
@@ -942 +992 @@ shadowlookup:
                         }
                 }
 
+                /*
+                 * spl protection is required to avoid a race between the
+                 * lookup, an interrupt unbusy/free, and our busy check.
+                 */
+
+                s = splvm();
                 m = vm_page_lookup(tobject, tpindex);
 
                 if (m == NULL) {
@@ -954 +1010 @@ shadowlookup:
                         /*
                          * next object
                          */
+                        splx(s);
                         tobject = tobject->backing_object;
                         if (tobject == NULL)
                                 continue;
@@ -973 +1030 @@ shadowlookup:
                     (m->flags & PG_UNMANAGED) ||
                     m->valid != VM_PAGE_BITS_ALL
                 ) {
+                        splx(s);
                         continue;
                 }
 
-                if (vm_page_sleep_busy(m, TRUE, "madvpo"))
+                if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
+                        splx(s);
                         goto relookup;
+                }
+                splx(s);
 
+                /*
+                 * Theoretically once a page is known not to be busy, an
+                 * interrupt cannot come along and rip it out from under us.
+                 */
+
                 if (advise == MADV_WILLNEED) {
                         vm_page_activate(m);
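The splx() placement in this hunk encodes two rules at once: every exit path out of the section must drop spl exactly once, and if vm_page_sleep_busy() actually slept, the section was broken, so the page pointer is stale and the lookup must be redone. Condensed into a sketch (example_advise_one is a hypothetical name; relookup mirrors the label in the real function):

static void
example_advise_one(vm_object_t tobject, vm_pindex_t tpindex)
{
        vm_page_t m;
        int s;

relookup:
        s = splvm();
        m = vm_page_lookup(tobject, tpindex);
        if (m == NULL) {
                splx(s);                /* exit 1: no page */
                return;
        }
        if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
                splx(s);                /* exit 2: we slept, 'm' may */
                goto relookup;          /* be stale, redo the lookup */
        }
        splx(s);                        /* exit 3: page is not busy */
        /*
         * From here on the code leans on the stated (XXX) assumption
         * that a not-busy page will not be ripped out by an interrupt.
         */
        vm_page_activate(m);
}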
@@ -1094 +1160 @@ vm_object_backing_scan(vm_object_t objec
         vm_object_t backing_object;
         vm_pindex_t backing_offset_index;
 
+        /*
+         * spl protection is required to avoid races between the memq/lookup,
+         * an interrupt doing an unbusy/free, and our busy check.  Among
+         * other things.
+         */
         s = splvm();
 
         backing_object = object->backing_object;
@@ -1493 +1564 @@ vm_object_collapse(vm_object_t object)
  *
  *      Removes all physical pages in the specified
  *      object range from the object's list of pages.
- *
- *      The object must be locked.
  */
 void
 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
@@ -1503 +1572 @@ vm_object_page_remove(vm_object_t object
         vm_page_t p, next;
         unsigned int size;
         int all;
+        int s;
 
-        if (object == NULL ||
-            object->resident_page_count == 0)
+        if (object == NULL || object->resident_page_count == 0)
                 return;
 
         all = ((end == 0) && (start == 0));
@@ -1515 +1584 @@ vm_object_page_remove(vm_object_t object
          * remove pages from the object (we must instead remove the page
          * references, and then destroy the object).
          */
-        KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object"));
+        KASSERT(object->type != OBJT_PHYS,
+                ("attempt to remove pages from a physical object"));
 
+        /*
+         * Indicating that the object is undergoing paging.
+         *
+         * spl protection is required to avoid a race between the memq scan,
+         * an interrupt unbusy/free, and the busy check.
+         */
         vm_object_pip_add(object, 1);
+        s = splvm();
 again:
         size = end - start;
         if (all || size > object->resident_page_count / 4) {
@@ -1553 +1630 @@ again:
         } else {
                 while (size > 0) {
                         if ((p = vm_page_lookup(object, start)) != 0) {
-
                                 if (p->wire_count != 0) {
                                         vm_page_protect(p, VM_PROT_NONE);
                                         if (!clean_only)
@@ -1587 +1663 @@ again:
                         size -= 1;
                 }
         }
+        splx(s);
         vm_object_pip_wakeup(object);
 }
 
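vm_object_page_remove() layers two protections that guard different things: the paging-in-progress count keeps the object itself from being torn down across any sleep, while splvm() keeps the page list stable between a lookup and whatever follows it. A sketch of the layering and its bracket ordering, shrunk to a protect-only scan (example_remove_range is a hypothetical name):

static void
example_remove_range(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
        vm_page_t p;
        int s;

        /*
         * Order matters: take the PIP reference first, then enter the
         * spl section; release them in the reverse order.
         */
        vm_object_pip_add(object, 1);
        s = splvm();
        while (start < end) {
                if ((p = vm_page_lookup(object, start)) != NULL)
                        vm_page_protect(p, VM_PROT_NONE);
                start += 1;
        }
        splx(s);
        vm_object_pip_wakeup(object);
}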
@@ -1888 +1965 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_
                 osize = object->size;
                 if (osize > 128)
                         osize = 128;
-                for(idx=0;idx<osize;idx++) {
+                for (idx = 0; idx < osize; idx++) {
                         m = vm_page_lookup(object, idx);
                         if (m == NULL) {
                                 if (rcount) {

Legend: lines prefixed with "-" were removed from v1.15; lines prefixed with
"+" were added in v1.16; all other lines are unchanged context.