Index: uvm_swap.c
===================================================================
--- uvm_swap.c	(revision 1160)
+++ uvm_swap.c	(revision 1163)
@@ -934,15 +934,6 @@ swap_on(p, sdp)
 		printf("leaving %d pages of swap\n", size);
 	}
 
-	/*
-	 * try to add anons to reflect the new swap space.
-	 */
-
-	error = uvm_anon_add(size);
-	if (error) {
-		goto bad;
-	}
-
 	/*
 	 * add a ref to vp to reflect usage as a swap device.
 	 */
@@ -990,7 +981,8 @@ swap_off(p, sdp)
 	struct proc *p;
 	struct swapdev *sdp;
 {
-	int npages = sdp->swd_npages;
+	int npages = sdp->swd_npages;
+	int error = 0;
 	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
 	UVMHIST_LOG(pdhist, "  dev=%x, npages=%d", sdp->swd_dev,npages,0,0);
@@ -1009,16 +1001,21 @@ swap_off(p, sdp)
 	if (uao_swap_off(sdp->swd_drumoffset,
 	    sdp->swd_drumoffset + sdp->swd_drumsize) ||
-	    anon_swap_off(sdp->swd_drumoffset,
+	    amap_swap_off(sdp->swd_drumoffset,
 	    sdp->swd_drumoffset + sdp->swd_drumsize)) {
+		error = ENOMEM;
+	} else if (sdp->swd_npginuse > sdp->swd_npgbad) {
+		error = EBUSY;
+	}
+	if (error) {
 		simple_lock(&uvm.swap_data_lock);
 		sdp->swd_flags |= SWF_ENABLE;
 		uvmexp.swpgavail += npages;
 		simple_unlock(&uvm.swap_data_lock);
-		return ENOMEM;
+
+		return error;
 	}
-	KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
 
 	/*
 	 * done with the vnode.
@@ -1029,9 +1026,6 @@ swap_off(p, sdp)
 	if (sdp->swd_vp != rootvp) {
 		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
 	}
-
-	/* remove anons from the system */
-	uvm_anon_remove(npages);
 
 	simple_lock(&uvm.swap_data_lock);
 	uvmexp.swpages -= npages;
Index: uvm_page.c
===================================================================
--- uvm_page.c	(revision 1160)
+++ uvm_page.c	(revision 1163)
@@ -1183,7 +1183,7 @@ uvm_pagealloc_strat(obj, off, anon, flag
 	pg->uanon = anon;
 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
 	if (anon) {
-		anon->u.an_page = pg;
+		anon->an_page = pg;
 		pg->pqflags = PQ_ANON;
 		uvmexp.anonpages++;
 	} else {
@@ -1364,7 +1364,7 @@ uvm_pagefree(pg)
 			pg->pqflags &= ~PQ_ANON;
 			uvmexp.anonpages--;
 		}
-		pg->uanon->u.an_page = NULL;
+		pg->uanon->an_page = NULL;
 		pg->uanon = NULL;
 	}
 	if (pg->flags & PG_WANTED) {
@@ -1387,7 +1387,7 @@ uvm_pagefree(pg)
 	if (pg->uobject != NULL) {
 		uvm_pageremove(pg);
 	} else if (pg->uanon != NULL) {
-		pg->uanon->u.an_page = NULL;
+		pg->uanon->an_page = NULL;
 		uvmexp.anonpages--;
 	}
Index: uvm_map.c
===================================================================
--- uvm_map.c	(revision 1160)
+++ uvm_map.c	(revision 1163)
@@ -3496,7 +3496,7 @@ uvm_map_clean(struct vm_map *map, vaddr_
 				continue;
 
 			simple_lock(&anon->an_lock);
-			pg = anon->u.an_page;
+			pg = anon->an_page;
 			if (pg == NULL) {
 				simple_unlock(&anon->an_lock);
 				continue;
@@ -4504,9 +4504,9 @@ uvm_page_printit(struct vm_page *pg, boo
 	/* cross-verify object/anon */
 	if ((pg->pqflags & PQ_FREE) == 0) {
 		if (pg->pqflags & PQ_ANON) {
-			if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
+			if (pg->uanon == NULL || pg->uanon->an_page != pg)
 				(*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
-				    (pg->uanon) ? pg->uanon->u.an_page : NULL);
+				    (pg->uanon) ? pg->uanon->an_page : NULL);
 			else
 				(*pr)("  anon backpointer is OK\n");
 		} else {
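Most of the churn in this patch is the mechanical rename of anon->u.an_page to anon->an_page. For orientation, a condensed before/after sketch of struct vm_anon (other fields elided; the authoritative change is the uvm_anon.h hunk near the end of the patch):

/* Before: a free anon was threaded onto uvm.afree through the same
 * storage as its page pointer, so the two uses had to share a union
 * and every access spelled out "u.an_page". */
struct vm_anon {
	int an_ref;
	union {
		struct vm_anon *an_nxt;		/* free list linkage */
		struct vm_page *an_page;	/* resident page */
	} u;
	int an_swslot;
};

/* After: the pool allocator tracks free anons itself, so the union
 * (and the "u." at every call site) disappears. */
struct vm_anon {
	int an_ref;
	struct vm_page *an_page;		/* resident page, or NULL */
	int an_swslot;
};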
Index: uvm_pdaemon.c
===================================================================
--- uvm_pdaemon.c	(revision 1160)
+++ uvm_pdaemon.c	(revision 1163)
@@ -598,7 +598,7 @@ uvmpd_scan_inactive(pglst)
 
 		if (anon) {
 			KASSERT(anon->an_swslot != 0);
-			anon->u.an_page = NULL;
+			anon->an_page = NULL;
 			slot = anon->an_swslot;
 		} else {
 			slot = uao_find_swslot(uobj, pageidx);
Index: uvm_loan.c
===================================================================
--- uvm_loan.c	(revision 1160)
+++ uvm_loan.c	(revision 1163)
@@ -373,7 +373,7 @@ uvm_loananon(ufi, output, flags, anon)
 
 	if (flags & UVM_LOAN_TOANON) {
 		simple_lock(&anon->an_lock);
-		pg = anon->u.an_page;
+		pg = anon->an_page;
 		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
 			if (pg->wire_count > 0) {
 				UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
@@ -428,7 +428,7 @@ uvm_loananon(ufi, output, flags, anon)
 	 * we have the page and its owner locked: do the loan now.
 	 */
 
-	pg = anon->u.an_page;
+	pg = anon->an_page;
 	uvm_lock_pageq();
 	if (pg->wire_count > 0) {
 		uvm_unlock_pageq();
@@ -795,14 +795,14 @@ uvm_loanuobj(ufi, output, flags, va)
 		if (anon == NULL) {
 			goto fail;
 		}
-		anon->u.an_page = pg;
+		anon->an_page = pg;
 		pg->uanon = anon;
 		uvm_lock_pageq();
 		if (pg->wire_count > 0) {
 			uvm_unlock_pageq();
 			UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
 			pg->uanon = NULL;
-			anon->u.an_page = NULL;
+			anon->an_page = NULL;
 			anon->an_ref--;
 			simple_unlock(&anon->an_lock);
 			uvm_anfree(anon);
@@ -937,7 +937,7 @@ again:
 		uvmfault_unlockall(ufi, amap, uobj, NULL);
 		return (-1);
 	}
-	anon->u.an_page = pg;
+	anon->an_page = pg;
 	pg->uanon = anon;
 	uvm_lock_pageq();
 	pg->loan_count++;
Index: uvm_amap_i.h
===================================================================
--- uvm_amap_i.h	(revision 1160)
+++ uvm_amap_i.h	(revision 1163)
@@ -134,9 +134,9 @@ amap_add(aref, offset, anon, replace)
 		if (amap->am_anon[slot] == NULL)
 			panic("amap_add: replacing null anon");
 
-		if (amap->am_anon[slot]->u.an_page != NULL &&
+		if (amap->am_anon[slot]->an_page != NULL &&
 		    (amap->am_flags & AMAP_SHARED) != 0) {
-			pmap_page_protect(amap->am_anon[slot]->u.an_page,
+			pmap_page_protect(amap->am_anon[slot]->an_page,
 			    VM_PROT_NONE);
 			/*
 			 * XXX: suppose page is supposed to be wired somewhere?
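The swap_off() hunks at the top of the patch now separate two failure modes that used to be conflated: ENOMEM means paging anonymous data back in failed, while EBUSY means swap slots are still in use (the case the removed KASSERT used to assert away). A hypothetical user-space caller of swapctl(2) could exploit the distinction as below; the retry-on-EBUSY policy is an assumption of this example, not something the patch implements:

#include <sys/swap.h>
#include <err.h>
#include <errno.h>
#include <unistd.h>

/* Hypothetical helper: disable one swap device. */
static int
swap_disable(char *path)
{
	while (swapctl(SWAP_OFF, path, 0) == -1) {
		if (errno == EBUSY) {	/* slots still in use; transient */
			sleep(1);
			continue;
		}
		warn("swapctl(SWAP_OFF, %s)", path);	/* e.g. ENOMEM */
		return -1;
	}
	return 0;
}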
Index: uvm_fault.c
===================================================================
--- uvm_fault.c	(revision 1160)
+++ uvm_fault.c	(revision 1163)
@@ -204,7 +204,7 @@ uvmfault_anonflush(anons, n)
 		if (anons[lcv] == NULL)
 			continue;
 		simple_lock(&anons[lcv]->an_lock);
-		pg = anons[lcv]->u.an_page;
+		pg = anons[lcv]->an_page;
 		if (pg && (pg->flags & PG_BUSY) == 0 && pg->loan_count == 0) {
 			uvm_lock_pageq();
 			if (pg->wire_count == 0) {
@@ -303,7 +303,7 @@ uvmfault_anonget(ufi, amap, anon)
 	error = 0;
 	uvmexp.fltanget++;
 	/* bump rusage counters */
-	if (anon->u.an_page)
+	if (anon->an_page)
 		curproc->p_stats->p_ru.ru_minflt++;
 	else
 		curproc->p_stats->p_ru.ru_majflt++;
@@ -314,7 +314,7 @@ uvmfault_anonget(ufi, amap, anon)
 
 	for (;;) {
 		we_own = FALSE;	/* TRUE if we set PG_BUSY on a page */
-		pg = anon->u.an_page;
+		pg = anon->an_page;
 
 		/*
 		 * if there is a resident page and it is loaned, then anon
@@ -826,14 +826,14 @@ ReFault:
 		anon = anons[lcv];
 		simple_lock(&anon->an_lock);
 		/* ignore loaned pages */
-		if (anon->u.an_page && anon->u.an_page->loan_count == 0 &&
-		    (anon->u.an_page->flags & PG_BUSY) == 0) {
+		if (anon->an_page && anon->an_page->loan_count == 0 &&
+		    (anon->an_page->flags & PG_BUSY) == 0) {
 			uvm_lock_pageq();
-			uvm_pageactivate(anon->u.an_page);
+			uvm_pageactivate(anon->an_page);
 			uvm_unlock_pageq();
 			UVMHIST_LOG(maphist,
 			    "  MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
-			    ufi.orig_map->pmap, currva, anon->u.an_page, 0);
+			    ufi.orig_map->pmap, currva, anon->an_page, 0);
 			uvmexp.fltnamap++;
 
 			/*
@@ -843,7 +843,7 @@ ReFault:
 			 */
 
 			(void) pmap_enter(ufi.orig_map->pmap, currva,
-			    VM_PAGE_TO_PHYS(anon->u.an_page),
+			    VM_PAGE_TO_PHYS(anon->an_page),
 			    (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
 			    enter_prot,
 			    PMAP_CANFAIL |
@@ -1070,7 +1070,7 @@ ReFault:
 	 * uobj is non null if the page is on loan from an object (i.e. uobj)
 	 */
 
-	uobj = anon->u.an_page->uobject;	/* locked by anonget if !NULL */
+	uobj = anon->an_page->uobject;	/* locked by anonget if !NULL */
 
 	/* locked: maps(read), amap, anon, uobj(if one) */
 
@@ -1078,7 +1078,7 @@ ReFault:
 	 * special handling for loaned pages
 	 */
 
-	if (anon->u.an_page->loan_count) {
+	if (anon->an_page->loan_count) {
 
 		if (!cow_now) {
@@ -1120,26 +1120,26 @@ ReFault:
 				 * (if any)
 				 */
 				/* copy old -> new */
-				uvm_pagecopy(anon->u.an_page, pg);
+				uvm_pagecopy(anon->an_page, pg);
 
 				/* force reload */
-				pmap_page_protect(anon->u.an_page,
+				pmap_page_protect(anon->an_page,
 				    VM_PROT_NONE);
 				uvm_lock_pageq();	  /* KILL loan */
 
-				anon->u.an_page->uanon = NULL;
+				anon->an_page->uanon = NULL;
 				/* in case we owned */
-				anon->u.an_page->pqflags &= ~PQ_ANON;
+				anon->an_page->pqflags &= ~PQ_ANON;
 
 				if (uobj) {
 					/* if we were receiver of loan */
-					anon->u.an_page->loan_count--;
+					anon->an_page->loan_count--;
 				} else {
 					/*
 					 * we were the lender (A->K); need
 					 * to remove the page from pageq's.
 					 */
-					uvm_pagedequeue(anon->u.an_page);
+					uvm_pagedequeue(anon->an_page);
 				}
 
 				uvm_pageactivate(pg);
@@ -1150,7 +1150,7 @@ ReFault:
 		}
 
 		/* install new page in anon */
-		anon->u.an_page = pg;
+		anon->an_page = pg;
 		pg->uanon = anon;
 		pg->pqflags |= PQ_ANON;
 
 		pg->flags &= ~(PG_BUSY|PG_FAKE);
@@ -1193,7 +1193,7 @@ ReFault:
 				uvm_anfree(anon);
 			}
 			uvmfault_unlockall(&ufi, amap, uobj, oanon);
-			if (anon == NULL || !uvm_reclaimable()) {
+			if (!uvm_reclaimable()) {
 				UVMHIST_LOG(maphist,
 				    "<- failed.  out of VM",0,0,0,0);
 				uvmexp.fltnoanon++;
@@ -1206,7 +1206,7 @@ ReFault:
 	}
 
 	/* got all resources, replace anon with nanon */
-	uvm_pagecopy(oanon->u.an_page, pg);
+	uvm_pagecopy(oanon->an_page, pg);
 	uvm_lock_pageq();
 	uvm_pageactivate(pg);
 	pg->flags &= ~(PG_BUSY|PG_FAKE);
@@ -1228,7 +1228,7 @@ ReFault:
 
 		uvmexp.flt_anon++;
 		oanon = anon;		/* old, locked anon is same as anon */
-		pg = anon->u.an_page;
+		pg = anon->an_page;
 		if (anon->an_ref > 1)     /* disallow writes to ref > 1 anons */
 			enter_prot = enter_prot & ~VM_PROT_WRITE;
@@ -1561,7 +1561,7 @@ Case2:
 
 			/* unlock and fail ... */
 			uvmfault_unlockall(&ufi, amap, uobj, NULL);
-			if (anon == NULL || !uvm_reclaimable()) {
+			if (!uvm_reclaimable()) {
 				UVMHIST_LOG(maphist, "  promote: out of VM",
 				    0,0,0,0);
 				uvmexp.fltnoanon++;
Index: uvm_amap.c
===================================================================
--- uvm_amap.c	(revision 1160)
+++ uvm_amap.c	(revision 1163)
@@ -71,12 +71,35 @@ POOL_INIT(uvm_amap_pool, sizeof(struct v
 
 MALLOC_DEFINE(M_UVMAMAP, "UVM amap", "UVM amap and related structures");
 
+static struct simplelock amap_list_lock = SIMPLELOCK_INITIALIZER;
+static LIST_HEAD(, vm_amap) amap_list;
+
 /*
  * local functions
 */
 
 static struct vm_amap *amap_alloc1(int, int, int);
+static __inline void amap_list_insert(struct vm_amap *);
+static __inline void amap_list_remove(struct vm_amap *);
+
+static __inline void
+amap_list_insert(struct vm_amap *amap)
+{
+
+	simple_lock(&amap_list_lock);
+	LIST_INSERT_HEAD(&amap_list, amap, am_list);
+	simple_unlock(&amap_list_lock);
+}
+
+static __inline void
+amap_list_remove(struct vm_amap *amap)
+{
+
+	simple_lock(&amap_list_lock);
+	LIST_REMOVE(amap, am_list);
+	simple_unlock(&amap_list_lock);
+}
+
 #ifdef UVM_AMAP_PPREF
 /*
  * what is ppref?  ppref is an _optional_ amap feature which is used
@@ -243,6 +266,8 @@ amap_alloc(sz, padsz, waitf)
 		memset(amap->am_anon, 0,
 		    amap->am_maxslot * sizeof(struct vm_anon *));
 
+	amap_list_insert(amap);
+
 	UVMHIST_LOG(maphist,"<- done, amap = 0x%x, sz=%d", amap, sz, 0, 0);
 	return(amap);
 }
@@ -261,6 +286,7 @@ amap_free(amap)
 	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
 
 	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
+	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
 	LOCK_ASSERT(!simple_lock_held(&amap->am_l));
 	free(amap->am_slots, M_UVMAMAP);
 	free(amap->am_bckptr, M_UVMAMAP);
@@ -619,8 +645,8 @@ amap_share_protect(entry, prot)
 		for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
 			if (amap->am_anon[lcv] == NULL)
 				continue;
-			if (amap->am_anon[lcv]->u.an_page != NULL)
-				pmap_page_protect(amap->am_anon[lcv]->u.an_page,
+			if (amap->am_anon[lcv]->an_page != NULL)
+				pmap_page_protect(amap->am_anon[lcv]->an_page,
 					prot);
 		}
 		return;
@@ -631,8 +657,8 @@ amap_share_protect(entry, prot)
 		slot = amap->am_slots[lcv];
 		if (slot < entry->aref.ar_pageoff || slot >= stop)
 			continue;
-		if (amap->am_anon[slot]->u.an_page != NULL)
-			pmap_page_protect(amap->am_anon[slot]->u.an_page, prot);
+		if (amap->am_anon[slot]->an_page != NULL)
+			pmap_page_protect(amap->am_anon[slot]->an_page, prot);
 	}
 }
@@ -653,7 +679,19 @@ amap_wipeout(amap)
 	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
 
+	KASSERT(amap->am_ref == 1);
+	amap->am_ref = 0;	/* ... was one */
+
+	if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
+		/*
+		 * amap_swap_off will call us again.
+		 */
+		amap_unlock(amap);
+		return;
+	}
+	amap_list_remove(amap);
 	amap_unlock(amap);
+
 	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
 		int refs;
@@ -685,7 +723,6 @@ amap_wipeout(amap)
 	 * now we free the map
 	 */
 
-	amap->am_ref = 0;	/* ... was one */
 	amap->am_nused = 0;
 	amap_free(amap);	/* will unlock and free amap */
 	UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
@@ -844,6 +881,8 @@ amap_copy(map, entry, waitf, canchunk, s
 
 	amap_unlock(srcamap);
 
+	amap_list_insert(amap);
+
 	/*
 	 * install new amap.
 	 */
@@ -904,7 +943,7 @@ ReStart:
 		slot = amap->am_slots[lcv];
 		anon = amap->am_anon[slot];
 		simple_lock(&anon->an_lock);
-		pg = anon->u.an_page;
+		pg = anon->an_page;
 
 		/*
 		 * page must be resident since parent is wired
@@ -1223,3 +1262,111 @@ amap_wiperange(amap, slotoff, slots)
 }
 
 #endif
+
+/*
+ * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
+ *
+ * => called with swap_syscall_lock held.
+ * => note that we don't always traverse all anons.
+ *    eg. amaps being wiped out, released anons.
+ * => return TRUE if failed.
+ */
+
+boolean_t
+amap_swap_off(int startslot, int endslot)
+{
+	struct vm_amap *am;
+	struct vm_amap *am_next;
+	struct vm_amap marker_prev;
+	struct vm_amap marker_next;
+	struct lwp *l = curlwp;
+	boolean_t rv = FALSE;
+
+#if defined(DIAGNOSTIC)
+	memset(&marker_prev, 0, sizeof(marker_prev));
+	memset(&marker_next, 0, sizeof(marker_next));
+#endif /* defined(DIAGNOSTIC) */
+
+	PHOLD(l);
+	simple_lock(&amap_list_lock);
+	for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
+		int i;
+
+		LIST_INSERT_BEFORE(am, &marker_prev, am_list);
+		LIST_INSERT_AFTER(am, &marker_next, am_list);
+
+		if (!amap_lock_try(am)) {
+			simple_unlock(&amap_list_lock);
+			preempt(1);
+			simple_lock(&amap_list_lock);
+			am_next = LIST_NEXT(&marker_prev, am_list);
+			if (am_next == &marker_next) {
+				am_next = LIST_NEXT(am_next, am_list);
+			} else {
+				KASSERT(LIST_NEXT(am_next, am_list) ==
+				    &marker_next);
+			}
+			LIST_REMOVE(&marker_prev, am_list);
+			LIST_REMOVE(&marker_next, am_list);
+			continue;
+		}
+
+		simple_unlock(&amap_list_lock);
+
+		if (am->am_nused <= 0) {
+			amap_unlock(am);
+			goto next;
+		}
+
+		for (i = 0; i < am->am_nused; i++) {
+			int slot;
+			int swslot;
+			struct vm_anon *anon;
+
+			slot = am->am_slots[i];
+			anon = am->am_anon[slot];
+			simple_lock(&anon->an_lock);
+
+			swslot = anon->an_swslot;
+			if (swslot < startslot || endslot <= swslot) {
+				simple_unlock(&anon->an_lock);
+				continue;
+			}
+
+			am->am_flags |= AMAP_SWAPOFF;
+			amap_unlock(am);
+
+			rv = uvm_anon_pagein(anon);
+
+			amap_lock(am);
+			am->am_flags &= ~AMAP_SWAPOFF;
+			if (amap_refs(am) == 0) {
+				am->am_ref = 1; /* XXX */
+				amap_wipeout(am);
+				am = NULL;
+				break;
+			}
+			if (rv) {
+				break;
+			}
+			i = 0;
+		}
+
+		if (am) {
+			amap_unlock(am);
+		}
+
+next:
+		simple_lock(&amap_list_lock);
+		KASSERT(LIST_NEXT(&marker_prev, am_list) == &marker_next ||
+		    LIST_NEXT(LIST_NEXT(&marker_prev, am_list), am_list) ==
+		    &marker_next);
+		am_next = LIST_NEXT(&marker_next, am_list);
+		LIST_REMOVE(&marker_prev, am_list);
+		LIST_REMOVE(&marker_next, am_list);
+	}
+	simple_unlock(&amap_list_lock);
+	PRELE(l);
+
+	return rv;
+}
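amap_swap_off() above has to drop amap_list_lock in the middle of the walk (when a trylock fails, and while uvm_anon_pagein() sleeps), so it pins its position with dummy "marker" entries that no other thread will unlink. Below is a single-marker, user-space reduction of that idiom; the real function needs two markers bracketing the current amap so it can also recover when the entry it is standing on disappears. All names here are illustrative, not kernel API:

#include <sys/queue.h>
#include <stdio.h>

struct node {
	int val;			/* -1 identifies a marker */
	LIST_ENTRY(node) link;
};
static LIST_HEAD(, node) list = LIST_HEAD_INITIALIZER(list);

int
main(void)
{
	struct node nodes[4], marker = { .val = -1 }, *n, *next;
	int i;

	for (i = 0; i < 4; i++) {
		nodes[i].val = i;
		LIST_INSERT_HEAD(&list, &nodes[i], link);
	}
	for (n = LIST_FIRST(&list); n != NULL; n = next) {
		if (n->val == -1) {		/* someone else's marker */
			next = LIST_NEXT(n, link);
			continue;
		}
		/* Pin our position before "dropping the list lock". */
		LIST_INSERT_AFTER(n, &marker, link);
		printf("visit %d\n", n->val);
		/* ...lock dropped here: n may be unlinked or freed by
		 * others, but nobody touches our marker... */
		next = LIST_NEXT(&marker, link);
		LIST_REMOVE(&marker, link);
	}
	return 0;
}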
Index: uvm_amap.h
===================================================================
--- uvm_amap.h	(revision 1160)
+++ uvm_amap.h	(revision 1163)
@@ -125,6 +125,8 @@ void amap_unref /* drop reference to an
 			(struct vm_amap *, vaddr_t, vsize_t, int);
 void		amap_wipeout	/* remove all anons from amap */
 			(struct vm_amap *);
+boolean_t	amap_swap_off
+			(int, int);
 
 /*
  * amap flag values
@@ -132,6 +134,7 @@ void amap_wipeout /* remove all anons f
 
 #define AMAP_SHARED	0x1	/* amap is shared */
 #define AMAP_REFALL	0x2	/* amap_ref: reference entire amap */
+#define AMAP_SWAPOFF	0x4	/* amap_swap_off() is in progress */
 
 /*
  * amap_extend flags
@@ -174,6 +177,7 @@ struct vm_amap {
 #ifdef UVM_AMAP_PPREF
 	int *am_ppref;		/* per page reference count (if !NULL) */
 #endif
+	LIST_ENTRY(vm_amap) am_list;
 };
 
 /*
@@ -264,6 +268,7 @@ MALLOC_DECLARE(M_UVMAMAP);
 
 #define amap_flags(AMAP)	((AMAP)->am_flags)
 #define amap_lock(AMAP)		simple_lock(&(AMAP)->am_l)
+#define amap_lock_try(AMAP)	simple_lock_try(&(AMAP)->am_l)
 #define amap_refs(AMAP)		((AMAP)->am_ref)
 #define amap_unlock(AMAP)	simple_unlock(&(AMAP)->am_l)
Index: uvm.h
===================================================================
--- uvm.h	(revision 1160)
+++ uvm.h	(revision 1163)
@@ -106,10 +106,6 @@ struct uvm {
 	int page_hashmask;		/* hash mask */
 	struct simplelock hashlock;	/* lock on page_hash array */
 
-	/* anon stuff */
-	struct vm_anon *afree;		/* anon free list */
-	struct simplelock afreelock;	/* lock on anon free list */
-
 	struct simplelock kentry_lock;
 
 	/* aio_done is locked by uvm.pagedaemon_lock and splbio! */
Index: uvm_mmap.c
===================================================================
--- uvm_mmap.c	(revision 1160)
+++ uvm_mmap.c	(revision 1163)
@@ -228,7 +228,7 @@ sys_mincore(l, v, retval)
 			anon = amap_lookup(&entry->aref,
 			    start - entry->start);
 			/* Don't need to lock anon here. */
-			if (anon != NULL && anon->u.an_page != NULL) {
+			if (anon != NULL && anon->an_page != NULL) {
 
 				/*
 				 * Anon has the page for this entry
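The uvm_anon.c rewrite that follows replaces the boot-time anon arrays (which, per the removed comment, could only ever grow) with a pool_cache(9). The point of the cache constructor is that it runs when the pool backs a new object, not on every allocation, so gets and puts must preserve the constructed state; that is what the KASSERTs in the new uvm_analloc() and uvm_anfree() enforce. A condensed kernel-context sketch of that contract, with made-up example_* names:

static struct pool_cache example_cache;	/* paired with a POOL_INIT pool */

static int
example_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	/* Runs once per backing object, not once per get. */
	anon->an_ref = 0;
	simple_lock_init(&anon->an_lock);
	anon->an_page = NULL;
	anon->an_swslot = 0;
	return 0;
}

static struct vm_anon *
example_get(void)
{
	/* PR_NOWAIT: fail rather than sleep; callers handle NULL. */
	struct vm_anon *anon = pool_cache_get(&example_cache, PR_NOWAIT);

	if (anon != NULL) {
		KASSERT(anon->an_ref == 0);	/* constructed state */
		anon->an_ref = 1;
	}
	return anon;
}

static void
example_put(struct vm_anon *anon)
{
	/* Objects must go back to the cache in constructed state. */
	KASSERT(anon->an_ref == 0 && anon->an_page == NULL &&
	    anon->an_swslot == 0);
	pool_cache_put(&example_cache, anon);
}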
Index: uvm_anon.c
===================================================================
--- uvm_anon.c	(revision 1160)
+++ uvm_anon.c	(revision 1163)
@@ -51,107 +51,37 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v
 #include <uvm/uvm.h>
 #include <uvm/uvm_swap.h>
 
-/*
- * anonblock_list: global list of anon blocks,
- * locked by swap_syscall_lock (since we never remove
- * anything from this list and we only add to it via swapctl(2)).
- */
+static POOL_INIT(uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0, "anonpl",
+    &pool_allocator_nointr);
+static struct pool_cache uvm_anon_pool_cache;
 
-struct uvm_anonblock {
-	LIST_ENTRY(uvm_anonblock) list;
-	int count;
-	struct vm_anon *anons;
-};
-static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;
+static int uvm_anon_ctor(void *, void *, int);
 
-
-static boolean_t anon_pagein(struct vm_anon *);
-
-
 /*
  * allocate anons
  */
 void
 uvm_anon_init()
 {
-	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
 
-	simple_lock_init(&uvm.afreelock);
-	LIST_INIT(&anonblock_list);
-
-	/*
-	 * Allocate the initial anons.
-	 */
-	uvm_anon_add(nanon);
+	pool_cache_init(&uvm_anon_pool_cache, &uvm_anon_pool,
+	    uvm_anon_ctor, NULL, NULL);
 }
 
-/*
- * add some more anons to the free pool.  called when we add
- * more swap space.
- *
- * => swap_syscall_lock should be held (protects anonblock_list).
- */
-int
-uvm_anon_add(count)
-	int	count;
+static int
+uvm_anon_ctor(void *arg, void *object, int flags)
 {
-	struct uvm_anonblock *anonblock;
-	struct vm_anon *anon;
-	int lcv, needed;
+	struct vm_anon *anon = object;
 
-	simple_lock(&uvm.afreelock);
-	uvmexp.nanonneeded += count;
-	needed = uvmexp.nanonneeded - uvmexp.nanon;
-	simple_unlock(&uvm.afreelock);
+	anon->an_ref = 0;
+	simple_lock_init(&anon->an_lock);
+	anon->an_page = NULL;
+	anon->an_swslot = 0;
 
-	if (needed <= 0) {
-		return 0;
-	}
-	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed, 0,
-	    UVM_KMF_WIRED);
-	if (anon == NULL) {
-		simple_lock(&uvm.afreelock);
-		uvmexp.nanonneeded -= count;
-		simple_unlock(&uvm.afreelock);
-		return ENOMEM;
-	}
-	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
-
-	anonblock->count = needed;
-	anonblock->anons = anon;
-	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
-	memset(anon, 0, sizeof(*anon) * needed);
-
-	simple_lock(&uvm.afreelock);
-	uvmexp.nanon += needed;
-	uvmexp.nfreeanon += needed;
-	for (lcv = 0; lcv < needed; lcv++) {
-		simple_lock_init(&anon[lcv].an_lock);
-		anon[lcv].u.an_nxt = uvm.afree;
-		uvm.afree = &anon[lcv];
-	}
-	simple_unlock(&uvm.afreelock);
 	return 0;
 }
 
 /*
- * remove anons from the free pool.
- */
-void
-uvm_anon_remove(count)
-	int count;
-{
-	/*
-	 * we never actually free any anons, to avoid allocation overhead.
-	 * XXX someday we might want to try to free anons.
-	 */
-
-	simple_lock(&uvm.afreelock);
-	uvmexp.nanonneeded -= count;
-	simple_unlock(&uvm.afreelock);
-}
-
-/*
  * allocate an anon
  *
  * => new anon is returned locked!
@@ -159,21 +89,18 @@ uvm_anon_remove(count)
 struct vm_anon *
 uvm_analloc()
 {
-	struct vm_anon *a;
+	struct vm_anon *anon;
 
-	simple_lock(&uvm.afreelock);
-	a = uvm.afree;
-	if (a) {
-		uvm.afree = a->u.an_nxt;
-		uvmexp.nfreeanon--;
-		a->an_ref = 1;
-		a->an_swslot = 0;
-		a->u.an_page = NULL;	/* so we can free quickly */
-		LOCK_ASSERT(simple_lock_held(&a->an_lock) == 0);
-		simple_lock(&a->an_lock);
+	anon = pool_cache_get(&uvm_anon_pool_cache, PR_NOWAIT);
+	if (anon) {
+		KASSERT(anon->an_ref == 0);
+		LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);
+		KASSERT(anon->an_page == NULL);
+		KASSERT(anon->an_swslot == 0);
+		anon->an_ref = 1;
+		simple_lock(&anon->an_lock);
 	}
-	simple_unlock(&uvm.afreelock);
-	return(a);
+	return anon;
 }
 
@@ -200,7 +127,7 @@ uvm_anfree(anon)
 	 * get page
 	 */
 
-	pg = anon->u.an_page;
+	pg = anon->an_page;
 
 	/*
 	 * if there is a resident page and it is loaned, then anon may not
@@ -280,14 +207,10 @@ uvm_anfree(anon)
 	 * free the anon itself.
 	 */
 
-	KASSERT(anon->u.an_page == NULL);
+	KASSERT(anon->an_page == NULL);
 	KASSERT(anon->an_swslot == 0);
 
-	simple_lock(&uvm.afreelock);
-	anon->u.an_nxt = uvm.afree;
-	uvm.afree = anon;
-	uvmexp.nfreeanon++;
-	simple_unlock(&uvm.afreelock);
+	pool_cache_put(&uvm_anon_pool_cache, anon);
 	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
 }
 
@@ -346,7 +269,7 @@ uvm_anon_lockloanpg(anon)
 	 * not produce an incorrect result.
 	 */
 
-	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {
+	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
 
 		/*
 		 * quickly check to see if the page has an object before
@@ -400,80 +323,15 @@ uvm_anon_lockloanpg(anon)
 	return(pg);
 }
 
-
-
 /*
- * page in every anon that is paged out to a range of swslots.
- *
- * swap_syscall_lock should be held (protects anonblock_list).
- */
-
-boolean_t
-anon_swap_off(startslot, endslot)
-	int startslot, endslot;
-{
-	struct uvm_anonblock *anonblock;
-
-	LIST_FOREACH(anonblock, &anonblock_list, list) {
-		int i;
-
-		/*
-		 * loop thru all the anons in the anonblock,
-		 * paging in where needed.
-		 */
-
-		for (i = 0; i < anonblock->count; i++) {
-			struct vm_anon *anon = &anonblock->anons[i];
-			int slot;
-
-			/*
-			 * lock anon to work on it.
-			 */
-
-			simple_lock(&anon->an_lock);
-
-			/*
-			 * is this anon's swap slot in range?
-			 */
-
-			slot = anon->an_swslot;
-			if (slot >= startslot && slot < endslot) {
-				boolean_t rv;
-
-				/*
-				 * yup, page it in.
-				 */
-
-				/* locked: anon */
-				rv = anon_pagein(anon);
-				/* unlocked: anon */
-
-				if (rv) {
-					return rv;
-				}
-			} else {
-
-				/*
-				 * nope, unlock and proceed.
-				 */
-
-				simple_unlock(&anon->an_lock);
-			}
-		}
-	}
-	return FALSE;
-}
-
-
-/*
  * fetch an anon's page.
  *
  * => anon must be locked, and is unlocked upon return.
  * => returns TRUE if pagein was aborted due to lack of memory.
  */
 
-static boolean_t
-anon_pagein(anon)
+boolean_t
+uvm_anon_pagein(anon)
 	struct vm_anon *anon;
 {
 	struct vm_page *pg;
@@ -514,7 +372,7 @@ anon_pagein(anon)
 	 * mark it as dirty, clear its swslot and un-busy it.
 	 */
 
-	pg = anon->u.an_page;
+	pg = anon->an_page;
 	uobj = pg->uobject;
 	if (anon->an_swslot > 0)
 		uvm_swap_free(anon->an_swslot, 1);
@@ -557,7 +415,7 @@ void
 uvm_anon_release(anon)
 	struct vm_anon *anon;
 {
-	struct vm_page *pg = anon->u.an_page;
+	struct vm_page *pg = anon->an_page;
 
 	LOCK_ASSERT(simple_lock_held(&anon->an_lock));
@@ -574,7 +432,7 @@ uvm_anon_release(anon)
 	uvm_unlock_pageq();
 	simple_unlock(&anon->an_lock);
 
-	KASSERT(anon->u.an_page == NULL);
+	KASSERT(anon->an_page == NULL);
 
 	uvm_anfree(anon);
 }
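uvm_anon_pagein() (the renamed, now-exported anon_pagein()) keeps its old contract: enter with the anon locked, return with it unlocked, and TRUE means the pagein was aborted for lack of memory. A hypothetical reduced caller, shaped like the inner loop of amap_swap_off() earlier in this patch:

static boolean_t
example_pagein_range(struct vm_anon *anon, int startslot, int endslot)
{
	int slot;

	simple_lock(&anon->an_lock);
	slot = anon->an_swslot;
	if (slot < startslot || endslot <= slot) {
		simple_unlock(&anon->an_lock);	/* not in range */
		return FALSE;
	}
	/* locked: anon; uvm_anon_pagein() unlocks it either way. */
	return uvm_anon_pagein(anon);
}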
Index: uvm_anon.h
===================================================================
--- uvm_anon.h	(revision 1160)
+++ uvm_anon.h	(revision 1163)
@@ -50,22 +50,17 @@ struct vm_anon {
 	int an_ref;			/* reference count [an_lock] */
 	struct simplelock an_lock;	/* lock for an_ref */
-	union {
-		struct vm_anon *an_nxt;	/* if on free list [afreelock] */
-		struct vm_page *an_page;/* if in RAM [an_lock] */
-	} u;
+	struct vm_page *an_page;	/* if in RAM [an_lock] */
 	int an_swslot;			/* drum swap slot # (if != 0)
 					   [an_lock.  also, it is ok to read
 					   an_swslot if we hold an_page
 					   PG_BUSY] */
 };
 
 /*
- * a pool of vm_anon data structures is allocated and put on a global
- * free list at boot time.  vm_anon's on the free list use "an_nxt" as
- * a pointer to the next item on the free list.  for active vm_anon's
- * the data can be in one of the following state: [1] in a vm_page
- * with no backing store allocated yet, [2] in a vm_page with backing
- * store allocated, or [3] paged out to backing store (no vm_page).
+ * for active vm_anon's the data can be in one of the following state:
+ * [1] in a vm_page with no backing store allocated yet, [2] in a vm_page
+ * with backing store allocated, or [3] paged out to backing store
+ * (no vm_page).
 *
 * for pageout in case [2]: if the page has been modified then we must
 * flush it out to backing store, otherwise we can just dump the
@@ -101,12 +96,10 @@ struct vm_aref {
 
 struct vm_anon *uvm_analloc(void);
 void uvm_anfree(struct vm_anon *);
 void uvm_anon_init(void);
-int uvm_anon_add(int);
-void uvm_anon_remove(int);
 struct vm_page *uvm_anon_lockloanpg(struct vm_anon *);
 void uvm_anon_dropswap(struct vm_anon *);
-boolean_t anon_swap_off(int, int);
 void uvm_anon_release(struct vm_anon *);
+boolean_t uvm_anon_pagein(struct vm_anon *);
 
 #endif /* _KERNEL */
 
 #endif /* _UVM_UVM_ANON_H_ */
Index: uvm_init.c
===================================================================
--- uvm_init.c	(revision 1160)
+++ uvm_init.c	(revision 1163)
@@ -136,15 +136,9 @@ uvm_init()
 	uvm_pager_init();
 
 	/*
-	 * step 8: init anonymous memory systems
+	 * step 8: init the uvm_loan() facility.
 	 */
 
-	uvm_anon_init();		/* allocate initial anons */
-
-	/*
-	 * step 9: init the uvm_loan() facility.
-	 */
-
 	uvm_loan_init();
 
 	/*
@@ -163,4 +157,10 @@ uvm_init()
 	 */
 
 	link_pool_init();
+
+	/*
+	 * init anonymous memory systems
+	 */
+
+	uvm_anon_init();
 }
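The rewritten uvm_anon.h comment above enumerates three states for an active anon's data, now that the free-list state is gone. As a worked restatement, a hypothetical classifier (assumes the anon is locked so an_page and an_swslot are stable; the helper name is made up):

static const char *
example_anon_state(const struct vm_anon *anon)
{
	if (anon->an_page != NULL && anon->an_swslot == 0)
		return "[1] resident, no backing store allocated";
	if (anon->an_page != NULL)
		return "[2] resident, backing store allocated";
	if (anon->an_swslot != 0)
		return "[3] paged out to backing store";
	return "freshly allocated, no data yet";
}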