File:  [DragonFly] / src / sys / dev / agp / agp.c
Revision 1.10
Mon Mar 1 06:33:13 2004 UTC by dillon
Branches: MAIN
CVS tags: HEAD
Newtoken commit.  Change the token implementation as follows:  (1) Obtaining
a token no longer enters a critical section.  (2) Tokens can be held through
scheduler switches and blocking conditions and are effectively released and
reacquired on resume.  Thus tokens serialize access only while the thread
is actually running.  Serialization is not broken by preemptive interrupts;
that is, interrupt threads which preempt do not release the preempted
thread's tokens.  (3) Unlike spls, tokens will interlock with interrupt
threads on the same or on a different cpu.

The vnode interlock code has been rewritten and the API has changed.  The
mountlist vnode scanning code has been consolidated and all known races have
been fixed.  The vnode interlock is now a pool token.

The code that frees unreferenced vnodes whose last VM page has been freed has
been moved out of the low level vm_page_free() code and into the periodic
filesystem syncer code in vfs_msync().

The SMP startup code and the IPI code has been cleaned up considerably.
Certain early token interactions on AP cpus have been moved to the BSP.

The LWKT rwlock API has been cleaned up and turned on.

Major testing by: David Rhodus
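
A minimal sketch of the usage pattern this implies (illustrative only: the
structure, field, and token names are hypothetical, and the lwkt_tokref-based
lwkt_gettoken()/lwkt_reltoken() signatures are assumed from this commit's
API):

	struct mystruct {
		struct lwkt_token	ms_tok;		/* hypothetical */
		int			ms_count;
	};

	static void
	mystruct_bump(struct mystruct *ms)
	{
		lwkt_tokref ilock;

		/* Acquiring no longer enters a critical section. */
		lwkt_gettoken(&ilock, &ms->ms_tok);

		/*
		 * ms_count is serialized only while this thread actually
		 * runs.  If anything below blocks, the token is effectively
		 * released and reacquired on resume, so cached state must
		 * be revalidated after any blocking call.
		 */
		ms->ms_count++;

		lwkt_reltoken(&ilock);
	}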

/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.3.2.4 2002/08/11 19:58:12 alc Exp $
 *	$DragonFly: src/sys/dev/agp/agp.c,v 1.10 2004/03/01 06:33:13 dillon Exp $
 */

#include "opt_bus.h"
#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/proc.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "agppriv.h"
#include "agpvar.h"
#include "agpreg.h"

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

#define CDEV_MAJOR	148
				/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	/* name */	"agp",
	/* maj */	CDEV_MAJOR,
	/* flags */	D_TTY,
	/* port */	NULL,
	/* autoq */	0,

	/* open */	agp_open,
	/* close */	agp_close,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	agp_ioctl,
	/* poll */	nopoll,
	/* mmap */	agp_mmap,
	/* strategy */	nostrategy,
	/* dump */	nodump,
	/* psize */	nopsize
};

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache(void)
{
#ifdef __i386__
	wbinvd();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
	u_int32_t status;
	u_int8_t ptr, next;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & 0x10))
		return 0;

	/*
	 * Traverse the capabilities list.
	 */
	for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
	     ptr != 0;
	     ptr = next) {
		u_int32_t capid = pci_read_config(dev, ptr, 4);
		next = AGP_CAPID_GET_NEXT_PTR(capid);

		/*
		 * If this capability entry's ID is 2 (the AGP
		 * capability), we are done.
		 */
		if (AGP_CAPID_GET_CAP_ID(capid) == 2)
			return ptr;
	}

	return 0;
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		device_get_children(bus, &kids, &numkids);
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
			      "allocating GATT for aperture of size %dM\n",
			      apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return NULL;

	gatt->ag_entries = entries;
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
					0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return NULL;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	agp_flush_cache();

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	contigfree(gatt->ag_virtual,
		   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}

static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
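
/*
 * For example, with 256MB of system memory, agp_generic_attach() below
 * computes memsize = 256, the search stops at the {256, 204} row, and
 * as_maxmem is set to 204 << 20, i.e. at most 204MB of system memory
 * may be allocated as AGP memory.
 */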

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int rid, memsize, i;

	/*
	 * Find and map the aperture.
	 */
	rid = AGP_APBASE;
	sc->as_aperture = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					     0, ~0, 1, RF_ACTIVE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	lockinit(&sc->as_lock, PCATCH, "agplk", 0, 0);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
				  device_get_unit(dev),
				  UID_ROOT,
				  GID_WHEEL,
				  0600,
				  "agpgart");

	return 0;
}

int
agp_generic_detach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
	lockmgr(&sc->as_lock, LK_DRAIN, NULL, curthread); /* XXX */
	destroy_dev(sc->as_devnode);
	agp_flush_cache();
	return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	       & AGP_MODE_GET_SBA(mstatus)
	       & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return NULL;

	if (sc->as_allocated + size > sc->as_maxmem)
		return NULL;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		       type);
		return NULL;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
			vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	lockmgr(&sc->as_lock, LK_EXCLUSIVE, NULL, curthread); /* XXX */

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
		return EINVAL;
	}

	if ((offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
			      (int) offset);
		lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
		return EINVAL;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 *
	 * XXX Presumably, this needs to be the pci address on alpha
	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
	 * alpha AGP hardware to check.
	 */
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((m->flags & PG_ZERO) == 0)
			vm_page_zero_fill(m);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
		vm_page_wire(m);

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
				offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_wakeup(m);
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				for (k = 0; k <= i; k += PAGE_SIZE) {
					m = vm_page_lookup(mem->am_obj,
							   OFF_TO_IDX(k));
					vm_page_unwire(m, 0);
				}
				lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
				return error;
			}
		}
		vm_page_wakeup(m);
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */

	return 0;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	lockmgr(&sc->as_lock, LK_EXCLUSIVE, NULL, curthread); /* XXX */

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_unwire(m, 0);
	}

	agp_flush_cache();
	AGP_FLUSH_TLB(dev);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return NULL;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->aper_base = rman_get_start(sc->as_aperture);
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
			       alloc->type,
			       alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *)data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *)data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);
	}

	return EINVAL;
}

static int
agp_mmap(dev_t kdev, vm_offset_t offset, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	return atop(rman_get_start(sc->as_aperture) + offset);
}
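
/*
 * Typical userland usage of /dev/agpgart, as a rough sketch (error
 * handling omitted; the fd, setup, alloc and bind variables are
 * hypothetical, declared with the agp_setup, agp_allocate and agp_bind
 * types handled above):
 *
 *	fd = open("/dev/agpgart", O_RDWR);
 *	ioctl(fd, AGPIOC_ACQUIRE);
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	alloc.type = 0;
 *	alloc.pg_count = npages;
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	bind.key = alloc.key;
 *	bind.pg_start = 0;
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	ioctl(fd, AGPIOC_RELEASE);
 */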

/* Implementation of the kernel api */
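
/*
 * Typical in-kernel usage, as a rough sketch (error handling omitted;
 * the agpdev, handle, mode, bytes and offset variables are
 * hypothetical):
 *
 *	agpdev = agp_find_device();
 *	agp_acquire(agpdev);
 *	agp_enable(agpdev, mode);
 *	handle = agp_alloc_memory(agpdev, 0, bytes);
 *	agp_bind_memory(agpdev, handle, offset);
 *	...
 *	agp_unbind_memory(agpdev, handle);
 *	agp_free_memory(agpdev, handle);
 *	agp_release(agpdev);
 */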

device_t
agp_find_device(void)
{
	if (!agp_devclass)
		return NULL;
	return devclass_get_device(agp_devclass, 0);
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
		pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->ai_aperture_base = rman_get_start(sc->as_aperture);
	info->ai_aperture_size = (rman_get_end(sc->as_aperture)
				  - rman_get_start(sc->as_aperture)) + 1;
	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void
agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	AGP_FREE_MEMORY(dev, mem);
}

int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	return AGP_BIND_MEMORY(dev, mem, offset);
}

int
agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	return AGP_UNBIND_MEMORY(dev, mem);
}

void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}