File:  [DragonFly] / src / sys / dev / agp / agp.c
Revision 1.12: download - view: text, annotated - select for diffs
Thu May 13 23:49:14 2004 UTC (10 years, 5 months ago) by dillon
Branches: MAIN
CVS tags: HEAD
device switch 1/many: Remove d_autoq, add d_clone (where d_autoq was).

d_autoq was used to allow the device port dispatch to mix old-style synchronous
calls with new style messaging calls within a particular device.  It was never
used for that purpose.

d_clone will be more fully implemented as work continues.  We are going to
install d_port in the dev_t (struct specinfo) structure itself and d_clone
will be needed to allow devices to 'revector' the port on a minor-number
by minor-number basis, in particular allowing minor numbers to be directly
dispatched to distinct threads.  This is something we will be needing later
on.

    1: /*-
    2:  * Copyright (c) 2000 Doug Rabson
    3:  * All rights reserved.
    4:  *
    5:  * Redistribution and use in source and binary forms, with or without
    6:  * modification, are permitted provided that the following conditions
    7:  * are met:
    8:  * 1. Redistributions of source code must retain the above copyright
    9:  *    notice, this list of conditions and the following disclaimer.
   10:  * 2. Redistributions in binary form must reproduce the above copyright
   11:  *    notice, this list of conditions and the following disclaimer in the
   12:  *    documentation and/or other materials provided with the distribution.
   13:  *
   14:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24:  * SUCH DAMAGE.
   25:  *
   26:  *	$FreeBSD: src/sys/pci/agp.c,v 1.3.2.4 2002/08/11 19:58:12 alc Exp $
   27:  *	$DragonFly: src/sys/dev/agp/agp.c,v 1.12 2004/05/13 23:49:14 dillon Exp $
   28:  */
   29: 
   30: #include "opt_bus.h"
   31: #include "opt_pci.h"
   32: 
   33: #include <sys/param.h>
   34: #include <sys/systm.h>
   35: #include <sys/malloc.h>
   36: #include <sys/kernel.h>
   37: #include <sys/bus.h>
   38: #include <sys/conf.h>
   39: #include <sys/ioccom.h>
   40: #include <sys/agpio.h>
   41: #include <sys/lock.h>
   42: #include <sys/proc.h>
   43: 
   44: #include <bus/pci/pcivar.h>
   45: #include <bus/pci/pcireg.h>
   46: #include "agppriv.h"
   47: #include "agpvar.h"
   48: #include "agpreg.h"
   49: 
   50: #include <vm/vm.h>
   51: #include <vm/vm_object.h>
   52: #include <vm/vm_page.h>
   53: #include <vm/vm_pageout.h>
   54: #include <vm/pmap.h>
   55: 
   56: #include <machine/md_var.h>
   57: #include <machine/bus.h>
   58: #include <machine/resource.h>
   59: #include <sys/rman.h>
   60: 
MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* Character device major number for /dev/agpgart. */
#define CDEV_MAJOR	148
				/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

/*
 * Userland entry points for /dev/agpgart.  Operations this driver does
 * not support are wired to the kernel's no*() stubs.
 */
static struct cdevsw agp_cdevsw = {
	/* name */	"agp",
	/* maj */	CDEV_MAJOR,
	/* flags */	D_TTY,
	/* port */	NULL,
	/* clone */	NULL,

	/* open */	agp_open,
	/* close */	agp_close,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	agp_ioctl,
	/* poll */	nopoll,
	/* mmap */	agp_mmap,
	/* strategy */	nostrategy,
	/* dump */	nodump,
	/* psize */	nopsize
};

static devclass_t agp_devclass;
/* Map a /dev/agpgart minor number back to its agp device_t (may be NULL). */
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))
   93: 
   94: /* Helper functions for implementing chipset mini drivers. */
   95: 
   96: void
   97: agp_flush_cache()
   98: {
   99: #ifdef __i386__
  100: 	wbinvd();
  101: #endif
  102: }
  103: 
/*
 * Locate the AGP capability in a device's PCI capability list.
 *
 * Returns the config-space offset of the AGP capability structure,
 * or 0 if the device has no capability list or no AGP capability.
 */
u_int8_t
agp_find_caps(device_t dev)
{
	u_int32_t status;
	u_int8_t ptr, next;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 * (Bit 0x10 indicates a capabilities list is present.)
	 */
	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & 0x10))
		return 0;

	/*
	 * Traverse the capabilities list.
	 */
	for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
	     ptr != 0;
	     ptr = next) {
		u_int32_t capid = pci_read_config(dev, ptr, 4);
		next = AGP_CAPID_GET_NEXT_PTR(capid);

		/*
		 * If this capability entry ID is 2, then we are done.
		 * (Capability ID 2 is the PCI-SIG assigned ID for AGP.)
		 */
		if (AGP_CAPID_GET_CAP_ID(capid) == 2)
			return ptr;
	}

	return 0;
}
  135: 
  136: /*
  137:  * Find an AGP display device (if any).
  138:  */
  139: static device_t
  140: agp_find_display(void)
  141: {
  142: 	devclass_t pci = devclass_find("pci");
  143: 	device_t bus, dev = 0;
  144: 	device_t *kids;
  145: 	int busnum, numkids, i;
  146: 
  147: 	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
  148: 		bus = devclass_get_device(pci, busnum);
  149: 		if (!bus)
  150: 			continue;
  151: 		device_get_children(bus, &kids, &numkids);
  152: 		for (i = 0; i < numkids; i++) {
  153: 			dev = kids[i];
  154: 			if (pci_get_class(dev) == PCIC_DISPLAY
  155: 			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
  156: 				if (agp_find_caps(dev)) {
  157: 					free(kids, M_TEMP);
  158: 					return dev;
  159: 				}
  160: 					
  161: 		}
  162: 		free(kids, M_TEMP);
  163: 	}
  164: 
  165: 	return 0;
  166: }
  167: 
/*
 * Allocate and zero a GATT (graphics address translation table) large
 * enough to map the device's current aperture.  Returns NULL if the
 * aperture size is bogus or contiguous memory cannot be obtained.
 */
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	/* One 32-bit GATT entry maps one AGP page of the aperture. */
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
			      "allocating GATT for aperture of size %dM\n",
			      apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_INTWAIT);
	gatt->ag_entries = entries;
	/*
	 * The chipset walks the table by physical address, so it must
	 * be physically contiguous and page aligned.
	 */
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 
					M_WAITOK, 0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	/* Make sure the chipset sees the zeroed table, not stale cache. */
	agp_flush_cache();

	return gatt;
}
  201: 
  202: void
  203: agp_free_gatt(struct agp_gatt *gatt)
  204: {
  205: 	contigfree(gatt->ag_virtual,
  206: 		   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
  207: 	free(gatt, M_AGP);
  208: }
  209: 
/*
 * Heuristic table (borrowed from the Linux agpgart driver) mapping
 * total system memory in MB (first column) to the maximum amount of
 * memory in MB we allow to be allocated for AGP (second column).
 */
static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
/* Number of rows in agp_max[]. */
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
  222: 
/*
 * Generic attach code shared by the chipset mini drivers: maps the
 * aperture, computes the AGP allocation limit, initialises the softc
 * and creates the /dev/agpgart device node.  Returns ENOMEM if the
 * aperture resource cannot be allocated.
 */
int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int rid, memsize, i;

	/*
	 * Find and map the aperture.
	 */
	rid = AGP_APBASE;
	sc->as_aperture = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					     0, ~0, 1, RF_ACTIVE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;	/* physical memory in MB */
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	/* Larger than the table's last row: clamp to the final entry. */
	if (i == agp_max_size) i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	lockinit(&sc->as_lock, PCATCH, "agplk", 0, 0);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
				  device_get_unit(dev),
				  UID_ROOT,
				  GID_WHEEL,
				  0600,
				  "agpgart");

	return 0;
}
  272: 
/*
 * Generic detach code: undoes agp_generic_attach().  The lock is
 * drained so no thread is left sleeping in agp_generic_bind_memory()
 * when the softc goes away.
 */
int
agp_generic_detach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
	lockmgr(&sc->as_lock, LK_DRAIN, NULL, curthread); /* XXX */
	destroy_dev(sc->as_devnode);
	agp_flush_cache();
	return 0;
}
  283: 
  284: /*
  285:  * This does the enable logic for v3, with the same topology
  286:  * restrictions as in place for v2 -- one bus, one device on the bus.
  287:  */
  288: static int
  289: agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
  290: {
  291: 	u_int32_t tstatus, mstatus;
  292: 	u_int32_t command;
  293: 	int rq, sba, fw, rate, arqsz, cal;
  294: 
  295: 	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
  296: 	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
  297: 
  298: 	/* Set RQ to the min of mode, tstatus and mstatus */
  299: 	rq = AGP_MODE_GET_RQ(mode);
  300: 	if (AGP_MODE_GET_RQ(tstatus) < rq)
  301: 		rq = AGP_MODE_GET_RQ(tstatus);
  302: 	if (AGP_MODE_GET_RQ(mstatus) < rq)
  303: 		rq = AGP_MODE_GET_RQ(mstatus);
  304: 
  305: 	/*
  306: 	 * ARQSZ - Set the value to the maximum one.
  307: 	 * Don't allow the mode register to override values.
  308: 	 */
  309: 	arqsz = AGP_MODE_GET_ARQSZ(mode);
  310: 	if (AGP_MODE_GET_ARQSZ(tstatus) > rq)
  311: 		rq = AGP_MODE_GET_ARQSZ(tstatus);
  312: 	if (AGP_MODE_GET_ARQSZ(mstatus) > rq)
  313: 		rq = AGP_MODE_GET_ARQSZ(mstatus);
  314: 
  315: 	/* Calibration cycle - don't allow override by mode register */
  316: 	cal = AGP_MODE_GET_CAL(tstatus);
  317: 	if (AGP_MODE_GET_CAL(mstatus) < cal)
  318: 		cal = AGP_MODE_GET_CAL(mstatus);
  319: 
  320: 	/* SBA must be supported for AGP v3. */
  321: 	sba = 1;
  322: 
  323: 	/* Set FW if all three support it. */
  324: 	fw = (AGP_MODE_GET_FW(tstatus)
  325: 	       & AGP_MODE_GET_FW(mstatus)
  326: 	       & AGP_MODE_GET_FW(mode));
  327: 	
  328: 	/* Figure out the max rate */
  329: 	rate = (AGP_MODE_GET_RATE(tstatus)
  330: 		& AGP_MODE_GET_RATE(mstatus)
  331: 		& AGP_MODE_GET_RATE(mode));
  332: 	if (rate & AGP_MODE_V3_RATE_8x)
  333: 		rate = AGP_MODE_V3_RATE_8x;
  334: 	else
  335: 		rate = AGP_MODE_V3_RATE_4x;
  336: 	if (bootverbose)
  337: 		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);
  338: 
  339: 	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);
  340: 
  341: 	/* Construct the new mode word and tell the hardware */
  342: 	command = AGP_MODE_SET_RQ(0, rq);
  343: 	command = AGP_MODE_SET_ARQSZ(command, arqsz);
  344: 	command = AGP_MODE_SET_CAL(command, cal);
  345: 	command = AGP_MODE_SET_SBA(command, sba);
  346: 	command = AGP_MODE_SET_FW(command, fw);
  347: 	command = AGP_MODE_SET_RATE(command, rate);
  348: 	command = AGP_MODE_SET_AGP(command, 1);
  349: 	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
  350: 	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
  351: 
  352: 	return 0;
  353: }
  354: 
/*
 * Enable logic for AGP v2: negotiate RQ, SBA, FW and the data rate
 * down to the intersection of target (bridge), master (display) and
 * the caller's requested mode, then program both command registers.
 */
static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	       & AGP_MODE_GET_SBA(mstatus)
	       & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate: pick the fastest commonly-supported one */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}
  406: 
  407: int
  408: agp_generic_enable(device_t dev, u_int32_t mode)
  409: {
  410: 	device_t mdev = agp_find_display();
  411: 	u_int32_t tstatus, mstatus;
  412: 
  413: 	if (!mdev) {
  414: 		AGP_DPF("can't find display\n");
  415: 		return ENXIO;
  416: 	}
  417: 
  418: 	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
  419: 	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
  420: 
  421: 	/*
  422: 	 * Check display and bridge for AGP v3 support.  AGP v3 allows
  423: 	 * more variety in topology than v2, e.g. multiple AGP devices
  424: 	 * attached to one bridge, or multiple AGP bridges in one
  425: 	 * system.  This doesn't attempt to address those situations,
  426: 	 * but should work fine for a classic single AGP slot system
  427: 	 * with AGP v3.
  428: 	 */
  429: 	if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
  430: 		return (agp_v3_enable(dev, mdev, mode));
  431: 	else
  432: 		return (agp_v2_enable(dev, mdev, mode));	    
  433: }
  434: 
  435: struct agp_memory *
  436: agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
  437: {
  438: 	struct agp_softc *sc = device_get_softc(dev);
  439: 	struct agp_memory *mem;
  440: 
  441: 	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
  442: 		return 0;
  443: 
  444: 	if (sc->as_allocated + size > sc->as_maxmem)
  445: 		return 0;
  446: 
  447: 	if (type != 0) {
  448: 		printf("agp_generic_alloc_memory: unsupported type %d\n",
  449: 		       type);
  450: 		return 0;
  451: 	}
  452: 
  453: 	mem = malloc(sizeof *mem, M_AGP, M_INTWAIT);
  454: 	mem->am_id = sc->as_nextid++;
  455: 	mem->am_size = size;
  456: 	mem->am_type = 0;
  457: 	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
  458: 	mem->am_physical = 0;
  459: 	mem->am_offset = 0;
  460: 	mem->am_is_bound = 0;
  461: 	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
  462: 	sc->as_allocated += size;
  463: 
  464: 	return mem;
  465: }
  466: 
  467: int
  468: agp_generic_free_memory(device_t dev, struct agp_memory *mem)
  469: {
  470: 	struct agp_softc *sc = device_get_softc(dev);
  471: 
  472: 	if (mem->am_is_bound)
  473: 		return EBUSY;
  474: 
  475: 	sc->as_allocated -= mem->am_size;
  476: 	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
  477: 	vm_object_deallocate(mem->am_obj);
  478: 	free(mem, M_AGP);
  479: 	return 0;
  480: }
  481: 
  482: int
  483: agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
  484: 			vm_offset_t offset)
  485: {
  486: 	struct agp_softc *sc = device_get_softc(dev);
  487: 	vm_offset_t i, j, k;
  488: 	vm_page_t m;
  489: 	int error;
  490: 
  491: 	lockmgr(&sc->as_lock, LK_EXCLUSIVE, NULL, curthread); /* XXX */
  492: 
  493: 	if (mem->am_is_bound) {
  494: 		device_printf(dev, "memory already bound\n");
  495: 		return EINVAL;
  496: 	}
  497: 	
  498: 	if (offset < 0
  499: 	    || (offset & (AGP_PAGE_SIZE - 1)) != 0
  500: 	    || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
  501: 		device_printf(dev, "binding memory at bad offset %#x\n",
  502: 			      (int) offset);
  503: 		return EINVAL;
  504: 	}
  505: 
  506: 	/*
  507: 	 * Bind the individual pages and flush the chipset's
  508: 	 * TLB.
  509: 	 *
  510: 	 * XXX Presumably, this needs to be the pci address on alpha
  511: 	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
  512: 	 * alpha AGP hardware to check.
  513: 	 */
  514: 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
  515: 		/*
  516: 		 * Find a page from the object and wire it
  517: 		 * down. This page will be mapped using one or more
  518: 		 * entries in the GATT (assuming that PAGE_SIZE >=
  519: 		 * AGP_PAGE_SIZE. If this is the first call to bind,
  520: 		 * the pages will be allocated and zeroed.
  521: 		 */
  522: 		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
  523: 			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
  524: 		if ((m->flags & PG_ZERO) == 0)
  525: 			vm_page_zero_fill(m);
  526: 		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
  527: 		vm_page_wire(m);
  528: 
  529: 		/*
  530: 		 * Install entries in the GATT, making sure that if
  531: 		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
  532: 		 * aligned to PAGE_SIZE, we don't modify too many GATT 
  533: 		 * entries.
  534: 		 */
  535: 		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
  536: 		     j += AGP_PAGE_SIZE) {
  537: 			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
  538: 			AGP_DPF("binding offset %#x to pa %#x\n",
  539: 				offset + i + j, pa);
  540: 			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
  541: 			if (error) {
  542: 				/*
  543: 				 * Bail out. Reverse all the mappings
  544: 				 * and unwire the pages.
  545: 				 */
  546: 				vm_page_wakeup(m);
  547: 				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
  548: 					AGP_UNBIND_PAGE(dev, offset + k);
  549: 				for (k = 0; k <= i; k += PAGE_SIZE) {
  550: 					m = vm_page_lookup(mem->am_obj,
  551: 							   OFF_TO_IDX(k));
  552: 					vm_page_unwire(m, 0);
  553: 				}
  554: 				lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
  555: 				return error;
  556: 			}
  557: 		}
  558: 		vm_page_wakeup(m);
  559: 	}
  560: 
  561: 	/*
  562: 	 * Flush the cpu cache since we are providing a new mapping
  563: 	 * for these pages.
  564: 	 */
  565: 	agp_flush_cache();
  566: 
  567: 	/*
  568: 	 * Make sure the chipset gets the new mappings.
  569: 	 */
  570: 	AGP_FLUSH_TLB(dev);
  571: 
  572: 	mem->am_offset = offset;
  573: 	mem->am_is_bound = 1;
  574: 
  575: 	lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
  576: 
  577: 	return 0;
  578: }
  579: 
  580: int
  581: agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
  582: {
  583: 	struct agp_softc *sc = device_get_softc(dev);
  584: 	vm_page_t m;
  585: 	int i;
  586: 
  587: 	lockmgr(&sc->as_lock, LK_EXCLUSIVE, NULL, curthread); /* XXX */
  588: 
  589: 	if (!mem->am_is_bound) {
  590: 		device_printf(dev, "memory is not bound\n");
  591: 		return EINVAL;
  592: 	}
  593: 
  594: 
  595: 	/*
  596: 	 * Unbind the individual pages and flush the chipset's
  597: 	 * TLB. Unwire the pages so they can be swapped.
  598: 	 */
  599: 	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
  600: 		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
  601: 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
  602: 		m = vm_page_lookup(mem->am_obj, atop(i));
  603: 		vm_page_unwire(m, 0);
  604: 	}
  605: 		
  606: 	agp_flush_cache();
  607: 	AGP_FLUSH_TLB(dev);
  608: 
  609: 	mem->am_offset = 0;
  610: 	mem->am_is_bound = 0;
  611: 
  612: 	lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
  613: 
  614: 	return 0;
  615: }
  616: 
  617: /* Helper functions for implementing user/kernel api */
  618: 
  619: static int
  620: agp_acquire_helper(device_t dev, enum agp_acquire_state state)
  621: {
  622: 	struct agp_softc *sc = device_get_softc(dev);
  623: 
  624: 	if (sc->as_state != AGP_ACQUIRE_FREE)
  625: 		return EBUSY;
  626: 	sc->as_state = state;
  627: 
  628: 	return 0;
  629: }
  630: 
  631: static int
  632: agp_release_helper(device_t dev, enum agp_acquire_state state)
  633: {
  634: 	struct agp_softc *sc = device_get_softc(dev);
  635: 
  636: 	if (sc->as_state == AGP_ACQUIRE_FREE)
  637: 		return 0;
  638: 
  639: 	if (sc->as_state != state)
  640: 		return EBUSY;
  641: 
  642: 	sc->as_state = AGP_ACQUIRE_FREE;
  643: 	return 0;
  644: }
  645: 
  646: static struct agp_memory *
  647: agp_find_memory(device_t dev, int id)
  648: {
  649: 	struct agp_softc *sc = device_get_softc(dev);
  650: 	struct agp_memory *mem;
  651: 
  652: 	AGP_DPF("searching for memory block %d\n", id);
  653: 	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
  654: 		AGP_DPF("considering memory block %d\n", mem->am_id);
  655: 		if (mem->am_id == id)
  656: 			return mem;
  657: 	}
  658: 	return 0;
  659: }
  660: 
  661: /* Implementation of the userland ioctl api */
  662: 
  663: static int
  664: agp_info_user(device_t dev, agp_info *info)
  665: {
  666: 	struct agp_softc *sc = device_get_softc(dev);
  667: 
  668: 	bzero(info, sizeof *info);
  669: 	info->bridge_id = pci_get_devid(dev);
  670: 	info->agp_mode = 
  671: 	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
  672: 	info->aper_base = rman_get_start(sc->as_aperture);
  673: 	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
  674: 	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
  675: 	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
  676: 
  677: 	return 0;
  678: }
  679: 
/*
 * AGPIOC_SETUP: enable the AGP interface with the user-requested mode.
 */
static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}
  685: 
  686: static int
  687: agp_allocate_user(device_t dev, agp_allocate *alloc)
  688: {
  689: 	struct agp_memory *mem;
  690: 
  691: 	mem = AGP_ALLOC_MEMORY(dev,
  692: 			       alloc->type,
  693: 			       alloc->pg_count << AGP_PAGE_SHIFT);
  694: 	if (mem) {
  695: 		alloc->key = mem->am_id;
  696: 		alloc->physical = mem->am_physical;
  697: 		return 0;
  698: 	} else {
  699: 		return ENOMEM;
  700: 	}
  701: }
  702: 
  703: static int
  704: agp_deallocate_user(device_t dev, int id)
  705: {
  706: 	struct agp_memory *mem = agp_find_memory(dev, id);;
  707: 
  708: 	if (mem) {
  709: 		AGP_FREE_MEMORY(dev, mem);
  710: 		return 0;
  711: 	} else {
  712: 		return ENOENT;
  713: 	}
  714: }
  715: 
  716: static int
  717: agp_bind_user(device_t dev, agp_bind *bind)
  718: {
  719: 	struct agp_memory *mem = agp_find_memory(dev, bind->key);
  720: 
  721: 	if (!mem)
  722: 		return ENOENT;
  723: 
  724: 	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
  725: }
  726: 
  727: static int
  728: agp_unbind_user(device_t dev, agp_unbind *unbind)
  729: {
  730: 	struct agp_memory *mem = agp_find_memory(dev, unbind->key);
  731: 
  732: 	if (!mem)
  733: 		return ENOENT;
  734: 
  735: 	return AGP_UNBIND_MEMORY(dev, mem);
  736: }
  737: 
  738: static int
  739: agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
  740: {
  741: 	device_t dev = KDEV2DEV(kdev);
  742: 	struct agp_softc *sc = device_get_softc(dev);
  743: 
  744: 	if (!sc->as_isopen) {
  745: 		sc->as_isopen = 1;
  746: 		device_busy(dev);
  747: 	}
  748: 
  749: 	return 0;
  750: }
  751: 
  752: static int
  753: agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
  754: {
  755: 	device_t dev = KDEV2DEV(kdev);
  756: 	struct agp_softc *sc = device_get_softc(dev);
  757: 	struct agp_memory *mem;
  758: 
  759: 	/*
  760: 	 * Clear the GATT and force release on last close
  761: 	 */
  762: 	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
  763: 		if (mem->am_is_bound)
  764: 			AGP_UNBIND_MEMORY(dev, mem);
  765: 		AGP_FREE_MEMORY(dev, mem);
  766: 	}
  767: 	if (sc->as_state == AGP_ACQUIRE_USER)
  768: 		agp_release_helper(dev, AGP_ACQUIRE_USER);
  769: 	sc->as_isopen = 0;
  770: 	device_unbusy(dev);
  771: 
  772: 	return 0;
  773: }
  774: 
  775: static int
  776: agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
  777: {
  778: 	device_t dev = KDEV2DEV(kdev);
  779: 
  780: 	switch (cmd) {
  781: 	case AGPIOC_INFO:
  782: 		return agp_info_user(dev, (agp_info *) data);
  783: 
  784: 	case AGPIOC_ACQUIRE:
  785: 		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);
  786: 
  787: 	case AGPIOC_RELEASE:
  788: 		return agp_release_helper(dev, AGP_ACQUIRE_USER);
  789: 
  790: 	case AGPIOC_SETUP:
  791: 		return agp_setup_user(dev, (agp_setup *)data);
  792: 
  793: 	case AGPIOC_ALLOCATE:
  794: 		return agp_allocate_user(dev, (agp_allocate *)data);
  795: 
  796: 	case AGPIOC_DEALLOCATE:
  797: 		return agp_deallocate_user(dev, *(int *) data);
  798: 
  799: 	case AGPIOC_BIND:
  800: 		return agp_bind_user(dev, (agp_bind *)data);
  801: 
  802: 	case AGPIOC_UNBIND:
  803: 		return agp_unbind_user(dev, (agp_unbind *)data);
  804: 
  805: 	}
  806: 
  807: 	return EINVAL;
  808: }
  809: 
/*
 * mmap handler for /dev/agpgart: translate a byte offset into the
 * aperture to a physical page index, or -1 if out of range.
 * NOTE(review): the bounds test uses '>' so a request at exactly the
 * aperture size is accepted — looks like it should be '>='; confirm
 * against d_mmap callers before changing.
 */
static int
agp_mmap(dev_t kdev, vm_offset_t offset, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	return atop(rman_get_start(sc->as_aperture) + offset);
}
  820: 
  821: /* Implementation of the kernel api */
  822: 
  823: device_t
  824: agp_find_device()
  825: {
  826: 	if (!agp_devclass)
  827: 		return 0;
  828: 	return devclass_get_device(agp_devclass, 0);
  829: }
  830: 
  831: enum agp_acquire_state
  832: agp_state(device_t dev)
  833: {
  834: 	struct agp_softc *sc = device_get_softc(dev);
  835: 	return sc->as_state;
  836: }
  837: 
  838: void
  839: agp_get_info(device_t dev, struct agp_info *info)
  840: {
  841: 	struct agp_softc *sc = device_get_softc(dev);
  842: 
  843: 	info->ai_mode =
  844: 		pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
  845: 	info->ai_aperture_base = rman_get_start(sc->as_aperture);
  846: 	info->ai_aperture_size = (rman_get_end(sc->as_aperture)
  847: 				  - rman_get_start(sc->as_aperture)) + 1;
  848: 	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
  849: 	info->ai_memory_allowed = sc->as_maxmem;
  850: 	info->ai_memory_used = sc->as_allocated;
  851: }
  852: 
/*
 * Kernel api: acquire the AGP for in-kernel (e.g. DRM) use.
 */
int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}
  858: 
/*
 * Kernel api: release a kernel-held AGP acquire.
 */
int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}
  864: 
/*
 * Kernel api: enable the AGP interface with the given mode word.
 */
int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}
  870: 
  871: void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
  872: {
  873: 	return  (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
  874: }
  875: 
  876: void agp_free_memory(device_t dev, void *handle)
  877: {
  878: 	struct agp_memory *mem = (struct agp_memory *) handle;
  879: 	AGP_FREE_MEMORY(dev, mem);
  880: }
  881: 
  882: int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
  883: {
  884: 	struct agp_memory *mem = (struct agp_memory *) handle;
  885: 	return AGP_BIND_MEMORY(dev, mem, offset);
  886: }
  887: 
  888: int agp_unbind_memory(device_t dev, void *handle)
  889: {
  890: 	struct agp_memory *mem = (struct agp_memory *) handle;
  891: 	return AGP_UNBIND_MEMORY(dev, mem);
  892: }
  893: 
  894: void agp_memory_info(device_t dev, void *handle, struct
  895: 		     agp_memory_info *mi)
  896: {
  897: 	struct agp_memory *mem = (struct agp_memory *) handle;
  898: 
  899: 	mi->ami_size = mem->am_size;
  900: 	mi->ami_physical = mem->am_physical;
  901: 	mi->ami_offset = mem->am_offset;
  902: 	mi->ami_is_bound = mem->am_is_bound;
  903: }