File:  [DragonFly] / src / sys / dev / agp / agp.c
Revision 1.13
Wed May 19 22:52:40 2004 UTC by dillon
Branches: MAIN
CVS tags: HEAD, DragonFly_1_0_REL, DragonFly_1_0_RC1, DragonFly_1_0A_REL
Device layer rollup commit.

* cdevsw_add() is now required.  cdevsw_add() and cdevsw_remove() may specify
  a mask/match pair indicating the range of supported minor numbers.
  Multiple cdevsw_add()'s using the same major number, but distinct minor
  ranges, may be issued.  All devices that previously failed to call
  cdevsw_add() now do so.  (A sketch of the interface follows the log
  message below.)

* cdevsw_remove() now automatically marks all devices within its supported
  range as being destroyed.

* vnode->v_rdev is no longer resolved when the vnode is created.  Instead,
  only v_udev (a newly added field) is resolved.  v_rdev is resolved when
  the vnode is opened and cleared on the last close.

* A great deal of code was making rather dubious assumptions about the
  validity of devices associated with vnodes, primarily because device
  structures persisted by virtue of being indexed by (major, minor)
  instead of by (cdevsw, major, minor).  In particular, if you run a
  program which connects to a USB device and then pull the USB device and
  plug it back in, the vnode subsystem will continue to believe that the
  device is open when, in fact, it isn't (because it was destroyed and
  recreated).

  In particular, note that all the VFS mount procedures now check devices
  via v_udev instead of v_rdev prior to calling VOP_OPEN(), since v_rdev
  is NULL prior to the first open.

* The disk layer's device interaction has been rewritten.  The disk layer
  (i.e. the slice and disklabel management layer) no longer overloads
  its data onto the device structure representing the underlying physical
  disk.  Instead, the disk layer uses the new cdevsw_add() functionality
  to register its own cdevsw using the underlying device's major number,
  and simply does NOT register the underlying device's cdevsw.  No
  confusion is created because the device hash is now based on
  (cdevsw,major,minor) rather than (major,minor).

  NOTE: This also means that underlying raw disk devices may use the entire
  device minor number instead of having to reserve the bits used by the disk
  layer, and also means that we can (theoretically) stack a fully
  disklabel-supported 'disk' on top of any block device.

* The new reference counting scheme prevents such stale-device references
  by associating a device with a cdevsw and disconnecting the device from
  its cdevsw when the cdevsw
  is removed.  Additionally, all udev2dev() lookups run through the cdevsw
  mask/match and only successfully find devices still associated with an
  active cdevsw.

* Major work on MFS:  MFS no longer shortcuts vnode and device creation.  It
  now creates a real vnode and a real device and implements real open and
  close VOPs.  Additionally, due to the disk layer changes, MFS is no longer
  limited to 255 mounts.  The new limit is 16 million.  Since MFS creates a
  real device node, mount_mfs will now create a real /dev/mfs<PID> device
  that can be read from userland (e.g. so you can dump an MFS filesystem).

* BUF AND DEVICE STRATEGY changes.  The struct buf contains a b_dev field.
  In order to properly handle stacked devices we now require that the b_dev
  field be initialized before the device strategy routine is called.  This
  required some additional work in various VFS implementations.  To enforce
  this requirement, biodone() now sets b_dev to NODEV.  The new disk layer
  will adjust b_dev before forwarding a request to the actual physical
  device.

* A bug in the ISO CD boot sequence which resulted in a panic has been fixed.

Testing by: lots of people, but David Rhodus found the most egregious bugs.
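
A sketch of the new registration interface, modeled on the
cdevsw_add()/cdevsw_remove() calls in the driver below; the second
cdevsw and the exact mask semantics shown are illustrative assumptions,
not part of this commit:

	static struct cdevsw foo_cdevsw = { /* ... */ };
	static struct cdevsw fooctl_cdevsw = { /* ... */ };

	/*
	 * Two cdevsws sharing one major number but claiming distinct
	 * minor ranges.  A minor belongs to a cdevsw when
	 * (minor & mask) == match; a mask of -1 claims one exact minor,
	 * as in the agp driver's cdevsw_add(&agp_cdevsw, -1, unit).
	 */
	cdevsw_add(&foo_cdevsw, ~15, 0);	/* minors 0-15 */
	cdevsw_add(&fooctl_cdevsw, -1, 16);	/* minor 16 only */
	/* ... */
	cdevsw_remove(&fooctl_cdevsw, -1, 16);	/* minor 16 marked destroyed */
	cdevsw_remove(&foo_cdevsw, ~15, 0);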

/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.3.2.4 2002/08/11 19:58:12 alc Exp $
 *	$DragonFly: src/sys/dev/agp/agp.c,v 1.13 2004/05/19 22:52:40 dillon Exp $
 */

#include "opt_bus.h"
#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/proc.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "agppriv.h"
#include "agpvar.h"
#include "agpreg.h"

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

#define CDEV_MAJOR	148
				/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	/* name */	"agp",
	/* maj */	CDEV_MAJOR,
	/* flags */	D_TTY,
	/* port */	NULL,
	/* clone */	NULL,

	/* open */	agp_open,
	/* close */	agp_close,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	agp_ioctl,
	/* poll */	nopoll,
	/* mmap */	agp_mmap,
	/* strategy */	nostrategy,
	/* dump */	nodump,
	/* psize */	nopsize
};

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache()
{
#ifdef __i386__
	wbinvd();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
	u_int32_t status;
	u_int8_t ptr, next;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & 0x10))
		return 0;

	/*
	 * Traverse the capabilities list.
	 */
	for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
	     ptr != 0;
	     ptr = next) {
		u_int32_t capid = pci_read_config(dev, ptr, 4);
		next = AGP_CAPID_GET_NEXT_PTR(capid);

		/*
		 * If this capability entry ID is 2, then we are done.
		 */
		if (AGP_CAPID_GET_CAP_ID(capid) == 2)
			return ptr;
	}

	return 0;
}

/*
 * Find an AGP display device (if any).
 */
static device_t
agp_find_display(void)
{
	devclass_t pci = devclass_find("pci");
	device_t bus, dev = 0;
	device_t *kids;
	int busnum, numkids, i;

	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		device_get_children(bus, &kids, &numkids);
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_DISPLAY
			    && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
				if (agp_find_caps(dev)) {
					free(kids, M_TEMP);
					return dev;
				}
		}
		free(kids, M_TEMP);
	}

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
			      "allocating GATT for aperture of size %dM\n",
			      apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_INTWAIT);
	gatt->ag_entries = entries;
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP,
					M_WAITOK, 0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	agp_flush_cache();

	return gatt;
}
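
A quick sense of scale for agp_alloc_gatt(): each GATT entry is a
32-bit word covering one AGP page.  A worked example, assuming the
conventional AGP_PAGE_SHIFT of 12 (4 KB AGP pages):

	/*
	 * Hypothetical 64 MB aperture:
	 *   entries = (64 << 20) >> 12 = 16384
	 *   table   = entries * sizeof(u_int32_t) = 64 KB
	 * i.e. one page-aligned, physically contiguous 64 KB table;
	 * gatt->ag_physical is the address handed to the chipset.
	 */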

void
agp_free_gatt(struct agp_gatt *gatt)
{
	contigfree(gatt->ag_virtual,
		   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}

static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
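
A worked example of how agp_generic_attach() below consults this table
(the 192 MB machine is hypothetical):

	/*
	 * With 192 MB of RAM, memsize = 192; the first row satisfying
	 * memsize <= agp_max[i][0] is {256, 204}, so the driver allows
	 * at most as_maxmem = 204 << 20 bytes (204 MB) of AGP memory.
	 */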

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int rid, memsize, i;

	/*
	 * Find and map the aperture.
	 */
	rid = AGP_APBASE;
	sc->as_aperture = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					     0, ~0, 1, RF_ACTIVE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size) i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	lockinit(&sc->as_lock, PCATCH, "agplk", 0, 0);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	cdevsw_add(&agp_cdevsw, -1, device_get_unit(dev));
	make_dev(&agp_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL,
		  0600, "agpgart");

	return 0;
}

int
agp_generic_detach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
	lockmgr(&sc->as_lock, LK_DRAIN, NULL, curthread); /* XXX */
	agp_flush_cache();
	cdevsw_remove(&agp_cdevsw, -1, device_get_unit(dev));
	return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}
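
A note on the "rate * 4" in the bootverbose printf above: the v3 rate
bits are field encodings, not multipliers.  Assuming the usual agpreg.h
values, a short illustration:

	/*
	 * AGP_MODE_V3_RATE_4x == 1 and AGP_MODE_V3_RATE_8x == 2, so
	 * rate * 4 yields the human-readable speed: "mode 4" or "mode 8".
	 */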

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	       & AGP_MODE_GET_SBA(mstatus)
	       & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		       type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_INTWAIT);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
			vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	lockmgr(&sc->as_lock, LK_EXCLUSIVE, NULL, curthread); /* XXX */

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
		return EINVAL;
	}

	if (offset < 0
	    || (offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
			      (int) offset);
		lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
		return EINVAL;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 *
	 * XXX Presumably, this needs to be the pci address on alpha
	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
	 * alpha AGP hardware to check.
	 */
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE).  If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((m->flags & PG_ZERO) == 0)
			vm_page_zero_fill(m);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
		vm_page_wire(m);

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
				offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_wakeup(m);
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				for (k = 0; k <= i; k += PAGE_SIZE) {
					m = vm_page_lookup(mem->am_obj,
							   OFF_TO_IDX(k));
					vm_page_unwire(m, 0);
				}
				lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
				return error;
			}
		}
		vm_page_wakeup(m);
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */

	return 0;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	lockmgr(&sc->as_lock, LK_EXCLUSIVE, NULL, curthread); /* XXX */

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_unwire(m, 0);
	}

	agp_flush_cache();
	AGP_FLUSH_TLB(dev);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	lockmgr(&sc->as_lock, LK_RELEASE, NULL, curthread); /* XXX */

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->aper_base = rman_get_start(sc->as_aperture);
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
			       alloc->type,
			       alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);

	}

	return EINVAL;
}

static int
agp_mmap(dev_t kdev, vm_offset_t offset, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset > AGP_GET_APERTURE(dev))
		return -1;
	return atop(rman_get_start(sc->as_aperture) + offset);
}
  818: /* Implementation of the kernel api */
  819: 
  820: device_t
  821: agp_find_device()
  822: {
  823: 	if (!agp_devclass)
  824: 		return 0;
  825: 	return devclass_get_device(agp_devclass, 0);
  826: }
  827: 
  828: enum agp_acquire_state
  829: agp_state(device_t dev)
  830: {
  831: 	struct agp_softc *sc = device_get_softc(dev);
  832: 	return sc->as_state;
  833: }
  834: 
  835: void
  836: agp_get_info(device_t dev, struct agp_info *info)
  837: {
  838: 	struct agp_softc *sc = device_get_softc(dev);
  839: 
  840: 	info->ai_mode =
  841: 		pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
  842: 	info->ai_aperture_base = rman_get_start(sc->as_aperture);
  843: 	info->ai_aperture_size = (rman_get_end(sc->as_aperture)
  844: 				  - rman_get_start(sc->as_aperture)) + 1;
  845: 	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
  846: 	info->ai_memory_allowed = sc->as_maxmem;
  847: 	info->ai_memory_used = sc->as_allocated;
  848: }
  849: 
  850: int
  851: agp_acquire(device_t dev)
  852: {
  853: 	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
  854: }
  855: 
  856: int
  857: agp_release(device_t dev)
  858: {
  859: 	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
  860: }
  861: 
  862: int
  863: agp_enable(device_t dev, u_int32_t mode)
  864: {
  865: 	return AGP_ENABLE(dev, mode);
  866: }
  867: 
  868: void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
  869: {
  870: 	return  (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
  871: }
  872: 
  873: void agp_free_memory(device_t dev, void *handle)
  874: {
  875: 	struct agp_memory *mem = (struct agp_memory *) handle;
  876: 	AGP_FREE_MEMORY(dev, mem);
  877: }
  878: 
  879: int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
  880: {
  881: 	struct agp_memory *mem = (struct agp_memory *) handle;
  882: 	return AGP_BIND_MEMORY(dev, mem, offset);
  883: }
  884: 
  885: int agp_unbind_memory(device_t dev, void *handle)
  886: {
  887: 	struct agp_memory *mem = (struct agp_memory *) handle;
  888: 	return AGP_UNBIND_MEMORY(dev, mem);
  889: }
  890: 
  891: void agp_memory_info(device_t dev, void *handle, struct
  892: 		     agp_memory_info *mi)
  893: {
  894: 	struct agp_memory *mem = (struct agp_memory *) handle;
  895: 
  896: 	mi->ami_size = mem->am_size;
  897: 	mi->ami_physical = mem->am_physical;
  898: 	mi->ami_offset = mem->am_offset;
  899: 	mi->ami_is_bound = mem->am_is_bound;
  900: }
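
Finally, the in-kernel API above as a hypothetical consumer might use
it (a sketch only; foo_attach_agp and the 1 MB binding at aperture
offset 0 are illustrative assumptions):

	static int
	foo_attach_agp(void)
	{
		struct agp_info ai;
		device_t agpdev;
		void *handle;
		int error;

		agpdev = agp_find_device();
		if (agpdev == NULL || agp_acquire(agpdev) != 0)
			return ENXIO;

		agp_get_info(agpdev, &ai);

		/* Enable the bus at whatever mode the bridge advertises. */
		agp_enable(agpdev, ai.ai_mode);

		/* Allocate 1 MB and bind it at the start of the aperture. */
		handle = agp_alloc_memory(agpdev, 0, 1024 * 1024);
		if (handle == NULL) {
			agp_release(agpdev);
			return ENOMEM;
		}
		error = agp_bind_memory(agpdev, handle, 0);
		if (error) {
			agp_free_memory(agpdev, handle);
			agp_release(agpdev);
		}
		return error;
	}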