File:  [DragonFly] / src / sys / dev / disk / ata / ata-disk.c
Revision 1.18
Wed May 19 22:52:40 2004 UTC by dillon
Branches: MAIN
CVS tags: HEAD
Device layer rollup commit.

* cdevsw_add() is now required.  cdevsw_add() and cdevsw_remove() may specify
  a mask/match pair indicating the range of supported minor numbers.  Multiple
  cdevsw_add() calls using the same major number, but distinctly different
  minor ranges, may be issued.  All drivers that previously failed to call
  cdevsw_add() now do so.
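
  As a rough illustration of the mask/match form described above (the exact
  cdevsw_add()/cdevsw_remove() signatures and the matching rule are assumed
  from this description, not taken verbatim from the commit), a driver
  splitting one major number across two cdevsws might look like this:

	/*
	 * Hypothetical sketch: two cdevsws share the same major number but
	 * claim disjoint minor ranges; a minor number is assumed to match a
	 * registration when (minor & mask) == match.
	 */
	extern struct cdevsw foo_a_cdevsw;	/* initialized like ad_cdevsw below */
	extern struct cdevsw foo_b_cdevsw;

	static void
	foo_register(void)
	{
		cdevsw_add(&foo_a_cdevsw, 0x80, 0x00);	/* minors with bit 7 clear */
		cdevsw_add(&foo_b_cdevsw, 0x80, 0x80);	/* minors with bit 7 set */
	}

	static void
	foo_unregister(void)
	{
		/* removal also marks all devices in each range as destroyed */
		cdevsw_remove(&foo_a_cdevsw, 0x80, 0x00);
		cdevsw_remove(&foo_b_cdevsw, 0x80, 0x80);
	}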

* cdevsw_remove() now automatically marks all devices within its supported
  range as being destroyed.

* vnode->v_rdev is no longer resolved when the vnode is created.  Instead,
  only v_udev (a newly added field) is resolved.  v_rdev is resolved when
  the vnode is opened and cleared on the last close.

* A great deal of code was making rather dubious assumptions with regard
  to the validity of devices associated with vnodes, primarily because
  device structures persisted by virtue of being indexed by (major, minor)
  instead of by (cdevsw, major, minor).  In particular, if you run a program
  which connects to a USB device and then you pull the USB device and plug
  it back in, the vnode subsystem will continue to believe that the device
  is open when, in fact, it isn't (because the device was destroyed and
  recreated).

  Note also that all the VFS mount procedures now check devices via v_udev
  instead of v_rdev prior to calling VOP_OPEN(), since v_rdev is NULL prior
  to the first open.
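
  A minimal sketch of such a check (the filesystem and helper names are
  hypothetical, and udev2dev() is assumed to return NODEV when no active
  cdevsw covers the udev, per the reference counting notes below):

	/*
	 * Hypothetical helper for a VFS mount path: validate the backing
	 * device through v_udev, since v_rdev is NULL before the first open.
	 */
	static int
	foofs_checkdev(struct vnode *devvp)
	{
		dev_t dev;

		dev = udev2dev(devvp->v_udev, 0); /* second-argument usage assumed */
		if (dev == NODEV)
			return (ENXIO);		/* destroyed or never registered */
		return (0);
	}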

* The disk layer's device interaction has been rewritten.  The disk layer
  (i.e. the slice and disklabel management layer) no longer overloads
  its data onto the device structure representing the underlying physical
  disk.  Instead, the disk layer uses the new cdevsw_add() functionality
  to register its own cdevsw using the underlying device's major number,
  and simply does NOT register the underlying device's cdevsw.  No
  confusion is created because the device hash is now based on
  (cdevsw, major, minor) rather than (major, minor); the ad driver's attach
  code, excerpted below, shows the new registration path.

  NOTE: This also means that underlying raw disk devices may use the entire
  device minor number instead of having to reserve the bits used by the disk
  layer, and also means that we can (theoretically) stack a fully
  disklabel-supported 'disk' on top of any block device.
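
  The ad driver in this very file shows the new arrangement: ad_attach()
  hands ad_cdevsw to disk_create() and never calls cdevsw_add() on it
  directly; per the description above, the disk layer then registers its
  own cdevsw under the underlying major number.  Excerpted from ad_attach()
  below:

	dev = disk_create(adp->lun, &adp->disk, 0, &ad_cdevsw);
	dev->si_drv1 = adp;
	dev->si_iosize_max = 256 * DEV_BSIZE;
	adp->dev = dev;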

* The new reference counting scheme prevents this stale-device problem by
  associating a device with a cdevsw and disconnecting the device from its
  cdevsw when the cdevsw is removed.  Additionally, all udev2dev() lookups
  run through the cdevsw mask/match and only successfully find devices
  still associated with an active cdevsw.

* Major work on MFS:  MFS no longer shortcuts vnode and device creation.  It
  now creates a real vnode and a real device and implements real open and
  close VOPs.  Additionally, due to the disk layer changes, MFS is no longer
  limited to 255 mounts.  The new limit is 16 million.  Since MFS creates a
  real device node, mount_mfs will now create a real /dev/mfs<PID> device
  that can be read from userland (e.g. so you can dump an MFS filesystem).

* BUF AND DEVICE STRATEGY changes.  The struct buf contains a b_dev field.
  In order to properly handle stacked devices, we now require that the b_dev
  field be initialized before the device strategy routine is called.  This
  required some additional work in various VFS implementations.  To enforce
  this requirement, biodone() now sets b_dev to NODEV.  The new disk layer
  adjusts b_dev before forwarding a request to the actual physical device.
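
  As a rough illustration of the rule (the stacking layer's softc fields and
  the d_strategy member name are assumed, not taken from this commit):

	/*
	 * Hypothetical strategy routine for a stacking layer.  The caller
	 * pointed b_dev at the stacked device; biodone() will reset it to
	 * NODEV, so b_dev must be re-pointed at the underlying physical
	 * device before every forwarded strategy call.
	 */
	static void
	stack_strategy(struct buf *bp)
	{
		struct stack_softc *sc = bp->b_dev->si_drv1;

		bp->b_dev = sc->phys_dev;		/* required before forwarding */
		sc->phys_cdevsw->d_strategy(bp);	/* hand off to the real device */
	}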

* A bug in the ISO CD boot sequence which resulted in a panic has been fixed.

Testing by: lots of people, but David Rhodus found the most egregious bugs.

    1: /*-
    2:  * Copyright (c) 1998,1999,2000,2001,2002 Søren Schmidt <sos@FreeBSD.org>
    3:  * All rights reserved.
    4:  *
    5:  * Redistribution and use in source and binary forms, with or without
    6:  * modification, are permitted provided that the following conditions
    7:  * are met:
    8:  * 1. Redistributions of source code must retain the above copyright
    9:  *    notice, this list of conditions and the following disclaimer,
   10:  *    without modification, immediately at the beginning of the file.
   11:  * 2. Redistributions in binary form must reproduce the above copyright
   12:  *    notice, this list of conditions and the following disclaimer in the
   13:  *    documentation and/or other materials provided with the distribution.
   14:  * 3. The name of the author may not be used to endorse or promote products
   15:  *    derived from this software without specific prior written permission.
   16:  *
   17:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27:  *
   28:  * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.60.2.24 2003/01/30 07:19:59 sos Exp $
   29:  * $DragonFly: src/sys/dev/disk/ata/ata-disk.c,v 1.18 2004/05/19 22:52:40 dillon Exp $
   30:  */
   31: 
   32: #include "opt_ata.h"
   33: #include <sys/param.h>
   34: #include <sys/systm.h>
   35: #include <sys/ata.h>
   36: #include <sys/kernel.h>
   37: #include <sys/malloc.h>
   38: #include <sys/buf.h>
   39: #include <sys/bus.h>
   40: #include <sys/conf.h>
   41: #include <sys/disk.h>
   42: #include <sys/devicestat.h>
   43: #include <sys/cons.h>
   44: #include <sys/sysctl.h>
   45: #include <sys/syslog.h>
   46: #include <vm/vm.h>
   47: #include <vm/pmap.h>
   48: #include <machine/md_var.h>
   49: #include <machine/bus.h>
   50: #include <machine/clock.h>
   51: #include <sys/rman.h>
   52: #include "ata-all.h"
   53: #include "ata-disk.h"
   54: #include "ata-raid.h"
   55: #include <sys/proc.h>
   56: #include <sys/buf2.h>
   57: 
   58: /* device structures */
   59: static d_open_t		adopen;
   60: static d_close_t	adclose;
   61: static d_strategy_t	adstrategy;
   62: static d_dump_t		addump;
   63: 
   64: static struct cdevsw ad_cdevsw = {
   65: 	/* name */	"ad",
   66: 	/* maj */	116,
   67: 	/* flags */	D_DISK,
   68: 	/* port */      NULL,
   69: 	/* clone */	NULL,
   70: 
   71: 	/* open */	adopen,
   72: 	/* close */	adclose,
   73: 	/* read */	physread,
   74: 	/* write */	physwrite,
   75: 	/* ioctl */	noioctl,
   76: 	/* poll */	nopoll,
   77: 	/* mmap */	nommap,
   78: 	/* strategy */	adstrategy,
   79: 	/* dump */	addump,
   80: 	/* psize */	nopsize
   81: };
   82: 
   83: /* prototypes */
   84: static void ad_invalidatequeue(struct ad_softc *, struct ad_request *);
   85: static int ad_tagsupported(struct ad_softc *);
   86: static void ad_timeout(struct ad_request *);
   87: static void ad_free(struct ad_request *);
   88: static int ad_version(u_int16_t);
   89: 
   90: /* misc defines */
   91: #define AD_MAX_RETRIES	3
   92: 
   93: /* internal vars */
   94: static u_int32_t adp_lun_map = 0;
   95: static int ata_dma = 1;
   96: static int ata_wc = 1;
   97: static int ata_tags = 0; 
   98: TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
   99: TUNABLE_INT("hw.ata.wc", &ata_wc);
  100: TUNABLE_INT("hw.ata.tags", &ata_tags);
  101: static MALLOC_DEFINE(M_AD, "AD driver", "ATA disk driver");
  102: 
  103: /* sysctl vars */
  104: SYSCTL_DECL(_hw_ata);
  105: SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RD, &ata_dma, 0,
  106: 	   "ATA disk DMA mode control");
  107: SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RD, &ata_wc, 0,
  108: 	   "ATA disk write caching");
  109: SYSCTL_INT(_hw_ata, OID_AUTO, tags, CTLFLAG_RD, &ata_tags, 0,
  110: 	   "ATA disk tagged queuing support");
  111: 
  112: void
  113: ad_attach(struct ata_device *atadev, int alreadylocked)
  114: {
  115:     struct ad_softc *adp;
  116:     dev_t dev;
  117: 
  118:     adp = malloc(sizeof(struct ad_softc), M_AD, M_WAITOK | M_ZERO);
  119: 
  120:     KKASSERT(atadev->channel->req_mpipe.max_count != 0);
  121: 
  122:     adp->device = atadev;
  123: #ifdef ATA_STATIC_ID
  124:     adp->lun = (device_get_unit(atadev->channel->dev)<<1)+ATA_DEV(atadev->unit);
  125: #else
  126:     adp->lun = ata_get_lun(&adp_lun_map);
  127: #endif
  128:     ata_set_name(atadev, "ad", adp->lun);
  129:     adp->heads = atadev->param->heads;
  130:     adp->sectors = atadev->param->sectors;
  131:     adp->total_secs = atadev->param->cylinders * adp->heads * adp->sectors;	
  132:     bufq_init(&adp->queue);
  133: 
  134:     /* does this device need oldstyle CHS addressing */
  135:     if (!ad_version(atadev->param->version_major) || 
  136: 	!(atadev->param->atavalid & ATA_FLAG_54_58) || !atadev->param->lba_size)
  137: 	adp->flags |= AD_F_CHS_USED;
  138: 
  139:     /* use the 28bit LBA size if valid */
  140:     if (atadev->param->cylinders == 16383 &&
  141: 	adp->total_secs < atadev->param->lba_size)
  142: 	adp->total_secs = atadev->param->lba_size;
  143: 
  144:     /* use the 48bit LBA size if valid */
  145:     if (atadev->param->support.address48 &&
  146: 	atadev->param->lba_size48 > 268435455)
  147: 	adp->total_secs = atadev->param->lba_size48;
  148:     
  149:     if (!alreadylocked)
  150: 	ATA_SLEEPLOCK_CH(atadev->channel, ATA_CONTROL);
  151:     /* use multiple sectors/interrupt if device supports it */
  152:     adp->transfersize = DEV_BSIZE;
  153:     if (ad_version(atadev->param->version_major)) {
  154: 	int secsperint = max(1, min(atadev->param->sectors_intr, 16));
  155: 
  156: 	if (!ata_command(atadev, ATA_C_SET_MULTI, 0, secsperint,
  157: 			 0, ATA_WAIT_INTR) && !ata_wait(atadev, 0))
  158: 	    adp->transfersize *= secsperint;
  159:     }
  160: 
  161:     /* enable read caching if not default on device */
  162:     if (ata_command(atadev, ATA_C_SETFEATURES,
  163: 		    0, 0, ATA_C_F_ENAB_RCACHE, ATA_WAIT_INTR))
  164: 	ata_prtdev(atadev, "enabling readahead cache failed\n");
  165: 
  166:     /* enable write caching if allowed and not default on device */
  167:     if (ata_wc || (ata_tags && ad_tagsupported(adp))) {
  168: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  169: 			0, 0, ATA_C_F_ENAB_WCACHE, ATA_WAIT_INTR))
  170: 	    ata_prtdev(atadev, "enabling write cache failed\n");
  171:     }
  172:     else {
  173: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  174: 			0, 0, ATA_C_F_DIS_WCACHE, ATA_WAIT_INTR))
  175: 	    ata_prtdev(atadev, "disabling write cache failed\n");
  176:     }
  177: 
  178:     /* use DMA if allowed and if drive/controller supports it */
  179:     if (ata_dma)
  180: 	ata_dmainit(atadev, ata_pmode(atadev->param), 
  181: 		    ata_wmode(atadev->param), ata_umode(atadev->param));
  182:     else
  183: 	ata_dmainit(atadev, ata_pmode(atadev->param), -1, -1);
  184: 
  185:     /* use tagged queueing if allowed and supported */
  186:     if (ata_tags && ad_tagsupported(adp)) {
  187: 	adp->num_tags = atadev->param->queuelen;
  188: 	adp->flags |= AD_F_TAG_ENABLED;
  189: 	adp->device->channel->flags |= ATA_QUEUED;
  190: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  191: 			0, 0, ATA_C_F_DIS_RELIRQ, ATA_WAIT_INTR))
  192: 	    ata_prtdev(atadev, "disabling release interrupt failed\n");
  193: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  194: 			0, 0, ATA_C_F_DIS_SRVIRQ, ATA_WAIT_INTR))
  195: 	    ata_prtdev(atadev, "disabling service interrupt failed\n");
  196:     }
  197: 
  198:     ATA_UNLOCK_CH(atadev->channel);
  199: 
  200:     devstat_add_entry(&adp->stats, "ad", adp->lun, DEV_BSIZE,
  201: 		      DEVSTAT_NO_ORDERED_TAGS,
  202: 		      DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
  203: 		      DEVSTAT_PRIORITY_DISK);
  204: 
  205:     dev = disk_create(adp->lun, &adp->disk, 0, &ad_cdevsw);
  206:     dev->si_drv1 = adp;
  207:     dev->si_iosize_max = 256 * DEV_BSIZE;
  208:     adp->dev = dev;
  209: 
  210:     /* construct the disklabel */
  211:     bzero(&adp->disk.d_label, sizeof(struct disklabel));
  212:     adp->disk.d_label.d_secsize = DEV_BSIZE;
  213:     adp->disk.d_label.d_nsectors = adp->sectors;
  214:     adp->disk.d_label.d_ntracks = adp->heads;
  215:     adp->disk.d_label.d_ncylinders = adp->total_secs/(adp->heads*adp->sectors);
  216:     adp->disk.d_label.d_secpercyl = adp->sectors * adp->heads;
  217:     adp->disk.d_label.d_secperunit = adp->total_secs;
  218: 
  219:     atadev->driver = adp;
  220:     atadev->flags = 0;
  221: 
  222:     /* if this disk belongs to an ATA RAID don't print the probe */
  223:     if (ata_raiddisk_attach(adp))
  224: 	adp->flags |= AD_F_RAID_SUBDISK;
  225:     else {
  226: 	if (atadev->driver) {
  227: 	    ad_print(adp);
  228: 	    ata_enclosure_print(atadev);
  229: 	}
  230:     }
  231: }
  232: 
  233: void
  234: ad_detach(struct ata_device *atadev, int flush) /* get rid of flush XXX SOS */
  235: {
  236:     struct ad_softc *adp = atadev->driver;
  237:     struct ad_request *request;
  238:     struct buf *bp;
  239: 
  240:     atadev->flags |= ATA_D_DETACHING;
  241:     ata_prtdev(atadev, "removed from configuration\n");
  242:     ad_invalidatequeue(adp, NULL);
  243:     TAILQ_FOREACH(request, &atadev->channel->ata_queue, chain) {
  244: 	if (request->softc != adp)
  245: 	    continue;
  246: 	TAILQ_REMOVE(&atadev->channel->ata_queue, request, chain);
  247: 	request->bp->b_error = ENXIO;
  248: 	request->bp->b_flags |= B_ERROR;
  249: 	biodone(request->bp);
  250: 	ad_free(request);
  251:     }
  252:     ata_dmafree(atadev);
  253:     while ((bp = bufq_first(&adp->queue))) {
  254: 	bufq_remove(&adp->queue, bp); 
  255: 	bp->b_error = ENXIO;
  256: 	bp->b_flags |= B_ERROR;
  257: 	biodone(bp);
  258:     }
  259:     disk_invalidate(&adp->disk);
  260:     devstat_remove_entry(&adp->stats);
  261:     disk_destroy(&adp->disk);
  262:     if (flush) {
  263: 	if (ata_command(atadev, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
  264: 	    ata_prtdev(atadev, "flushing cache on detach failed\n");
  265:     }
  266:     if (adp->flags & AD_F_RAID_SUBDISK)
  267: 	ata_raiddisk_detach(adp);
  268:     ata_free_name(atadev);
  269:     ata_free_lun(&adp_lun_map, adp->lun);
  270:     atadev->driver = NULL;
  271:     atadev->flags = 0;
  272:     free(adp, M_AD);
  273: }
  274: 
  275: static int
  276: adopen(dev_t dev, int flags, int fmt, struct thread *td)
  277: {
  278:     struct ad_softc *adp = dev->si_drv1;
  279: 
  280:     if (adp->flags & AD_F_RAID_SUBDISK)
  281: 	return EBUSY;
  282:     return 0;
  283: }
  284: 
  285: static int
  286: adclose(dev_t dev, int flags, int fmt, struct thread *td)
  287: {
  288:     struct ad_softc *adp = dev->si_drv1;
  289:     int s;
  290: 
  291:     s = splbio();	/* interlock non-atomic channel lock */
  292:     ATA_SLEEPLOCK_CH(adp->device->channel, ATA_CONTROL);
  293:     if (ata_command(adp->device, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
  294: 	ata_prtdev(adp->device, "flushing cache on close failed\n");
  295:     ATA_UNLOCK_CH(adp->device->channel);
  296:     splx(s);
  297:     return 0;
  298: }
  299: 
  300: /*
  301:  * note: always use the passed device rather than bp->b_dev, as the bp
  302:  * may have been translated through several layers.
  303:  */
  304: static void 
  305: adstrategy(struct buf *bp)
  306: {
  307:     struct ad_softc *adp = bp->b_dev->si_drv1;
  308:     int s;
  309: 
  310:     if (adp->device->flags & ATA_D_DETACHING) {
  311: 	bp->b_error = ENXIO;
  312: 	bp->b_flags |= B_ERROR;
  313: 	biodone(bp);
  314: 	return;
  315:     }
  316:     s = splbio();
  317:     bufqdisksort(&adp->queue, bp);
  318:     splx(s);
  319:     ata_start(adp->device->channel);
  320: }
  321: 
  322: int
  323: addump(dev_t dev, u_int count, u_int blkno, u_int secsize)
  324: {
  325:     struct ad_softc *adp = dev->si_drv1;
  326:     struct ad_request request;
  327:     vm_paddr_t addr = 0;
  328:     long blkcnt;
  329:     int dumppages = MAXDUMPPGS;
  330:     int i;
  331: 
  332:     if (!adp)
  333: 	return ENXIO;
  334: 
  335:     /* force PIO mode for dumps */
  336:     adp->device->mode = ATA_PIO;
  337:     ata_reinit(adp->device->channel);
  338: 
  339:     blkcnt = howmany(PAGE_SIZE, secsize);
  340: 
  341:     while (count > 0) {
  342: 	caddr_t va = NULL;
  343: 	DELAY(1000);
  344: 
  345: 	if ((count / blkcnt) < dumppages)
  346: 	    dumppages = count / blkcnt;
  347: 
  348: 	for (i = 0; i < dumppages; ++i) {
  349: 	    vm_paddr_t a = addr + (i * PAGE_SIZE);
  350: 	    if (is_physical_memory(a))
  351: 		va = pmap_kenter_temporary(trunc_page(a), i);
  352: 	    else
  353: 		va = pmap_kenter_temporary(trunc_page(0), i);
  354: 	}
  355: 
  356: 	bzero(&request, sizeof(struct ad_request));
  357: 	request.softc = adp;
  358: 	request.blockaddr = blkno;
  359: 	request.bytecount = PAGE_SIZE * dumppages;
  360: 	request.data = va;
  361: 
  362: 	while (request.bytecount > 0) {
  363: 	    ad_transfer(&request);
  364: 	    if (request.flags & ADR_F_ERROR)
  365: 		return EIO;
  366: 	    request.donecount += request.currentsize;
  367: 	    request.bytecount -= request.currentsize;
  368: 	    DELAY(20);
  369: 	}
  370: 
  371: 	if (dumpstatus(addr, (off_t)count * DEV_BSIZE) < 0)
  372: 	    return EINTR;
  373: 
  374: 	blkno += blkcnt * dumppages;
  375: 	count -= blkcnt * dumppages;
  376: 	addr += PAGE_SIZE * dumppages;
  377:     }
  378: 
  379:     if (ata_wait(adp->device, ATA_S_READY | ATA_S_DSC) < 0)
  380: 	ata_prtdev(adp->device, "timeout waiting for final ready\n");
  381:     return 0;
  382: }
  383: 
  384: void
  385: ad_start(struct ata_device *atadev)
  386: {
  387:     struct ad_softc *adp = atadev->driver;
  388:     struct buf *bp = bufq_first(&adp->queue);
  389:     struct ad_request *request;
  390:     int tag = 0;
  391: 
  392:     if (!bp)
  393: 	return;
  394: 
  395:     /* if tagged queueing enabled get next free tag */
  396:     if (adp->flags & AD_F_TAG_ENABLED) {
  397: 	while (tag <= adp->num_tags && adp->tags[tag])
  398: 	    tag++;
  399: 	if (tag > adp->num_tags )
  400: 	    return;
  401:     }
  402: 
  403:     /*
  404:      * Allocate a request.  The allocation can only fail if the pipeline
  405:      * is full, in which case the request will be picked up later when
  406:      * ad_start() is called after another request completes.
  407:      */
  408:     request = mpipe_alloc_nowait(&atadev->channel->req_mpipe);
  409:     if (request == NULL) {
  410: 	ata_prtdev(atadev, "pipeline full allocating request in ad_start\n");
  411: 	return;
  412:     }
  413: 
  414:     /* setup request */
  415:     request->softc = adp;
  416:     request->bp = bp;
  417:     request->blockaddr = bp->b_pblkno;
  418:     request->bytecount = bp->b_bcount;
  419:     request->data = bp->b_data;
  420:     request->tag = tag;
  421:     if (bp->b_flags & B_READ) 
  422: 	request->flags |= ADR_F_READ;
  423:     if (adp->device->mode >= ATA_DMA) {
  424: 	if (ata_dmaalloc(atadev, M_NOWAIT) != 0) {
  425: 	    mpipe_free(&atadev->channel->req_mpipe, request);
  426: 	    ata_prtdev(atadev, "pipeline full allocated dmabuf in ad_start\n");
  427: 	    /* do not revert to PIO, wait for ad_start after I/O completion */
  428: 	    return;
  429: 	}
  430:     }
  431: 
  432:     /* insert in tag array */
  433:     adp->tags[tag] = request;
  434: 
  435:     /* remove from drive queue */
  436:     bufq_remove(&adp->queue, bp); 
  437: 
  438:     /* link onto controller queue */
  439:     TAILQ_INSERT_TAIL(&atadev->channel->ata_queue, request, chain);
  440: }
  441: 
  442: int
  443: ad_transfer(struct ad_request *request)
  444: {
  445:     struct ad_softc *adp;
  446:     u_int64_t lba;
  447:     u_int32_t count, max_count;
  448:     u_int8_t cmd;
  449:     int flags = ATA_IMMEDIATE;
  450: 
  451:     /* get request params */
  452:     adp = request->softc;
  453: 
  454:     /* calculate transfer details */
  455:     lba = request->blockaddr + (request->donecount / DEV_BSIZE);
  456:    
  457:     if (request->donecount == 0) {
  458: 
  459: 	/* start timeout for this transfer */
  460: 	if (dumping)
  461: 	    request->timeout_handle.callout = NULL;
  462: 	else
  463: 	    request->timeout_handle = 
  464: 		timeout((timeout_t*)ad_timeout, request, 10 * hz);
  465: 
  466: 	/* setup transfer parameters */
  467: 	count = howmany(request->bytecount, DEV_BSIZE);
  468: 	max_count = adp->device->param->support.address48 ? 65536 : 256;
  469: 	if (count > max_count) {
  470: 	    ata_prtdev(adp->device,
  471: 		       "count %d size transfers not supported\n", count);
  472: 	    count = max_count;
  473: 	}
  474: 
  475: 	if (adp->flags & AD_F_CHS_USED) {
  476: 	    int sector = (lba % adp->sectors) + 1;
  477: 	    int cylinder = lba / (adp->sectors * adp->heads);
  478: 	    int head = (lba % (adp->sectors * adp->heads)) / adp->sectors;
  479: 
  480: 	    lba = (sector&0xff) | ((cylinder&0xffff)<<8) | ((head&0xf)<<24);
  481: 	    adp->device->flags |= ATA_D_USE_CHS;
  482: 	}
  483: 
  484: 	/* setup first transfer length */
  485: 	request->currentsize = min(request->bytecount, adp->transfersize);
  486: 
  487: 	devstat_start_transaction(&adp->stats);
  488: 
  489: 	/* does this drive & transfer work with DMA ? */
  490: 	request->flags &= ~ADR_F_DMA_USED;
  491: 	if (adp->device->mode >= ATA_DMA &&
  492: 	    !ata_dmasetup(adp->device, request->data, request->bytecount)) {
  493: 	    request->flags |= ADR_F_DMA_USED;
  494: 	    request->currentsize = request->bytecount;
  495: 
  496: 	    /* do we have tags enabled ? */
  497: 	    if (adp->flags & AD_F_TAG_ENABLED) {
  498: 		cmd = (request->flags & ADR_F_READ) ?
  499: 		    ATA_C_READ_DMA_QUEUED : ATA_C_WRITE_DMA_QUEUED;
  500: 
  501: 		if (ata_command(adp->device, cmd, lba,
  502: 				request->tag << 3, count, flags)) {
  503: 		    ata_prtdev(adp->device, "error executing command");
  504: 		    goto transfer_failed;
  505: 		}
  506: 		if (ata_wait(adp->device, ATA_S_READY)) {
  507: 		    ata_prtdev(adp->device, "timeout waiting for READY\n");
  508: 		    goto transfer_failed;
  509: 		}
  510: 		adp->outstanding++;
  511: 
  512: 		/* if ATA bus RELEASE check for SERVICE */
  513: 		if (adp->flags & AD_F_TAG_ENABLED &&
  514: 		    ATA_INB(adp->device->channel->r_io, ATA_IREASON) &
  515: 		    ATA_I_RELEASE)
  516: 		    return ad_service(adp, 1);
  517: 	    }
  518: 	    else {
  519: 		cmd = (request->flags & ADR_F_READ) ?
  520: 		    ATA_C_READ_DMA : ATA_C_WRITE_DMA;
  521: 
  522: 		if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
  523: 		    ata_prtdev(adp->device, "error executing command");
  524: 		    goto transfer_failed;
  525: 		}
  526: #if 0
  527: 		/*
  528: 		 * wait for data transfer phase
  529: 		 *
  530: 		 * well this should be here according to specs, but older
  531: 		 * Promise controllers don't like it, they lock up!
  532: 		 */
  533: 		if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
  534: 		    ata_prtdev(adp->device, "timeout waiting for data phase\n");
  535: 		    goto transfer_failed;
  536: 		}
  537: #endif
  538: 	    }
  539: 
  540: 	    /* start transfer, return and wait for interrupt */
  541: 	    ata_dmastart(adp->device, request->data, request->bytecount,
  542: 			request->flags & ADR_F_READ);
  543: 	    return ATA_OP_CONTINUES;
  544: 	}
  545: 
  546: 	/* does this drive support multi sector transfers ? */
  547: 	if (request->currentsize > DEV_BSIZE)
  548: 	    cmd = request->flags&ADR_F_READ ? ATA_C_READ_MUL : ATA_C_WRITE_MUL;
  549: 
  550: 	/* just plain old single sector transfer */
  551: 	else
  552: 	    cmd = request->flags&ADR_F_READ ? ATA_C_READ : ATA_C_WRITE;
  553: 
  554: 	if (ata_command(adp->device, cmd, lba, count, 0, flags)){
  555: 	    ata_prtdev(adp->device, "error executing command");
  556: 	    goto transfer_failed;
  557: 	}
  558:     }
  559:    
  560:     /* calculate this transfer length */
  561:     request->currentsize = min(request->bytecount, adp->transfersize);
  562: 
  563:     /* if this is a PIO read operation, return and wait for interrupt */
  564:     if (request->flags & ADR_F_READ)
  565: 	return ATA_OP_CONTINUES;
  566: 
  567:     /* ready to write PIO data ? */
  568:     if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
  569: 	ata_prtdev(adp->device, "timeout waiting for DRQ");
  570: 	goto transfer_failed;
  571:     }
  572: 
  573:     /* output the data */
  574:     if (adp->device->channel->flags & ATA_USE_16BIT)
  575: 	ATA_OUTSW(adp->device->channel->r_io, ATA_DATA,
  576: 		  (void *)((uintptr_t)request->data + request->donecount),
  577: 		  request->currentsize / sizeof(int16_t));
  578:     else
  579: 	ATA_OUTSL(adp->device->channel->r_io, ATA_DATA,
  580: 		  (void *)((uintptr_t)request->data + request->donecount),
  581: 		  request->currentsize / sizeof(int32_t));
  582:     return ATA_OP_CONTINUES;
  583: 
  584: transfer_failed:
  585:     untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
  586:     ad_invalidatequeue(adp, request);
  587:     printf(" - resetting\n");
  588: 
  589:     /* if retries still permit, reinject this request */
  590:     if (request->retries++ < AD_MAX_RETRIES)
  591: 	TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  592:     else {
  593: 	/* retries all used up, return error */
  594: 	request->bp->b_error = EIO;
  595: 	request->bp->b_flags |= B_ERROR;
  596: 	request->bp->b_resid = request->bytecount;
  597: 	devstat_end_transaction_buf(&adp->stats, request->bp);
  598: 	biodone(request->bp);
  599: 	ad_free(request);
  600:     }
  601:     ata_reinit(adp->device->channel);
  602:     return ATA_OP_CONTINUES;
  603: }
  604: 
  605: int
  606: ad_interrupt(struct ad_request *request)
  607: {
  608:     struct ad_softc *adp = request->softc;
  609:     int dma_stat = 0;
  610: 
  611:     /* finish DMA transfer */
  612:     if (request->flags & ADR_F_DMA_USED)
  613: 	dma_stat = ata_dmadone(adp->device);
  614: 
  615:     /* do we have a corrected soft error ? */
  616:     if (adp->device->channel->status & ATA_S_CORR)
  617: 	diskerr(request->bp, "soft error (ECC corrected)", LOG_PRINTF,
  618: 		request->blockaddr + (request->donecount / DEV_BSIZE),
  619: 		&adp->disk.d_label);
  620: 
  621:     /* did any real errors happen ? */
  622:     if ((adp->device->channel->status & ATA_S_ERROR) ||
  623: 	(request->flags & ADR_F_DMA_USED && dma_stat & ATA_BMSTAT_ERROR)) {
  624: 	adp->device->channel->error =
  625: 	    ATA_INB(adp->device->channel->r_io, ATA_ERROR);
  626: 	diskerr(request->bp, (adp->device->channel->error & ATA_E_ICRC) ?
  627: 		"UDMA ICRC error" : "hard error", LOG_PRINTF,
  628: 		request->blockaddr + (request->donecount / DEV_BSIZE),
  629: 		&adp->disk.d_label);
  630: 
  631: 	/* if this is a UDMA CRC error, reinject request */
  632: 	if (request->flags & ADR_F_DMA_USED &&
  633: 	    adp->device->channel->error & ATA_E_ICRC) {
  634: 	    untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
  635: 	    ad_invalidatequeue(adp, request);
  636: 
  637: 	    if (request->retries++ < AD_MAX_RETRIES)
  638: 		printf(" retrying\n");
  639: 	    else {
  640: 		ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
  641: 		printf(" falling back to PIO mode\n");
  642: 	    }
  643: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  644: 	    return ATA_OP_FINISHED;
  645: 	}
  646: 
  647: 	/* if using DMA, try once again in PIO mode */
  648: 	if (request->flags & ADR_F_DMA_USED) {
  649: 	    untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
  650: 	    ad_invalidatequeue(adp, request);
  651: 	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
  652: 	    request->flags |= ADR_F_FORCE_PIO;
  653: 	    printf(" trying PIO mode\n");
  654: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  655: 	    return ATA_OP_FINISHED;
  656: 	}
  657: 
  658: 	request->flags |= ADR_F_ERROR;
  659: 	printf(" status=%02x error=%02x\n", 
  660: 	       adp->device->channel->status, adp->device->channel->error);
  661:     }
  662: 
  663:     /* if we arrived here with forced PIO mode, DMA doesn't work right */
  664:     if (request->flags & ADR_F_FORCE_PIO && !(request->flags & ADR_F_ERROR))
  665: 	ata_prtdev(adp->device, "DMA problem fallback to PIO mode\n");
  666: 
  667:     /* if this was a PIO read operation, get the data */
  668:     if (!(request->flags & ADR_F_DMA_USED) &&
  669: 	(request->flags & (ADR_F_READ | ADR_F_ERROR)) == ADR_F_READ) {
  670: 
  671: 	/* ready to receive data? */
  672: 	if ((adp->device->channel->status & ATA_S_READY) == 0)
  673: 	    ata_prtdev(adp->device, "read interrupt arrived early");
  674: 
  675: 	if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) != 0) {
  676: 	    ata_prtdev(adp->device, "read error detected (too) late");
  677: 	    request->flags |= ADR_F_ERROR;
  678: 	}
  679: 	else {
  680: 	    /* data ready, read in */
  681: 	    if (adp->device->channel->flags & ATA_USE_16BIT)
  682: 		ATA_INSW(adp->device->channel->r_io, ATA_DATA,
  683: 			 (void*)((uintptr_t)request->data + request->donecount),
  684: 			 request->currentsize / sizeof(int16_t));
  685: 	    else
  686: 		ATA_INSL(adp->device->channel->r_io, ATA_DATA,
  687: 			 (void*)((uintptr_t)request->data + request->donecount),
  688: 			 request->currentsize / sizeof(int32_t));
  689: 	}
  690:     }
  691: 
  692:     /* finish up transfer */
  693:     if (request->flags & ADR_F_ERROR) {
  694: 	request->bp->b_error = EIO;
  695: 	request->bp->b_flags |= B_ERROR;
  696:     } 
  697:     else {
  698: 	request->bytecount -= request->currentsize;
  699: 	request->donecount += request->currentsize;
  700: 	if (request->bytecount > 0) {
  701: 	    ad_transfer(request);
  702: 	    return ATA_OP_CONTINUES;
  703: 	}
  704:     }
  705: 
  706:     /* disarm timeout for this transfer */
  707:     untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
  708: 
  709:     request->bp->b_resid = request->bytecount;
  710: 
  711:     devstat_end_transaction_buf(&adp->stats, request->bp);
  712:     biodone(request->bp);
  713:     ad_free(request);
  714:     adp->outstanding--;
  715: 
  716:     /* check for SERVICE (tagged operations only) */
  717:     return ad_service(adp, 1);
  718: }
  719: 
  720: int
  721: ad_service(struct ad_softc *adp, int change)
  722: {
  723:     /* do we have to check the other device on this channel ? */
  724:     if (adp->device->channel->flags & ATA_QUEUED && change) {
  725: 	int device = adp->device->unit;
  726: 
  727: 	if (adp->device->unit == ATA_MASTER) {
  728: 	    if ((adp->device->channel->devices & ATA_ATA_SLAVE) &&
  729: 		(adp->device->channel->device[SLAVE].driver) &&
  730: 		((struct ad_softc *) (adp->device->channel->
  731: 		 device[SLAVE].driver))->flags & AD_F_TAG_ENABLED)
  732: 		device = ATA_SLAVE;
  733: 	}
  734: 	else {
  735: 	    if ((adp->device->channel->devices & ATA_ATA_MASTER) &&
  736: 		(adp->device->channel->device[MASTER].driver) &&
  737: 		((struct ad_softc *) (adp->device->channel->
  738: 		 device[MASTER].driver))->flags & AD_F_TAG_ENABLED)
  739: 		device = ATA_MASTER;
  740: 	}
  741: 	if (device != adp->device->unit &&
  742: 	    ((struct ad_softc *)
  743: 	     (adp->device->channel->
  744: 	      device[ATA_DEV(device)].driver))->outstanding > 0) {
  745: 	    ATA_OUTB(adp->device->channel->r_io, ATA_DRIVE, ATA_D_IBM | device);
  746: 	    adp = adp->device->channel->device[ATA_DEV(device)].driver;
  747: 	    DELAY(1);
  748: 	}
  749:     }
  750:     adp->device->channel->status =
  751: 	ATA_INB(adp->device->channel->r_altio, ATA_ALTSTAT);
  752:  
  753:     /* do we have a SERVICE request from the drive ? */
  754:     if (adp->flags & AD_F_TAG_ENABLED &&
  755: 	adp->outstanding > 0 &&
  756: 	adp->device->channel->status & ATA_S_SERVICE) {
  757: 	struct ad_request *request;
  758: 	int tag;
  759: 
  760: 	/* check for error */
  761: 	if (adp->device->channel->status & ATA_S_ERROR) {
  762: 	    ata_prtdev(adp->device, "Oops! controller says s=0x%02x e=0x%02x\n",
  763: 		       adp->device->channel->status,
  764: 		       adp->device->channel->error);
  765: 	    ad_invalidatequeue(adp, NULL);
  766: 	    return ATA_OP_FINISHED;
  767: 	}
  768: 
  769: 	/* issue SERVICE cmd */
  770: 	if (ata_command(adp->device, ATA_C_SERVICE, 0, 0, 0, ATA_IMMEDIATE)) {
  771: 	    ata_prtdev(adp->device, "problem executing SERVICE cmd\n");
  772: 	    ad_invalidatequeue(adp, NULL);
  773: 	    return ATA_OP_FINISHED;
  774: 	}
  775: 
  776: 	/* setup the transfer environment when ready */
  777: 	if (ata_wait(adp->device, ATA_S_READY)) {
  778: 	    ata_prtdev(adp->device, "SERVICE timeout tag=%d s=%02x e=%02x\n",
  779: 		       ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3,
  780: 		       adp->device->channel->status,
  781: 		       adp->device->channel->error);
  782: 	    ad_invalidatequeue(adp, NULL);
  783: 	    return ATA_OP_FINISHED;
  784: 	}
  785: 	tag = ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3;
  786: 	if (!(request = adp->tags[tag])) {
  787: 	    ata_prtdev(adp->device, "no request for tag=%d\n", tag);	
  788: 	    ad_invalidatequeue(adp, NULL);
  789: 	    return ATA_OP_FINISHED;
  790: 	}
  791: 	ATA_FORCELOCK_CH(adp->device->channel, ATA_ACTIVE_ATA);
  792: 	adp->device->channel->running = request;
  793: 	request->serv++;
  794: 
  795: 	/* start DMA transfer when ready */
  796: 	if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
  797: 	    ata_prtdev(adp->device, "timeout starting DMA s=%02x e=%02x\n",
  798: 		       adp->device->channel->status,
  799: 		       adp->device->channel->error);
  800: 	    ad_invalidatequeue(adp, NULL);
  801: 	    return ATA_OP_FINISHED;
  802: 	}
  803: 	ata_dmastart(adp->device, request->data, request->bytecount,
  804: 		    request->flags & ADR_F_READ);
  805: 	return ATA_OP_CONTINUES;
  806:     }
  807:     return ATA_OP_FINISHED;
  808: }
  809: 
  810: static void
  811: ad_free(struct ad_request *request)
  812: {
  813:     int s = splbio();
  814:     ata_dmafree(request->softc->device);
  815:     request->softc->tags[request->tag] = NULL;
  816:     mpipe_free(&request->softc->device->channel->req_mpipe, request);
  817:     splx(s);
  818: }
  819: 
  820: static void
  821: ad_invalidatequeue(struct ad_softc *adp, struct ad_request *request)
  822: {
  823:     /* if tags used invalidate all other tagged transfers */
  824:     if (adp->flags & AD_F_TAG_ENABLED) {
  825: 	struct ad_request *tmpreq;
  826: 	int tag;
  827: 
  828: 	ata_prtdev(adp->device, "invalidating queued requests\n");
  829: 	for (tag = 0; tag <= adp->num_tags; tag++) {
  830: 	    tmpreq = adp->tags[tag];
  831: 	    adp->tags[tag] = NULL;
  832: 	    if (tmpreq == request || tmpreq == NULL)
  833: 		continue;
  834: 	    untimeout((timeout_t *)ad_timeout, tmpreq, tmpreq->timeout_handle);
  835: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, tmpreq, chain);
  836: 	}
  837: 	if (ata_command(adp->device, ATA_C_NOP,
  838: 			0, 0, ATA_C_F_FLUSHQUEUE, ATA_WAIT_READY))
  839: 	    ata_prtdev(adp->device, "flush queue failed\n");
  840: 	adp->outstanding = 0;
  841:     }
  842: }
  843: 
  844: static int
  845: ad_tagsupported(struct ad_softc *adp)
  846: {
  847:     const char *good[] = {"IBM-DPTA", "IBM-DTLA", NULL};
  848:     int i = 0;
  849: 
  850:     switch (adp->device->channel->chiptype) {
  851:     case 0x4d33105a: /* Promise controllers before TX2 don't work with tagged queuing */
  852:     case 0x4d38105a:
  853:     case 0x0d30105a:
  854:     case 0x4d30105a:  
  855: 	return 0;
  856:     }
  857: 
  858:     /* check that drive does DMA, has tags enabled, and is one we know works */
  859:     if (adp->device->mode >= ATA_DMA && adp->device->param->support.queued && 
  860: 	adp->device->param->enabled.queued) {
  861: 	while (good[i] != NULL) {
  862: 	    if (!strncmp(adp->device->param->model, good[i], strlen(good[i])))
  863: 		return 1;
  864: 	    i++;
  865: 	}
  866: 	/* 
  867: 	 * check IBM's new obscure way of naming drives 
  868: 	 * we want "IC" (IBM CORP) and "AT" or "AV" (ATA interface)
  869: 	 * but don't care about the other info (size, capacity, etc.)
  870: 	 */
  871: 	if (!strncmp(adp->device->param->model, "IC", 2) &&
  872: 	    (!strncmp(adp->device->param->model + 8, "AT", 2) ||
  873: 	     !strncmp(adp->device->param->model + 8, "AV", 2)))
  874: 		return 1;
  875:     }
  876:     return 0;
  877: }
  878: 
  879: static void
  880: ad_timeout(struct ad_request *request)
  881: {
  882:     struct ad_softc *adp = request->softc;
  883: 
  884:     adp->device->channel->running = NULL;
  885:     ata_prtdev(adp->device, "%s command timeout tag=%d serv=%d - resetting\n",
  886: 	       (request->flags & ADR_F_READ) ? "READ" : "WRITE",
  887: 	       request->tag, request->serv);
  888: 
  889:     if (request->flags & ADR_F_DMA_USED) {
  890: 	ata_dmadone(adp->device);
  891: 	ad_invalidatequeue(adp, request);
  892: 	if (request->retries == AD_MAX_RETRIES) {
  893: 	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
  894: 	    ata_prtdev(adp->device, "trying fallback to PIO mode\n");
  895: 	    request->retries = 0;
  896: 	}
  897:     }
  898: 
  899:     /* if retries still permit, reinject this request */
  900:     if (request->retries++ < AD_MAX_RETRIES) {
  901: 	TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  902:     }
  903:     else {
  904: 	/* retries all used up, return error */
  905: 	request->bp->b_error = EIO;
  906: 	request->bp->b_flags |= B_ERROR;
  907: 	devstat_end_transaction_buf(&adp->stats, request->bp);
  908: 	biodone(request->bp);
  909: 	ad_free(request);
  910:     }
  911:     ata_reinit(adp->device->channel);
  912: }
  913: 
  914: void
  915: ad_reinit(struct ata_device *atadev)
  916: {
  917:     struct ad_softc *adp = atadev->driver;
  918: 
  919:     /* reinit disk parameters */
  920:     ad_invalidatequeue(atadev->driver, NULL);
  921:     ata_command(atadev, ATA_C_SET_MULTI, 0,
  922: 		adp->transfersize / DEV_BSIZE, 0, ATA_WAIT_READY);
  923:     if (adp->device->mode >= ATA_DMA)
  924: 	ata_dmainit(atadev, ata_pmode(adp->device->param),
  925: 		    ata_wmode(adp->device->param),
  926: 		    ata_umode(adp->device->param));
  927:     else
  928: 	ata_dmainit(atadev, ata_pmode(adp->device->param), -1, -1);
  929: }
  930: 
  931: void
  932: ad_print(struct ad_softc *adp) 
  933: {
  934:     if (bootverbose) {
  935: 	ata_prtdev(adp->device, "<%.40s/%.8s> ATA-%d disk at ata%d-%s\n", 
  936: 		   adp->device->param->model, adp->device->param->revision,
  937: 		   ad_version(adp->device->param->version_major), 
  938: 		   device_get_unit(adp->device->channel->dev),
  939: 		   (adp->device->unit == ATA_MASTER) ? "master" : "slave");
  940: 
  941: 	ata_prtdev(adp->device,
  942: 		   "%lluMB (%llu sectors), %llu C, %u H, %u S, %u B\n",
  943: 		   (unsigned long long)(adp->total_secs /
  944: 		   ((1024L*1024L)/DEV_BSIZE)),
  945: 		   (unsigned long long) adp->total_secs,
  946: 		   (unsigned long long) (adp->total_secs /
  947: 		    (adp->heads * adp->sectors)),
  948: 		   adp->heads, adp->sectors, DEV_BSIZE);
  949: 
  950: 	ata_prtdev(adp->device, "%d secs/int, %d depth queue, %s%s\n", 
  951: 		   adp->transfersize / DEV_BSIZE, adp->num_tags + 1,
  952: 		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
  953: 		   ata_mode2str(adp->device->mode));
  954: 
  955: 	ata_prtdev(adp->device, "piomode=%d dmamode=%d udmamode=%d cblid=%d\n",
  956: 		   ata_pmode(adp->device->param), ata_wmode(adp->device->param),
  957: 		   ata_umode(adp->device->param), 
  958: 		   adp->device->param->hwres_cblid);
  959: 
  960:     }
  961:     else
  962: 	ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
  963: 		   (unsigned long long)(adp->total_secs /
  964: 		   ((1024L * 1024L) / DEV_BSIZE)),
  965: 		   adp->device->param->model,
  966: 		   (unsigned long long)(adp->total_secs /
  967: 		    (adp->heads*adp->sectors)),
  968: 		   adp->heads, adp->sectors,
  969: 		   device_get_unit(adp->device->channel->dev),
  970: 		   (adp->device->unit == ATA_MASTER) ? "master" : "slave",
  971: 		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
  972: 		   ata_mode2str(adp->device->mode));
  973: }
  974: 
  975: static int
  976: ad_version(u_int16_t version)
  977: {
  978:     int bit;
  979: 
  980:     if (version == 0xffff)
  981: 	return 0;
  982:     for (bit = 15; bit >= 0; bit--)
  983: 	if (version & (1<<bit))
  984: 	    return bit;
  985:     return 0;
  986: }