File:  [DragonFly] / src / sys / dev / disk / ata / ata-disk.c
Revision 1.17: download - view: text, annotated - select for diffs
Thu May 13 23:49:14 2004 UTC (10 years, 2 months ago) by dillon
Branches: MAIN
CVS tags: HEAD
device switch 1/many: Remove d_autoq, add d_clone (where d_autoq was).

d_autoq was used to allow the device port dispatch to mix old-style synchronous
calls with new-style messaging calls within a particular device.  It was never
used for that purpose.

d_clone will be more fully implemented as work continues.  We are going to
install d_port in the dev_t (struct specinfo) structure itself and d_clone
will be needed to allow devices to 'revector' the port on a minor-number
by minor-number basis, in particular allowing minor numbers to be directly
dispatched to distinct threads.  This is something we will be needing later
on.

    1: /*-
    2:  * Copyright (c) 1998,1999,2000,2001,2002 Søren Schmidt <sos@FreeBSD.org>
    3:  * All rights reserved.
    4:  *
    5:  * Redistribution and use in source and binary forms, with or without
    6:  * modification, are permitted provided that the following conditions
    7:  * are met:
    8:  * 1. Redistributions of source code must retain the above copyright
    9:  *    notice, this list of conditions and the following disclaimer,
   10:  *    without modification, immediately at the beginning of the file.
   11:  * 2. Redistributions in binary form must reproduce the above copyright
   12:  *    notice, this list of conditions and the following disclaimer in the
   13:  *    documentation and/or other materials provided with the distribution.
   14:  * 3. The name of the author may not be used to endorse or promote products
   15:  *    derived from this software without specific prior written permission.
   16:  *
   17:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27:  *
   28:  * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.60.2.24 2003/01/30 07:19:59 sos Exp $
   29:  * $DragonFly: src/sys/dev/disk/ata/ata-disk.c,v 1.17 2004/05/13 23:49:14 dillon Exp $
   30:  */
   31: 
   32: #include "opt_ata.h"
   33: #include <sys/param.h>
   34: #include <sys/systm.h>
   35: #include <sys/ata.h>
   36: #include <sys/kernel.h>
   37: #include <sys/malloc.h>
   38: #include <sys/buf.h>
   39: #include <sys/bus.h>
   40: #include <sys/conf.h>
   41: #include <sys/disk.h>
   42: #include <sys/devicestat.h>
   43: #include <sys/cons.h>
   44: #include <sys/sysctl.h>
   45: #include <sys/syslog.h>
   46: #include <vm/vm.h>
   47: #include <vm/pmap.h>
   48: #include <machine/md_var.h>
   49: #include <machine/bus.h>
   50: #include <machine/clock.h>
   51: #include <sys/rman.h>
   52: #include "ata-all.h"
   53: #include "ata-disk.h"
   54: #include "ata-raid.h"
   55: #include <sys/proc.h>
   56: #include <sys/buf2.h>
   57: 
/* device structures */
static d_open_t		adopen;
static d_close_t	adclose;
static d_strategy_t	adstrategy;
static d_dump_t		addump;

/* character device switch for "ad" disks (block major 116, D_DISK) */
static struct cdevsw ad_cdevsw = {
	/* name */	"ad",
	/* maj */	116,
	/* flags */	D_DISK,
	/* port */      NULL,
	/* clone */	NULL,

	/* open */	adopen,
	/* close */	adclose,
	/* read */	physread,
	/* write */	physwrite,
	/* ioctl */	noioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	adstrategy,
	/* dump */	addump,
	/* psize */	nopsize
};
   82: 
/* prototypes */
static void ad_invalidatequeue(struct ad_softc *, struct ad_request *);
static int ad_tagsupported(struct ad_softc *);
static void ad_timeout(struct ad_request *);
static void ad_free(struct ad_request *);
static int ad_version(u_int16_t);

/* misc defines */
#define AD_MAX_RETRIES	3	/* give up on a request after this many tries */

/* internal vars */
static u_int32_t adp_lun_map = 0;	/* bitmap of allocated disk luns */
static int ata_dma = 1;			/* use DMA transfers when available */
static int ata_wc = 1;			/* enable drive write caching */
static int ata_tags = 0;		/* tagged queueing, off by default */
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
TUNABLE_INT("hw.ata.wc", &ata_wc);
TUNABLE_INT("hw.ata.tags", &ata_tags);
static MALLOC_DEFINE(M_AD, "AD driver", "ATA disk driver");

/* sysctl vars (read-only mirrors of the boot-time tunables above) */
SYSCTL_DECL(_hw_ata);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RD, &ata_dma, 0,
	   "ATA disk DMA mode control");
SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RD, &ata_wc, 0,
	   "ATA disk write caching");
SYSCTL_INT(_hw_ata, OID_AUTO, tags, CTLFLAG_RD, &ata_tags, 0,
	   "ATA disk tagged queuing support");
  111: 
/*
 * ad_attach() - attach an ATA disk to the system.
 *
 * Allocates and fills in the per-disk softc, derives the capacity from
 * the drive's IDENTIFY data (CHS, 28-bit LBA or 48-bit LBA as
 * appropriate), negotiates transfer parameters (multi-sector PIO,
 * read/write caching, DMA mode, tagged queueing), registers the
 * devstat and disk entries and constructs the in-core disklabel.
 *
 * If 'alreadylocked' is non-zero the caller already holds the channel
 * lock; we only acquire it ourselves otherwise, and unlock it in
 * either case before registering the disk.
 */
void
ad_attach(struct ata_device *atadev, int alreadylocked)
{
    struct ad_softc *adp;
    dev_t dev;

    adp = malloc(sizeof(struct ad_softc), M_AD, M_WAITOK | M_ZERO);

    /* request mpipe must have been set up by the channel code */
    KKASSERT(atadev->channel->req_mpipe.max_count != 0);

    adp->device = atadev;
#ifdef ATA_STATIC_ID
    /* lun wired to channel unit and master/slave position */
    adp->lun = (device_get_unit(atadev->channel->dev)<<1)+ATA_DEV(atadev->unit);
#else
    /* lun allocated dynamically from the shared bitmap */
    adp->lun = ata_get_lun(&adp_lun_map);
#endif
    ata_set_name(atadev, "ad", adp->lun);
    adp->heads = atadev->param->heads;
    adp->sectors = atadev->param->sectors;
    adp->total_secs = atadev->param->cylinders * adp->heads * adp->sectors;
    bufq_init(&adp->queue);

    /* does this device need oldstyle CHS addressing */
    if (!ad_version(atadev->param->version_major) || 
	!(atadev->param->atavalid & ATA_FLAG_54_58) || !atadev->param->lba_size)
	adp->flags |= AD_F_CHS_USED;

    /* use the 28bit LBA size if valid (16383 is the capped cylinder value) */
    if (atadev->param->cylinders == 16383 &&
	adp->total_secs < atadev->param->lba_size)
	adp->total_secs = atadev->param->lba_size;

    /* use the 48bit LBA size if valid (drive is past the 28bit limit) */
    if (atadev->param->support.address48 &&
	atadev->param->lba_size48 > 268435455)
	adp->total_secs = atadev->param->lba_size48;
    
    if (!alreadylocked)
	ATA_SLEEPLOCK_CH(atadev->channel, ATA_CONTROL);
    /* use multiple sectors/interrupt if device supports it */
    adp->transfersize = DEV_BSIZE;
    if (ad_version(atadev->param->version_major)) {
	int secsperint = max(1, min(atadev->param->sectors_intr, 16));

	if (!ata_command(atadev, ATA_C_SET_MULTI, 0, secsperint,
			 0, ATA_WAIT_INTR) && !ata_wait(atadev, 0))
	adp->transfersize *= secsperint;
    }

    /* enable read caching if not default on device */
    if (ata_command(atadev, ATA_C_SETFEATURES,
		    0, 0, ATA_C_F_ENAB_RCACHE, ATA_WAIT_INTR))
	ata_prtdev(atadev, "enabling readahead cache failed\n");

    /* enable write caching if allowed and not default on device */
    if (ata_wc || (ata_tags && ad_tagsupported(adp))) {
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_ENAB_WCACHE, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "enabling write cache failed\n");
    }
    else {
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_DIS_WCACHE, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "disabling write cache failed\n");
    }

    /* use DMA if allowed and if drive/controller supports it */
    if (ata_dma)
	ata_dmainit(atadev, ata_pmode(atadev->param), 
		    ata_wmode(atadev->param), ata_umode(atadev->param));
    else
	ata_dmainit(atadev, ata_pmode(atadev->param), -1, -1);

    /* use tagged queueing if allowed and supported */
    if (ata_tags && ad_tagsupported(adp)) {
	adp->num_tags = atadev->param->queuelen;
	adp->flags |= AD_F_TAG_ENABLED;
	adp->device->channel->flags |= ATA_QUEUED;
	/* release/service interrupts are polled for instead, see ad_service */
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_DIS_RELIRQ, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "disabling release interrupt failed\n");
	if (ata_command(atadev, ATA_C_SETFEATURES,
			0, 0, ATA_C_F_DIS_SRVIRQ, ATA_WAIT_INTR))
	    ata_prtdev(atadev, "disabling service interrupt failed\n");
    }

    ATA_UNLOCK_CH(atadev->channel);

    devstat_add_entry(&adp->stats, "ad", adp->lun, DEV_BSIZE,
		      DEVSTAT_NO_ORDERED_TAGS,
		      DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
		      DEVSTAT_PRIORITY_DISK);

    dev = disk_create(adp->lun, &adp->disk, 0, &ad_cdevsw);
    dev->si_drv1 = adp;
    dev->si_iosize_max = 256 * DEV_BSIZE;
    adp->dev = dev;

    /* construct the disklabel */
    bzero(&adp->disk.d_label, sizeof(struct disklabel));
    adp->disk.d_label.d_secsize = DEV_BSIZE;
    adp->disk.d_label.d_nsectors = adp->sectors;
    adp->disk.d_label.d_ntracks = adp->heads;
    adp->disk.d_label.d_ncylinders = adp->total_secs/(adp->heads*adp->sectors);
    adp->disk.d_label.d_secpercyl = adp->sectors * adp->heads;
    adp->disk.d_label.d_secperunit = adp->total_secs;

    atadev->driver = adp;
    atadev->flags = 0;

    /* if this disk belongs to an ATA RAID dont print the probe */
    if (ata_raiddisk_attach(adp))
	adp->flags |= AD_F_RAID_SUBDISK;
    else {
	if (atadev->driver) {
	    ad_print(adp);
	    ata_enclosure_print(atadev);
	}
    }
}
  232: 
  233: void
  234: ad_detach(struct ata_device *atadev, int flush) /* get rid of flush XXX SOS */
  235: {
  236:     struct ad_softc *adp = atadev->driver;
  237:     struct ad_request *request;
  238:     struct buf *bp;
  239: 
  240:     atadev->flags |= ATA_D_DETACHING;
  241:     ata_prtdev(atadev, "removed from configuration\n");
  242:     ad_invalidatequeue(adp, NULL);
  243:     TAILQ_FOREACH(request, &atadev->channel->ata_queue, chain) {
  244: 	if (request->softc != adp)
  245: 	    continue;
  246: 	TAILQ_REMOVE(&atadev->channel->ata_queue, request, chain);
  247: 	request->bp->b_error = ENXIO;
  248: 	request->bp->b_flags |= B_ERROR;
  249: 	biodone(request->bp);
  250: 	ad_free(request);
  251:     }
  252:     ata_dmafree(atadev);
  253:     while ((bp = bufq_first(&adp->queue))) {
  254: 	bufq_remove(&adp->queue, bp); 
  255: 	bp->b_error = ENXIO;
  256: 	bp->b_flags |= B_ERROR;
  257: 	biodone(bp);
  258:     }
  259:     disk_invalidate(&adp->disk);
  260:     devstat_remove_entry(&adp->stats);
  261:     disk_destroy(&adp->disk);
  262:     if (flush) {
  263: 	if (ata_command(atadev, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
  264: 	    ata_prtdev(atadev, "flushing cache on detach failed\n");
  265:     }
  266:     if (adp->flags & AD_F_RAID_SUBDISK)
  267: 	ata_raiddisk_detach(adp);
  268:     ata_free_name(atadev);
  269:     ata_free_lun(&adp_lun_map, adp->lun);
  270:     atadev->driver = NULL;
  271:     atadev->flags = 0;
  272:     free(adp, M_AD);
  273: }
  274: 
  275: static int
  276: adopen(dev_t dev, int flags, int fmt, struct thread *td)
  277: {
  278:     struct ad_softc *adp = dev->si_drv1;
  279: 
  280:     if (adp->flags & AD_F_RAID_SUBDISK)
  281: 	return EBUSY;
  282:     return 0;
  283: }
  284: 
  285: static int
  286: adclose(dev_t dev, int flags, int fmt, struct thread *td)
  287: {
  288:     struct ad_softc *adp = dev->si_drv1;
  289:     int s;
  290: 
  291:     s = splbio();	/* interlock non-atomic channel lock */
  292:     ATA_SLEEPLOCK_CH(adp->device->channel, ATA_CONTROL);
  293:     if (ata_command(adp->device, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
  294: 	ata_prtdev(adp->device, "flushing cache on close failed\n");
  295:     ATA_UNLOCK_CH(adp->device->channel);
  296:     splx(s);
  297:     return 0;
  298: }
  299: 
  300: static void 
  301: adstrategy(struct buf *bp)
  302: {
  303:     struct ad_softc *adp = bp->b_dev->si_drv1;
  304:     int s;
  305: 
  306:     if (adp->device->flags & ATA_D_DETACHING) {
  307: 	bp->b_error = ENXIO;
  308: 	bp->b_flags |= B_ERROR;
  309: 	biodone(bp);
  310: 	return;
  311:     }
  312:     s = splbio();
  313:     bufqdisksort(&adp->queue, bp);
  314:     splx(s);
  315:     ata_start(adp->device->channel);
  316: }
  317: 
/*
 * addump() - kernel crash dump entry point.
 *
 * Writes 'count' DEV_BSIZE blocks of physical memory to the disk
 * starting at 'blkno', using polled transfers (the drive is forced to
 * PIO mode first).  Memory is mapped a few pages at a time with
 * pmap_kenter_temporary(); holes in physical memory are substituted
 * with page 0 so the transfer length stays constant.
 */
int
addump(dev_t dev)
{
    struct ad_softc *adp = dev->si_drv1;
    struct ad_request request;
    u_int count, blkno, secsize;
    vm_paddr_t addr = 0;
    long blkcnt;
    int dumppages = MAXDUMPPGS;
    int error;
    int i;

    if ((error = disk_dumpcheck(dev, &count, &blkno, &secsize)))
	return error;
	
    if (!adp)
	return ENXIO;

    /* force PIO mode for dumps */
    adp->device->mode = ATA_PIO;
    ata_reinit(adp->device->channel);

    /* sectors per page */
    blkcnt = howmany(PAGE_SIZE, secsize);

    while (count > 0) {
	caddr_t va = NULL;
	DELAY(1000);

	/* shrink the batch for the final partial chunk */
	if ((count / blkcnt) < dumppages)
	    dumppages = count / blkcnt;

	/* map up to 'dumppages' contiguous pages at 'addr' */
	for (i = 0; i < dumppages; ++i) {
	    vm_paddr_t a = addr + (i * PAGE_SIZE);
	    if (is_physical_memory(a))
		va = pmap_kenter_temporary(trunc_page(a), i);
	    else
		va = pmap_kenter_temporary(trunc_page(0), i);
	}

	/* hand-rolled request, driven synchronously below */
	bzero(&request, sizeof(struct ad_request));
	request.softc = adp;
	request.blockaddr = blkno;
	request.bytecount = PAGE_SIZE * dumppages;
	request.data = va;

	while (request.bytecount > 0) {
	    ad_transfer(&request);
	    if (request.flags & ADR_F_ERROR)
		return EIO;
	    request.donecount += request.currentsize;
	    request.bytecount -= request.currentsize;
	    DELAY(20);
	}

	/* progress report; aborts the dump if the operator interrupts */
	if (dumpstatus(addr, (off_t)count * DEV_BSIZE) < 0)
	    return EINTR;

	blkno += blkcnt * dumppages;
	count -= blkcnt * dumppages;
	addr += PAGE_SIZE * dumppages;
    }

    if (ata_wait(adp->device, ATA_S_READY | ATA_S_DSC) < 0)
	ata_prtdev(adp->device, "timeout waiting for final ready\n");
    return 0;
}
  384: 
/*
 * ad_start() - pull the next buf off the disk queue and turn it into
 * an ad_request on the channel queue.
 *
 * With tagged queueing a free tag slot must be found first.  If no tag
 * is free, or the request/DMA allocations fail because the pipeline is
 * full, we just return: this routine is called again when an
 * outstanding request completes.
 */
void
ad_start(struct ata_device *atadev)
{
    struct ad_softc *adp = atadev->driver;
    struct buf *bp = bufq_first(&adp->queue);
    struct ad_request *request;
    int tag = 0;

    if (!bp)
	return;

    /* if tagged queueing enabled get next free tag */
    if (adp->flags & AD_F_TAG_ENABLED) {
	/* NOTE(review): scans slots 0..num_tags inclusive - assumes
	   tags[] has num_tags+1 entries, verify against ata-disk.h */
	while (tag <= adp->num_tags && adp->tags[tag])
	    tag++;
	if (tag > adp->num_tags )
	    return;
    }

    /*
     * Allocate a request.  The allocation can only fail if the pipeline
     * is full, in which case the request will be picked up later when
     * ad_start() is called after another request completes.
     */
    request = mpipe_alloc_nowait(&atadev->channel->req_mpipe);
    if (request == NULL) {
	ata_prtdev(atadev, "pipeline full allocating request in ad_start\n");
	return;
    }

    /* setup request */
    request->softc = adp;
    request->bp = bp;
    request->blockaddr = bp->b_pblkno;
    request->bytecount = bp->b_bcount;
    request->data = bp->b_data;
    request->tag = tag;
    if (bp->b_flags & B_READ) 
	request->flags |= ADR_F_READ;
    if (adp->device->mode >= ATA_DMA) {
	if (ata_dmaalloc(atadev, M_NOWAIT) != 0) {
	    mpipe_free(&atadev->channel->req_mpipe, request);
	    ata_prtdev(atadev, "pipeline full allocated dmabuf in ad_start\n");
	    /* do not revert to PIO, wait for ad_start after I/O completion */
	    return;
	}
    }

    /* insert in tag array */
    adp->tags[tag] = request;

    /* remove from drive queue */
    bufq_remove(&adp->queue, bp); 

    /* link onto controller queue */
    TAILQ_INSERT_TAIL(&atadev->channel->ata_queue, request, chain);
}
  442: 
/*
 * ad_transfer() - start or continue the transfer described by 'request'.
 *
 * On the first call for a request (donecount == 0) this arms the
 * per-request timeout, computes the address (LBA, or packed CHS for
 * pre-LBA drives) and sector count, and issues the appropriate command:
 * tagged DMA, plain DMA, multi-sector PIO or single-sector PIO.  DMA
 * transfers then complete in the interrupt handler.  For PIO writes,
 * and on subsequent calls (continuation from ad_interrupt()), the next
 * chunk of data is pushed to the drive's data port here.
 *
 * Returns an ATA_OP_* code for the channel state machine.
 */
int
ad_transfer(struct ad_request *request)
{
    struct ad_softc *adp;
    u_int64_t lba;
    u_int32_t count, max_count;
    u_int8_t cmd;
    int flags = ATA_IMMEDIATE;

    /* get request params */
    adp = request->softc;

    /* calculate transfer details */
    lba = request->blockaddr + (request->donecount / DEV_BSIZE);
   
    if (request->donecount == 0) {

	/* start timeout for this transfer; no timeout while dumping */
	if (dumping)
	    request->timeout_handle.callout = NULL;
	else
	    request->timeout_handle = 
		timeout((timeout_t*)ad_timeout, request, 10 * hz);

	/* setup transfer parameters */
	count = howmany(request->bytecount, DEV_BSIZE);
	/* sector count field is 16 bits with 48bit LBA, 8 bits otherwise */
	max_count = adp->device->param->support.address48 ? 65536 : 256;
	if (count > max_count) {
	    ata_prtdev(adp->device,
		       "count %d size transfers not supported\n", count);
	    count = max_count;
	}

	if (adp->flags & AD_F_CHS_USED) {
	    int sector = (lba % adp->sectors) + 1;
	    int cylinder = lba / (adp->sectors * adp->heads);
	    int head = (lba % (adp->sectors * adp->heads)) / adp->sectors;

	    /* pack CHS into the lba value: sector | cyl<<8 | head<<24 */
	    lba = (sector&0xff) | ((cylinder&0xffff)<<8) | ((head&0xf)<<24);
	    adp->device->flags |= ATA_D_USE_CHS;
	}

	/* setup first transfer length */
	request->currentsize = min(request->bytecount, adp->transfersize);

	devstat_start_transaction(&adp->stats);

	/* does this drive & transfer work with DMA ? */
	request->flags &= ~ADR_F_DMA_USED;
	if (adp->device->mode >= ATA_DMA &&
	    !ata_dmasetup(adp->device, request->data, request->bytecount)) {
	    request->flags |= ADR_F_DMA_USED;
	    request->currentsize = request->bytecount;

	    /* do we have tags enabled ? */
	    if (adp->flags & AD_F_TAG_ENABLED) {
		cmd = (request->flags & ADR_F_READ) ?
		    ATA_C_READ_DMA_QUEUED : ATA_C_WRITE_DMA_QUEUED;

		/* NB: "error executing command" messages in this function
		   deliberately lack a newline; it is supplied by the
		   " - resetting" printed at transfer_failed below */
		if (ata_command(adp->device, cmd, lba,
				request->tag << 3, count, flags)) {
		    ata_prtdev(adp->device, "error executing command");
		    goto transfer_failed;
		}
		if (ata_wait(adp->device, ATA_S_READY)) {
		    ata_prtdev(adp->device, "timeout waiting for READY\n");
		    goto transfer_failed;
		}
		adp->outstanding++;

		/* if ATA bus RELEASE check for SERVICE */
		if (adp->flags & AD_F_TAG_ENABLED &&
		    ATA_INB(adp->device->channel->r_io, ATA_IREASON) &
		    ATA_I_RELEASE)
		    return ad_service(adp, 1);
	    }
	    else {
		cmd = (request->flags & ADR_F_READ) ?
		    ATA_C_READ_DMA : ATA_C_WRITE_DMA;

		if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
		    ata_prtdev(adp->device, "error executing command");
		    goto transfer_failed;
		}
#if 0
		/*
		 * wait for data transfer phase
		 *
		 * well this should be here acording to specs, but older
		 * promise controllers doesn't like it, they lockup!
		 */
		if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
		    ata_prtdev(adp->device, "timeout waiting for data phase\n");
		    goto transfer_failed;
		}
#endif
	    }

	    /* start transfer, return and wait for interrupt */
	    ata_dmastart(adp->device, request->data, request->bytecount,
			request->flags & ADR_F_READ);
	    return ATA_OP_CONTINUES;
	}

	/* does this drive support multi sector transfers ? */
	if (request->currentsize > DEV_BSIZE)
	    cmd = request->flags&ADR_F_READ ? ATA_C_READ_MUL : ATA_C_WRITE_MUL;

	/* just plain old single sector transfer */
	else
	    cmd = request->flags&ADR_F_READ ? ATA_C_READ : ATA_C_WRITE;

	if (ata_command(adp->device, cmd, lba, count, 0, flags)){
	    ata_prtdev(adp->device, "error executing command");
	    goto transfer_failed;
	}
    }
   
    /* calculate this transfer length */
    request->currentsize = min(request->bytecount, adp->transfersize);

    /* if this is a PIO read operation, return and wait for interrupt */
    if (request->flags & ADR_F_READ)
	return ATA_OP_CONTINUES;

    /* ready to write PIO data ? */
    if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
	ata_prtdev(adp->device, "timeout waiting for DRQ");
	goto transfer_failed;
    }

    /* output the data, 16 or 32 bits wide depending on the channel */
    if (adp->device->channel->flags & ATA_USE_16BIT)
	ATA_OUTSW(adp->device->channel->r_io, ATA_DATA,
		  (void *)((uintptr_t)request->data + request->donecount),
		  request->currentsize / sizeof(int16_t));
    else
	ATA_OUTSL(adp->device->channel->r_io, ATA_DATA,
		  (void *)((uintptr_t)request->data + request->donecount),
		  request->currentsize / sizeof(int32_t));
    return ATA_OP_CONTINUES;

transfer_failed:
    untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
    ad_invalidatequeue(adp, request);
    printf(" - resetting\n");

    /* if retries still permit, reinject this request */
    if (request->retries++ < AD_MAX_RETRIES)
	TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
    else {
	/* retries all used up, return error */
	request->bp->b_error = EIO;
	request->bp->b_flags |= B_ERROR;
	request->bp->b_resid = request->bytecount;
	devstat_end_transaction_buf(&adp->stats, request->bp);
	biodone(request->bp);
	ad_free(request);
    }
    ata_reinit(adp->device->channel);
    return ATA_OP_CONTINUES;
}
  605: 
/*
 * ad_interrupt() - completion handler for the current chunk of a request.
 *
 * Finishes any DMA in progress, reports corrected (ECC) soft errors,
 * and on hard errors retries the request: UDMA CRC errors are
 * reinjected (falling back to PIO after AD_MAX_RETRIES), other DMA
 * errors are retried once in forced PIO mode.  For PIO reads the data
 * is pulled from the drive here.  When the request still has bytes
 * left, the transfer is continued via ad_transfer(); otherwise the buf
 * is completed and the request freed.  Returns an ATA_OP_* code.
 */
int
ad_interrupt(struct ad_request *request)
{
    struct ad_softc *adp = request->softc;
    int dma_stat = 0;

    /* finish DMA transfer */
    if (request->flags & ADR_F_DMA_USED)
	dma_stat = ata_dmadone(adp->device);

    /* do we have a corrected soft error ? */
    if (adp->device->channel->status & ATA_S_CORR)
	diskerr(request->bp, "soft error (ECC corrected)", LOG_PRINTF,
		request->blockaddr + (request->donecount / DEV_BSIZE),
		&adp->disk.d_label);

    /* did any real errors happen ? */
    if ((adp->device->channel->status & ATA_S_ERROR) ||
	(request->flags & ADR_F_DMA_USED && dma_stat & ATA_BMSTAT_ERROR)) {
	adp->device->channel->error =
	    ATA_INB(adp->device->channel->r_io, ATA_ERROR);
	diskerr(request->bp, (adp->device->channel->error & ATA_E_ICRC) ?
		"UDMA ICRC error" : "hard error", LOG_PRINTF,
		request->blockaddr + (request->donecount / DEV_BSIZE),
		&adp->disk.d_label);

	/* if this is a UDMA CRC error, reinject request */
	if (request->flags & ADR_F_DMA_USED &&
	    adp->device->channel->error & ATA_E_ICRC) {
	    untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
	    ad_invalidatequeue(adp, request);

	    if (request->retries++ < AD_MAX_RETRIES)
		printf(" retrying\n");
	    else {
		/* too many CRC errors, drop the drive back to PIO */
		ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
		printf(" falling back to PIO mode\n");
	    }
	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
	    return ATA_OP_FINISHED;
	}

	/* if using DMA, try once again in PIO mode */
	if (request->flags & ADR_F_DMA_USED) {
	    untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
	    ad_invalidatequeue(adp, request);
	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
	    request->flags |= ADR_F_FORCE_PIO;
	    printf(" trying PIO mode\n");
	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
	    return ATA_OP_FINISHED;
	}

	/* non-DMA error, or PIO retry failed as well: hard failure */
	request->flags |= ADR_F_ERROR;
	printf(" status=%02x error=%02x\n", 
	       adp->device->channel->status, adp->device->channel->error);
    }

    /* if we arrived here with forced PIO mode, DMA doesn't work right */
    if (request->flags & ADR_F_FORCE_PIO && !(request->flags & ADR_F_ERROR))
	ata_prtdev(adp->device, "DMA problem fallback to PIO mode\n");

    /* if this was a PIO read operation, get the data */
    if (!(request->flags & ADR_F_DMA_USED) &&
	(request->flags & (ADR_F_READ | ADR_F_ERROR)) == ADR_F_READ) {

	/* ready to receive data? */
	if ((adp->device->channel->status & ATA_S_READY) == 0)
	    ata_prtdev(adp->device, "read interrupt arrived early");

	if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) != 0) {
	    ata_prtdev(adp->device, "read error detected (too) late");
	    request->flags |= ADR_F_ERROR;
	}
	else {
	    /* data ready, read in (16 or 32 bits wide per channel flags) */
	    if (adp->device->channel->flags & ATA_USE_16BIT)
		ATA_INSW(adp->device->channel->r_io, ATA_DATA,
			 (void*)((uintptr_t)request->data + request->donecount),
			 request->currentsize / sizeof(int16_t));
	    else
		ATA_INSL(adp->device->channel->r_io, ATA_DATA,
			 (void*)((uintptr_t)request->data + request->donecount),
			 request->currentsize / sizeof(int32_t));
	}
    }

    /* finish up transfer */
    if (request->flags & ADR_F_ERROR) {
	request->bp->b_error = EIO;
	request->bp->b_flags |= B_ERROR;
    } 
    else {
	request->bytecount -= request->currentsize;
	request->donecount += request->currentsize;
	/* more to do? continue the PIO transfer */
	if (request->bytecount > 0) {
	    ad_transfer(request);
	    return ATA_OP_CONTINUES;
	}
    }

    /* disarm timeout for this transfer */
    untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);

    request->bp->b_resid = request->bytecount;

    devstat_end_transaction_buf(&adp->stats, request->bp);
    biodone(request->bp);
    ad_free(request);
    adp->outstanding--;

    /* check for SERVICE (tagged operations only) */
    return ad_service(adp, 1);
}
  720: 
/*
 * ad_service() - check for and start a tagged-queueing SERVICE request.
 *
 * With tagged queueing a drive may release the bus and later raise
 * SERVICE when it is ready to transfer data for a queued command.
 * When 'change' is set and both devices on the channel use tagged
 * queueing, we may switch our attention to the other device if it has
 * outstanding requests.  If the (possibly switched-to) drive signals
 * SERVICE, the SERVICE command is issued, the tag is read back from
 * the count register, and the DMA transfer for the matching request is
 * started.  Returns an ATA_OP_* code.
 */
int
ad_service(struct ad_softc *adp, int change)
{
    /* do we have to check the other device on this channel ? */
    if (adp->device->channel->flags & ATA_QUEUED && change) {
	int device = adp->device->unit;

	if (adp->device->unit == ATA_MASTER) {
	    if ((adp->device->channel->devices & ATA_ATA_SLAVE) &&
		(adp->device->channel->device[SLAVE].driver) &&
		((struct ad_softc *) (adp->device->channel->
		 device[SLAVE].driver))->flags & AD_F_TAG_ENABLED)
		device = ATA_SLAVE;
	}
	else {
	    if ((adp->device->channel->devices & ATA_ATA_MASTER) &&
		(adp->device->channel->device[MASTER].driver) &&
		((struct ad_softc *) (adp->device->channel->
		 device[MASTER].driver))->flags & AD_F_TAG_ENABLED)
		device = ATA_MASTER;
	}
	/* select the other drive if it has work outstanding */
	if (device != adp->device->unit &&
	    ((struct ad_softc *)
	     (adp->device->channel->
	      device[ATA_DEV(device)].driver))->outstanding > 0) {
	    ATA_OUTB(adp->device->channel->r_io, ATA_DRIVE, ATA_D_IBM | device);
	    adp = adp->device->channel->device[ATA_DEV(device)].driver;
	    DELAY(1);
	}
    }
    /* read the alternate status register (does not clear interrupts) */
    adp->device->channel->status =
	ATA_INB(adp->device->channel->r_altio, ATA_ALTSTAT);
 
    /* do we have a SERVICE request from the drive ? */
    if (adp->flags & AD_F_TAG_ENABLED &&
	adp->outstanding > 0 &&
	adp->device->channel->status & ATA_S_SERVICE) {
	struct ad_request *request;
	int tag;

	/* check for error */
	if (adp->device->channel->status & ATA_S_ERROR) {
	    ata_prtdev(adp->device, "Oops! controller says s=0x%02x e=0x%02x\n",
		       adp->device->channel->status,
		       adp->device->channel->error);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}

	/* issue SERVICE cmd */
	if (ata_command(adp->device, ATA_C_SERVICE, 0, 0, 0, ATA_IMMEDIATE)) {
	    ata_prtdev(adp->device, "problem executing SERVICE cmd\n");
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}

	/* setup the transfer environment when ready */
	if (ata_wait(adp->device, ATA_S_READY)) {
	    ata_prtdev(adp->device, "SERVICE timeout tag=%d s=%02x e=%02x\n",
		       ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3,
		       adp->device->channel->status,
		       adp->device->channel->error);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}
	/* the drive reports the tag it wants serviced in the count reg */
	tag = ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3;
	if (!(request = adp->tags[tag])) {
	    ata_prtdev(adp->device, "no request for tag=%d\n", tag);	
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}
	ATA_FORCELOCK_CH(adp->device->channel, ATA_ACTIVE_ATA);
	adp->device->channel->running = request;
	request->serv++;

	/* start DMA transfer when ready */
	if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
	    ata_prtdev(adp->device, "timeout starting DMA s=%02x e=%02x\n",
		       adp->device->channel->status,
		       adp->device->channel->error);
	    ad_invalidatequeue(adp, NULL);
	    return ATA_OP_FINISHED;
	}
	ata_dmastart(adp->device, request->data, request->bytecount,
		    request->flags & ADR_F_READ);
	return ATA_OP_CONTINUES;
    }
    return ATA_OP_FINISHED;
}
  810: 
  811: static void
  812: ad_free(struct ad_request *request)
  813: {
  814:     int s = splbio();
  815:     ata_dmafree(request->softc->device);
  816:     request->softc->tags[request->tag] = NULL;
  817:     mpipe_free(&request->softc->device->channel->req_mpipe, request);
  818:     splx(s);
  819: }
  820: 
  821: static void
  822: ad_invalidatequeue(struct ad_softc *adp, struct ad_request *request)
  823: {
  824:     /* if tags used invalidate all other tagged transfers */
  825:     if (adp->flags & AD_F_TAG_ENABLED) {
  826: 	struct ad_request *tmpreq;
  827: 	int tag;
  828: 
  829: 	ata_prtdev(adp->device, "invalidating queued requests\n");
  830: 	for (tag = 0; tag <= adp->num_tags; tag++) {
  831: 	    tmpreq = adp->tags[tag];
  832: 	    adp->tags[tag] = NULL;
  833: 	    if (tmpreq == request || tmpreq == NULL)
  834: 		continue;
  835: 	    untimeout((timeout_t *)ad_timeout, tmpreq, tmpreq->timeout_handle);
  836: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, tmpreq, chain);
  837: 	}
  838: 	if (ata_command(adp->device, ATA_C_NOP,
  839: 			0, 0, ATA_C_F_FLUSHQUEUE, ATA_WAIT_READY))
  840: 	    ata_prtdev(adp->device, "flush queue failed\n");
  841: 	adp->outstanding = 0;
  842:     }
  843: }
  844: 
  845: static int
  846: ad_tagsupported(struct ad_softc *adp)
  847: {
  848:     const char *good[] = {"IBM-DPTA", "IBM-DTLA", NULL};
  849:     int i = 0;
  850: 
  851:     switch (adp->device->channel->chiptype) {
  852:     case 0x4d33105a: /* Promises before TX2 doesn't work with tagged queuing */
  853:     case 0x4d38105a:
  854:     case 0x0d30105a:
  855:     case 0x4d30105a:  
  856: 	return 0;
  857:     }
  858: 
  859:     /* check that drive does DMA, has tags enabled, and is one we know works */
  860:     if (adp->device->mode >= ATA_DMA && adp->device->param->support.queued && 
  861: 	adp->device->param->enabled.queued) {
  862: 	while (good[i] != NULL) {
  863: 	    if (!strncmp(adp->device->param->model, good[i], strlen(good[i])))
  864: 		return 1;
  865: 	    i++;
  866: 	}
  867: 	/* 
  868: 	 * check IBM's new obscure way of naming drives 
  869: 	 * we want "IC" (IBM CORP) and "AT" or "AV" (ATA interface)
  870: 	 * but doesn't care about the other info (size, capacity etc)
  871: 	 */
  872: 	if (!strncmp(adp->device->param->model, "IC", 2) &&
  873: 	    (!strncmp(adp->device->param->model + 8, "AT", 2) ||
  874: 	     !strncmp(adp->device->param->model + 8, "AV", 2)))
  875: 		return 1;
  876:     }
  877:     return 0;
  878: }
  879: 
/*
 * Watchdog handler: "request" did not complete in time.  Tear down any
 * DMA that was in flight, invalidate other queued tagged requests, then
 * either reinject the request for a retry or fail it back to the buffer
 * layer, and finally reset the channel.
 *
 * NOTE(review): invoked via timeout(9) (see the untimeout() call in
 * ad_invalidatequeue), so this presumably runs in softclock context --
 * confirm the spl protection against the interrupt path.
 */
static void
ad_timeout(struct ad_request *request)
{
    struct ad_softc *adp = request->softc;

    /* the channel is no longer executing this request */
    adp->device->channel->running = NULL;
    ata_prtdev(adp->device, "%s command timeout tag=%d serv=%d - resetting\n",
	       (request->flags & ADR_F_READ) ? "READ" : "WRITE",
	       request->tag, request->serv);

    if (request->flags & ADR_F_DMA_USED) {
	ata_dmadone(adp->device);
	ad_invalidatequeue(adp, request);
	/*
	 * On the final retry give up on DMA entirely: reprogram the
	 * drive for PIO and reset the retry counter so the request
	 * gets a fresh set of attempts in PIO mode.
	 */
	if (request->retries == AD_MAX_RETRIES) {
	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
	    ata_prtdev(adp->device, "trying fallback to PIO mode\n");
	    request->retries = 0;
	}
    }

    /* if retries still permit, reinject this request */
    if (request->retries++ < AD_MAX_RETRIES) {
	TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
    }
    else {
	/* retries all used up, return error */
	request->bp->b_error = EIO;
	request->bp->b_flags |= B_ERROR;
	devstat_end_transaction_buf(&adp->stats, request->bp);
	biodone(request->bp);
	ad_free(request);
    }
    ata_reinit(adp->device->channel);
}
  914: 
  915: void
  916: ad_reinit(struct ata_device *atadev)
  917: {
  918:     struct ad_softc *adp = atadev->driver;
  919: 
  920:     /* reinit disk parameters */
  921:     ad_invalidatequeue(atadev->driver, NULL);
  922:     ata_command(atadev, ATA_C_SET_MULTI, 0,
  923: 		adp->transfersize / DEV_BSIZE, 0, ATA_WAIT_READY);
  924:     if (adp->device->mode >= ATA_DMA)
  925: 	ata_dmainit(atadev, ata_pmode(adp->device->param),
  926: 		    ata_wmode(adp->device->param),
  927: 		    ata_umode(adp->device->param));
  928:     else
  929: 	ata_dmainit(atadev, ata_pmode(adp->device->param), -1, -1);
  930: }
  931: 
  932: void
  933: ad_print(struct ad_softc *adp) 
  934: {
  935:     if (bootverbose) {
  936: 	ata_prtdev(adp->device, "<%.40s/%.8s> ATA-%d disk at ata%d-%s\n", 
  937: 		   adp->device->param->model, adp->device->param->revision,
  938: 		   ad_version(adp->device->param->version_major), 
  939: 		   device_get_unit(adp->device->channel->dev),
  940: 		   (adp->device->unit == ATA_MASTER) ? "master" : "slave");
  941: 
  942: 	ata_prtdev(adp->device,
  943: 		   "%lluMB (%llu sectors), %llu C, %u H, %u S, %u B\n",
  944: 		   (unsigned long long)(adp->total_secs /
  945: 		   ((1024L*1024L)/DEV_BSIZE)),
  946: 		   (unsigned long long) adp->total_secs,
  947: 		   (unsigned long long) (adp->total_secs /
  948: 		    (adp->heads * adp->sectors)),
  949: 		   adp->heads, adp->sectors, DEV_BSIZE);
  950: 
  951: 	ata_prtdev(adp->device, "%d secs/int, %d depth queue, %s%s\n", 
  952: 		   adp->transfersize / DEV_BSIZE, adp->num_tags + 1,
  953: 		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
  954: 		   ata_mode2str(adp->device->mode));
  955: 
  956: 	ata_prtdev(adp->device, "piomode=%d dmamode=%d udmamode=%d cblid=%d\n",
  957: 		   ata_pmode(adp->device->param), ata_wmode(adp->device->param),
  958: 		   ata_umode(adp->device->param), 
  959: 		   adp->device->param->hwres_cblid);
  960: 
  961:     }
  962:     else
  963: 	ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
  964: 		   (unsigned long long)(adp->total_secs /
  965: 		   ((1024L * 1024L) / DEV_BSIZE)),
  966: 		   adp->device->param->model,
  967: 		   (unsigned long long)(adp->total_secs /
  968: 		    (adp->heads*adp->sectors)),
  969: 		   adp->heads, adp->sectors,
  970: 		   device_get_unit(adp->device->channel->dev),
  971: 		   (adp->device->unit == ATA_MASTER) ? "master" : "slave",
  972: 		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
  973: 		   ata_mode2str(adp->device->mode));
  974: }
  975: 
  976: static int
  977: ad_version(u_int16_t version)
  978: {
  979:     int bit;
  980: 
  981:     if (version == 0xffff)
  982: 	return 0;
  983:     for (bit = 15; bit >= 0; bit--)
  984: 	if (version & (1<<bit))
  985: 	    return bit;
  986:     return 0;
  987: }