File:  [DragonFly] / src / sys / dev / disk / ata / ata-disk.c
Revision 1.11
Wed Feb 18 00:50:00 2004 UTC by dillon
Branches: MAIN
CVS tags: HEAD
ATAng stage 2: sync part of the ata_dma*() API.  No operational changes.

    1: /*-
    2:  * Copyright (c) 1998,1999,2000,2001,2002 Søren Schmidt <sos@FreeBSD.org>
    3:  * All rights reserved.
    4:  *
    5:  * Redistribution and use in source and binary forms, with or without
    6:  * modification, are permitted provided that the following conditions
    7:  * are met:
    8:  * 1. Redistributions of source code must retain the above copyright
    9:  *    notice, this list of conditions and the following disclaimer,
   10:  *    without modification, immediately at the beginning of the file.
   11:  * 2. Redistributions in binary form must reproduce the above copyright
   12:  *    notice, this list of conditions and the following disclaimer in the
   13:  *    documentation and/or other materials provided with the distribution.
   14:  * 3. The name of the author may not be used to endorse or promote products
   15:  *    derived from this software without specific prior written permission.
   16:  *
   17:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27:  *
   28:  * $FreeBSD: src/sys/dev/ata/ata-disk.c,v 1.60.2.24 2003/01/30 07:19:59 sos Exp $
   29:  * $DragonFly: src/sys/dev/disk/ata/ata-disk.c,v 1.11 2004/02/18 00:50:00 dillon Exp $
   30:  */
   31: 
   32: #include "opt_ata.h"
   33: #include <sys/param.h>
   34: #include <sys/systm.h>
   35: #include <sys/ata.h>
   36: #include <sys/kernel.h>
   37: #include <sys/malloc.h>
   38: #include <sys/buf.h>
   39: #include <sys/bus.h>
   40: #include <sys/conf.h>
   41: #include <sys/disk.h>
   42: #include <sys/devicestat.h>
   43: #include <sys/cons.h>
   44: #include <sys/sysctl.h>
   45: #include <sys/syslog.h>
   46: #include <vm/vm.h>
   47: #include <vm/pmap.h>
   48: #include <machine/md_var.h>
   49: #include <machine/bus.h>
   50: #include <machine/clock.h>
   51: #include <sys/rman.h>
   52: #include "ata-all.h"
   53: #include "ata-disk.h"
   54: #include "ata-raid.h"
   55: #include <sys/proc.h>
   56: #include <sys/buf2.h>
   57: 
   58: /* device structures */
   59: static d_open_t		adopen;
   60: static d_close_t	adclose;
   61: static d_strategy_t	adstrategy;
   62: static d_dump_t		addump;
   63: 
   64: static struct cdevsw ad_cdevsw = {
   65: 	/* name */	"ad",
   66: 	/* maj */	116,
   67: 	/* flags */	D_DISK,
   68: 	/* port */      NULL,
   69: 	/* autoq */	0,
   70: 
   71: 	/* open */	adopen,
   72: 	/* close */	adclose,
   73: 	/* read */	physread,
   74: 	/* write */	physwrite,
   75: 	/* ioctl */	noioctl,
   76: 	/* poll */	nopoll,
   77: 	/* mmap */	nommap,
   78: 	/* strategy */	adstrategy,
   79: 	/* dump */	addump,
   80: 	/* psize */	nopsize
   81: };
   82: 
   83: /* prototypes */
   84: static void ad_invalidatequeue(struct ad_softc *, struct ad_request *);
   85: static int ad_tagsupported(struct ad_softc *);
   86: static void ad_timeout(struct ad_request *);
   87: static void ad_free(struct ad_request *);
   88: static int ad_version(u_int16_t);
   89: 
   90: /* misc defines */
   91: #define AD_MAX_RETRIES	3
   92: 
   93: /* internal vars */
   94: static u_int32_t adp_lun_map = 0;
   95: static int ata_dma = 1;
   96: static int ata_wc = 1;
   97: static int ata_tags = 0; 
   98: TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
   99: TUNABLE_INT("hw.ata.wc", &ata_wc);
  100: TUNABLE_INT("hw.ata.tags", &ata_tags);
  101: static MALLOC_DEFINE(M_AD, "AD driver", "ATA disk driver");
  102: 
  103: /* sysctl vars */
  104: SYSCTL_DECL(_hw_ata);
  105: SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RD, &ata_dma, 0,
  106: 	   "ATA disk DMA mode control");
  107: SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RD, &ata_wc, 0,
  108: 	   "ATA disk write caching");
  109: SYSCTL_INT(_hw_ata, OID_AUTO, tags, CTLFLAG_RD, &ata_tags, 0,
  110: 	   "ATA disk tagged queuing support");
  111: 
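/*
 * Attach an ATA disk.  Size the unit from its identify data (CHS, 28-bit
 * or 48-bit LBA), set up multi-sector transfers, read/write caching, DMA
 * and tagged queueing according to the tunables, then create the disk
 * device, devstat entry and in-core disklabel.
 */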
  112: void
  113: ad_attach(struct ata_device *atadev, int alreadylocked)
  114: {
  115:     struct ad_softc *adp;
  116:     dev_t dev;
  117: 
  118:     if (!(adp = malloc(sizeof(struct ad_softc), M_AD, M_WAITOK | M_ZERO))) {
  119: 	ata_prtdev(atadev, "failed to allocate driver storage\n");
  120: 	return;
  121:     }
  122: 
  123:     KKASSERT(atadev->channel->req_mpipe.max_count != 0);
  124: 
  125:     adp->device = atadev;
  126: #ifdef ATA_STATIC_ID
  127:     adp->lun = (device_get_unit(atadev->channel->dev)<<1)+ATA_DEV(atadev->unit);
  128: #else
  129:     adp->lun = ata_get_lun(&adp_lun_map);
  130: #endif
  131:     ata_set_name(atadev, "ad", adp->lun);
  132:     adp->heads = atadev->param->heads;
  133:     adp->sectors = atadev->param->sectors;
  134:     adp->total_secs = atadev->param->cylinders * adp->heads * adp->sectors;	
  135:     bufq_init(&adp->queue);
  136: 
   137:     /* does this device need old-style CHS addressing? */
  138:     if (!ad_version(atadev->param->version_major) || 
  139: 	!(atadev->param->atavalid & ATA_FLAG_54_58) || !atadev->param->lba_size)
  140: 	adp->flags |= AD_F_CHS_USED;
  141: 
  142:     /* use the 28bit LBA size if valid */
  143:     if (atadev->param->cylinders == 16383 &&
  144: 	adp->total_secs < atadev->param->lba_size)
  145: 	adp->total_secs = atadev->param->lba_size;
  146: 
  147:     /* use the 48bit LBA size if valid */
  148:     if (atadev->param->support.address48 &&
  149: 	atadev->param->lba_size48 > 268435455)
  150: 	adp->total_secs = atadev->param->lba_size48;
  151:     
  152:     if (!alreadylocked)
  153: 	ATA_SLEEPLOCK_CH(atadev->channel, ATA_CONTROL);
  154:     /* use multiple sectors/interrupt if device supports it */
  155:     adp->transfersize = DEV_BSIZE;
  156:     if (ad_version(atadev->param->version_major)) {
  157: 	int secsperint = max(1, min(atadev->param->sectors_intr, 16));
  158: 
  159: 	if (!ata_command(atadev, ATA_C_SET_MULTI, 0, secsperint,
  160: 			 0, ATA_WAIT_INTR) && !ata_wait(atadev, 0))
   161: 	    adp->transfersize *= secsperint;
  162:     }
  163: 
  164:     /* enable read caching if not default on device */
  165:     if (ata_command(atadev, ATA_C_SETFEATURES,
  166: 		    0, 0, ATA_C_F_ENAB_RCACHE, ATA_WAIT_INTR))
  167: 	ata_prtdev(atadev, "enabling readahead cache failed\n");
  168: 
  169:     /* enable write caching if allowed and not default on device */
  170:     if (ata_wc || (ata_tags && ad_tagsupported(adp))) {
  171: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  172: 			0, 0, ATA_C_F_ENAB_WCACHE, ATA_WAIT_INTR))
  173: 	    ata_prtdev(atadev, "enabling write cache failed\n");
  174:     }
  175:     else {
  176: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  177: 			0, 0, ATA_C_F_DIS_WCACHE, ATA_WAIT_INTR))
  178: 	    ata_prtdev(atadev, "disabling write cache failed\n");
  179:     }
  180: 
  181:     /* use DMA if allowed and if drive/controller supports it */
  182:     if (ata_dma)
  183: 	ata_dmainit(atadev, ata_pmode(atadev->param), 
  184: 		    ata_wmode(atadev->param), ata_umode(atadev->param));
  185:     else
  186: 	ata_dmainit(atadev, ata_pmode(atadev->param), -1, -1);
  187: 
  188:     /* use tagged queueing if allowed and supported */
  189:     if (ata_tags && ad_tagsupported(adp)) {
  190: 	adp->num_tags = atadev->param->queuelen;
  191: 	adp->flags |= AD_F_TAG_ENABLED;
  192: 	adp->device->channel->flags |= ATA_QUEUED;
  193: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  194: 			0, 0, ATA_C_F_DIS_RELIRQ, ATA_WAIT_INTR))
  195: 	    ata_prtdev(atadev, "disabling release interrupt failed\n");
  196: 	if (ata_command(atadev, ATA_C_SETFEATURES,
  197: 			0, 0, ATA_C_F_DIS_SRVIRQ, ATA_WAIT_INTR))
  198: 	    ata_prtdev(atadev, "disabling service interrupt failed\n");
  199:     }
  200: 
  201:     ATA_UNLOCK_CH(atadev->channel);
  202: 
  203:     devstat_add_entry(&adp->stats, "ad", adp->lun, DEV_BSIZE,
  204: 		      DEVSTAT_NO_ORDERED_TAGS,
  205: 		      DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
  206: 		      DEVSTAT_PRIORITY_DISK);
  207: 
  208:     dev = disk_create(adp->lun, &adp->disk, 0, &ad_cdevsw);
  209:     dev->si_drv1 = adp;
  210:     dev->si_iosize_max = 256 * DEV_BSIZE;
  211:     adp->dev = dev;
  212: 
  213:     /* construct the disklabel */
  214:     bzero(&adp->disk.d_label, sizeof(struct disklabel));
  215:     adp->disk.d_label.d_secsize = DEV_BSIZE;
  216:     adp->disk.d_label.d_nsectors = adp->sectors;
  217:     adp->disk.d_label.d_ntracks = adp->heads;
  218:     adp->disk.d_label.d_ncylinders = adp->total_secs/(adp->heads*adp->sectors);
  219:     adp->disk.d_label.d_secpercyl = adp->sectors * adp->heads;
  220:     adp->disk.d_label.d_secperunit = adp->total_secs;
  221: 
  222:     atadev->driver = adp;
  223:     atadev->flags = 0;
  224: 
   225:     /* if this disk belongs to an ATA RAID, don't print the probe */
  226:     if (ata_raiddisk_attach(adp))
  227: 	adp->flags |= AD_F_RAID_SUBDISK;
  228:     else {
  229: 	if (atadev->driver) {
  230: 	    ad_print(adp);
  231: 	    ata_enclosure_print(atadev);
  232: 	}
  233:     }
  234: }
  235: 
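/*
 * Detach a disk.  Fail every request queued for it (on both the channel
 * queue and the drive's buf queue) with ENXIO, tear down the disk and
 * devstat entries, optionally flush the drive's write cache, and release
 * the softc.
 */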
  236: void
  237: ad_detach(struct ata_device *atadev, int flush) /* get rid of flush XXX SOS */
  238: {
  239:     struct ad_softc *adp = atadev->driver;
  240:     struct ad_request *request;
  241:     struct buf *bp;
  242: 
  243:     atadev->flags |= ATA_D_DETACHING;
  244:     ata_prtdev(atadev, "removed from configuration\n");
  245:     ad_invalidatequeue(adp, NULL);
  246:     TAILQ_FOREACH(request, &atadev->channel->ata_queue, chain) {
  247: 	if (request->softc != adp)
  248: 	    continue;
  249: 	TAILQ_REMOVE(&atadev->channel->ata_queue, request, chain);
  250: 	request->bp->b_error = ENXIO;
  251: 	request->bp->b_flags |= B_ERROR;
  252: 	biodone(request->bp);
  253: 	ad_free(request);
  254:     }
  255:     while ((bp = bufq_first(&adp->queue))) {
  256: 	bufq_remove(&adp->queue, bp); 
  257: 	bp->b_error = ENXIO;
  258: 	bp->b_flags |= B_ERROR;
  259: 	biodone(bp);
  260:     }
  261:     disk_invalidate(&adp->disk);
  262:     devstat_remove_entry(&adp->stats);
  263:     disk_destroy(&adp->disk);
  264:     if (flush) {
  265: 	if (ata_command(atadev, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
  266: 	    ata_prtdev(atadev, "flushing cache on detach failed\n");
  267:     }
  268:     if (adp->flags & AD_F_RAID_SUBDISK)
  269: 	ata_raiddisk_detach(adp);
  270:     ata_free_name(atadev);
  271:     ata_free_lun(&adp_lun_map, adp->lun);
  272:     atadev->driver = NULL;
  273:     atadev->flags = 0;
  274:     free(adp, M_AD);
  275: }
  276: 
  277: static int
  278: adopen(dev_t dev, int flags, int fmt, struct thread *td)
  279: {
  280:     struct ad_softc *adp = dev->si_drv1;
  281: 
  282:     if (adp->flags & AD_F_RAID_SUBDISK)
  283: 	return EBUSY;
  284:     return 0;
  285: }
  286: 
  287: static int
  288: adclose(dev_t dev, int flags, int fmt, struct thread *td)
  289: {
  290:     struct ad_softc *adp = dev->si_drv1;
  291: 
  292:     ATA_SLEEPLOCK_CH(adp->device->channel, ATA_CONTROL);
  293:     if (ata_command(adp->device, ATA_C_FLUSHCACHE, 0, 0, 0, ATA_WAIT_READY))
  294: 	ata_prtdev(adp->device, "flushing cache on close failed\n");
  295:     ATA_UNLOCK_CH(adp->device->channel);
  296:     return 0;
  297: }
  298: 
  299: static void 
  300: adstrategy(struct buf *bp)
  301: {
  302:     struct ad_softc *adp = bp->b_dev->si_drv1;
  303:     int s;
  304: 
  305:     if (adp->device->flags & ATA_D_DETACHING) {
  306: 	bp->b_error = ENXIO;
  307: 	bp->b_flags |= B_ERROR;
  308: 	biodone(bp);
  309: 	return;
  310:     }
  311:     s = splbio();
  312:     bufqdisksort(&adp->queue, bp);
  313:     splx(s);
  314:     ata_start(adp->device->channel);
  315: }
  316: 
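/*
 * Kernel crash dump entry point.  Forces the drive into PIO mode, then
 * writes physical memory to the dump area a few pages at a time using
 * polled ad_transfer() calls.
 */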
  317: int
  318: addump(dev_t dev)
  319: {
  320:     struct ad_softc *adp = dev->si_drv1;
  321:     struct ad_request request;
  322:     u_int count, blkno, secsize;
  323:     vm_offset_t addr = 0;
  324:     long blkcnt;
  325:     int dumppages = MAXDUMPPGS;
  326:     int error;
  327:     int i;
  328: 
  329:     if ((error = disk_dumpcheck(dev, &count, &blkno, &secsize)))
  330: 	return error;
  331: 	
  332:     if (!adp)
  333: 	return ENXIO;
  334: 
  335:     /* force PIO mode for dumps */
  336:     adp->device->mode = ATA_PIO;
  337:     ata_reinit(adp->device->channel);
  338: 
  339:     blkcnt = howmany(PAGE_SIZE, secsize);
  340: 
  341:     while (count > 0) {
  342: 	caddr_t va = NULL;
  343: 	DELAY(1000);
  344: 
  345: 	if ((count / blkcnt) < dumppages)
  346: 	    dumppages = count / blkcnt;
  347: 
  348: 	for (i = 0; i < dumppages; ++i) {
  349: 	    vm_offset_t a = addr + (i * PAGE_SIZE);
  350: 	    if (is_physical_memory(a))
  351: 		va = pmap_kenter_temporary(trunc_page(a), i);
  352: 	    else
  353: 		va = pmap_kenter_temporary(trunc_page(0), i);
  354: 	}
  355: 
  356: 	bzero(&request, sizeof(struct ad_request));
  357: 	request.softc = adp;
  358: 	request.blockaddr = blkno;
  359: 	request.bytecount = PAGE_SIZE * dumppages;
  360: 	request.data = va;
  361: 
  362: 	while (request.bytecount > 0) {
  363: 	    ad_transfer(&request);
  364: 	    if (request.flags & ADR_F_ERROR)
  365: 		return EIO;
  366: 	    request.donecount += request.currentsize;
  367: 	    request.bytecount -= request.currentsize;
  368: 	    DELAY(20);
  369: 	}
  370: 
  371: 	if (dumpstatus(addr, (off_t)count * DEV_BSIZE) < 0)
  372: 	    return EINTR;
  373: 
  374: 	blkno += blkcnt * dumppages;
  375: 	count -= blkcnt * dumppages;
  376: 	addr += PAGE_SIZE * dumppages;
  377:     }
  378: 
  379:     if (ata_wait(adp->device, ATA_S_READY | ATA_S_DSC) < 0)
  380: 	ata_prtdev(adp->device, "timeout waiting for final ready\n");
  381:     return 0;
  382: }
  383: 
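/*
 * Pull the next buf off the drive's queue and turn it into an ad_request
 * on the channel queue.  With tagged queueing enabled a free tag must be
 * available; allocation failures simply leave the buf queued until a
 * later completion calls ad_start() again.
 */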
  384: void
  385: ad_start(struct ata_device *atadev)
  386: {
  387:     struct ad_softc *adp = atadev->driver;
  388:     struct buf *bp = bufq_first(&adp->queue);
  389:     struct ad_request *request;
  390:     int tag = 0;
  391: 
  392:     if (!bp)
  393: 	return;
  394: 
  395:     /* if tagged queueing enabled get next free tag */
  396:     if (adp->flags & AD_F_TAG_ENABLED) {
  397: 	while (tag <= adp->num_tags && adp->tags[tag])
  398: 	    tag++;
   399: 	if (tag > adp->num_tags)
  400: 	    return;
  401:     }
  402: 
  403:     /*
  404:      * Allocate a request.  The allocation can only fail if the pipeline
  405:      * is full, in which case the request will be picked up later when
  406:      * ad_start() is called after another request completes.
  407:      */
  408:     request = mpipe_alloc(&atadev->channel->req_mpipe, M_NOWAIT|M_ZERO);
  409:     if (request == NULL) {
  410: 	ata_prtdev(atadev, "pipeline full allocating request in ad_start\n");
  411: 	return;
  412:     }
  413: 
  414:     /* setup request */
  415:     request->softc = adp;
  416:     request->bp = bp;
  417:     request->blockaddr = bp->b_pblkno;
  418:     request->bytecount = bp->b_bcount;
  419:     request->data = bp->b_data;
  420:     request->tag = tag;
  421:     if (bp->b_flags & B_READ) 
  422: 	request->flags |= ADR_F_READ;
  423:     if (adp->device->mode >= ATA_DMA) {
  424: 	request->dmatab = ata_dmaalloc(atadev->channel, atadev->unit, M_NOWAIT);
  425: 	if (request->dmatab == NULL) {
  426: 	    mpipe_free(&atadev->channel->req_mpipe, request);
   427: 	    ata_prtdev(atadev, "pipeline full allocating dmabuf in ad_start\n");
  428: 	    /* do not revert to PIO, wait for ad_start after I/O completion */
  429: 	    return;
  430: 	}
  431:     }
  432: 
  433:     /* insert in tag array */
  434:     adp->tags[tag] = request;
  435: 
  436:     /* remove from drive queue */
  437:     bufq_remove(&adp->queue, bp); 
  438: 
  439:     /* link onto controller queue */
  440:     TAILQ_INSERT_TAIL(&atadev->channel->ata_queue, request, chain);
  441: }
  442: 
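/*
 * Issue or continue a transfer.  On the first call for a request this
 * arms the timeout, computes the LBA/CHS address and issues the ATA
 * command (DMA, tagged DMA or PIO); later calls push the next chunk of
 * PIO write data.  Returns ATA_OP_CONTINUES so the interrupt handler
 * finishes the job.
 */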
  443: int
  444: ad_transfer(struct ad_request *request)
  445: {
  446:     struct ad_softc *adp;
  447:     u_int64_t lba;
  448:     u_int32_t count, max_count;
  449:     u_int8_t cmd;
  450:     int flags = ATA_IMMEDIATE;
  451: 
  452:     /* get request params */
  453:     adp = request->softc;
  454: 
  455:     /* calculate transfer details */
  456:     lba = request->blockaddr + (request->donecount / DEV_BSIZE);
  457:    
  458:     if (request->donecount == 0) {
  459: 
  460: 	/* start timeout for this transfer */
  461: 	if (dumping)
  462: 	    request->timeout_handle.callout = NULL;
  463: 	else
  464: 	    request->timeout_handle = 
  465: 		timeout((timeout_t*)ad_timeout, request, 10 * hz);
  466: 
  467: 	/* setup transfer parameters */
  468: 	count = howmany(request->bytecount, DEV_BSIZE);
  469: 	max_count = adp->device->param->support.address48 ? 65536 : 256;
  470: 	if (count > max_count) {
  471: 	    ata_prtdev(adp->device,
  472: 		       "count %d size transfers not supported\n", count);
  473: 	    count = max_count;
  474: 	}
  475: 
  476: 	if (adp->flags & AD_F_CHS_USED) {
  477: 	    int sector = (lba % adp->sectors) + 1;
  478: 	    int cylinder = lba / (adp->sectors * adp->heads);
  479: 	    int head = (lba % (adp->sectors * adp->heads)) / adp->sectors;
  480: 
  481: 	    lba = (sector&0xff) | ((cylinder&0xffff)<<8) | ((head&0xf)<<24);
  482: 	    adp->device->flags |= ATA_D_USE_CHS;
  483: 	}
  484: 
  485: 	/* setup first transfer length */
  486: 	request->currentsize = min(request->bytecount, adp->transfersize);
  487: 
  488: 	devstat_start_transaction(&adp->stats);
  489: 
  490: 	/* does this drive & transfer work with DMA ? */
  491: 	request->flags &= ~ADR_F_DMA_USED;
  492: 	if (adp->device->mode >= ATA_DMA &&
  493: 	    !ata_dmasetup(adp->device->channel, adp->device->unit,
  494: 			  request->dmatab, request->data, request->bytecount)) {
  495: 	    request->flags |= ADR_F_DMA_USED;
  496: 	    request->currentsize = request->bytecount;
  497: 
  498: 	    /* do we have tags enabled ? */
  499: 	    if (adp->flags & AD_F_TAG_ENABLED) {
  500: 		cmd = (request->flags & ADR_F_READ) ?
  501: 		    ATA_C_READ_DMA_QUEUED : ATA_C_WRITE_DMA_QUEUED;
  502: 
  503: 		if (ata_command(adp->device, cmd, lba,
  504: 				request->tag << 3, count, flags)) {
  505: 		    ata_prtdev(adp->device, "error executing command");
  506: 		    goto transfer_failed;
  507: 		}
  508: 		if (ata_wait(adp->device, ATA_S_READY)) {
  509: 		    ata_prtdev(adp->device, "timeout waiting for READY\n");
  510: 		    goto transfer_failed;
  511: 		}
  512: 		adp->outstanding++;
  513: 
  514: 		/* if ATA bus RELEASE check for SERVICE */
  515: 		if (adp->flags & AD_F_TAG_ENABLED &&
  516: 		    ATA_INB(adp->device->channel->r_io, ATA_IREASON) &
  517: 		    ATA_I_RELEASE)
  518: 		    return ad_service(adp, 1);
  519: 	    }
  520: 	    else {
  521: 		cmd = (request->flags & ADR_F_READ) ?
  522: 		    ATA_C_READ_DMA : ATA_C_WRITE_DMA;
  523: 
  524: 		if (ata_command(adp->device, cmd, lba, count, 0, flags)) {
  525: 		    ata_prtdev(adp->device, "error executing command");
  526: 		    goto transfer_failed;
  527: 		}
  528: #if 0
  529: 		/*
  530: 		 * wait for data transfer phase
  531: 		 *
   532: 		 * Well, this should be here according to the specs, but older
   533: 		 * Promise controllers don't like it; they lock up!
  534: 		 */
  535: 		if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
  536: 		    ata_prtdev(adp->device, "timeout waiting for data phase\n");
  537: 		    goto transfer_failed;
  538: 		}
  539: #endif
  540: 	    }
  541: 
  542: 	    /* start transfer, return and wait for interrupt */
  543: 	    ata_dmastart(adp->device->channel, adp->device->unit,
  544: 			 request->dmatab, request->flags & ADR_F_READ);
  545: 	    return ATA_OP_CONTINUES;
  546: 	}
  547: 
  548: 	/* does this drive support multi sector transfers ? */
  549: 	if (request->currentsize > DEV_BSIZE)
  550: 	    cmd = request->flags&ADR_F_READ ? ATA_C_READ_MUL : ATA_C_WRITE_MUL;
  551: 
  552: 	/* just plain old single sector transfer */
  553: 	else
  554: 	    cmd = request->flags&ADR_F_READ ? ATA_C_READ : ATA_C_WRITE;
  555: 
  556: 	if (ata_command(adp->device, cmd, lba, count, 0, flags)){
  557: 	    ata_prtdev(adp->device, "error executing command");
  558: 	    goto transfer_failed;
  559: 	}
  560:     }
  561:    
  562:     /* calculate this transfer length */
  563:     request->currentsize = min(request->bytecount, adp->transfersize);
  564: 
  565:     /* if this is a PIO read operation, return and wait for interrupt */
  566:     if (request->flags & ADR_F_READ)
  567: 	return ATA_OP_CONTINUES;
  568: 
  569:     /* ready to write PIO data ? */
  570:     if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) < 0) {
  571: 	ata_prtdev(adp->device, "timeout waiting for DRQ");
  572: 	goto transfer_failed;
  573:     }
  574: 
  575:     /* output the data */
  576:     if (adp->device->channel->flags & ATA_USE_16BIT)
  577: 	ATA_OUTSW(adp->device->channel->r_io, ATA_DATA,
  578: 		  (void *)((uintptr_t)request->data + request->donecount),
  579: 		  request->currentsize / sizeof(int16_t));
  580:     else
  581: 	ATA_OUTSL(adp->device->channel->r_io, ATA_DATA,
  582: 		  (void *)((uintptr_t)request->data + request->donecount),
  583: 		  request->currentsize / sizeof(int32_t));
  584:     return ATA_OP_CONTINUES;
  585: 
  586: transfer_failed:
  587:     untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
  588:     ad_invalidatequeue(adp, request);
  589:     printf(" - resetting\n");
  590: 
  591:     /* if retries still permit, reinject this request */
  592:     if (request->retries++ < AD_MAX_RETRIES)
  593: 	TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  594:     else {
  595: 	/* retries all used up, return error */
  596: 	request->bp->b_error = EIO;
  597: 	request->bp->b_flags |= B_ERROR;
  598: 	request->bp->b_resid = request->bytecount;
  599: 	devstat_end_transaction_buf(&adp->stats, request->bp);
  600: 	biodone(request->bp);
  601: 	ad_free(request);
  602:     }
  603:     ata_reinit(adp->device->channel);
  604:     return ATA_OP_CONTINUES;
  605: }
  606: 
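/*
 * Per-request interrupt handler.  Completes the DMA or PIO data phase,
 * handles soft and hard errors (retrying UDMA CRC errors and falling
 * back to PIO when DMA misbehaves), and either restarts ad_transfer()
 * for the remaining bytes or biodone()s the buf.
 */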
  607: int
  608: ad_interrupt(struct ad_request *request)
  609: {
  610:     struct ad_softc *adp = request->softc;
  611:     int dma_stat = 0;
  612: 
  613:     /* finish DMA transfer */
  614:     if (request->flags & ADR_F_DMA_USED)
  615: 	dma_stat = ata_dmadone(adp->device);
  616: 
  617:     /* do we have a corrected soft error ? */
  618:     if (adp->device->channel->status & ATA_S_CORR)
  619: 	diskerr(request->bp, "soft error (ECC corrected)", LOG_PRINTF,
  620: 		request->blockaddr + (request->donecount / DEV_BSIZE),
  621: 		&adp->disk.d_label);
  622: 
  623:     /* did any real errors happen ? */
  624:     if ((adp->device->channel->status & ATA_S_ERROR) ||
  625: 	(request->flags & ADR_F_DMA_USED && dma_stat & ATA_BMSTAT_ERROR)) {
  626: 	adp->device->channel->error =
  627: 	    ATA_INB(adp->device->channel->r_io, ATA_ERROR);
  628: 	diskerr(request->bp, (adp->device->channel->error & ATA_E_ICRC) ?
  629: 		"UDMA ICRC error" : "hard error", LOG_PRINTF,
  630: 		request->blockaddr + (request->donecount / DEV_BSIZE),
  631: 		&adp->disk.d_label);
  632: 
  633: 	/* if this is a UDMA CRC error, reinject request */
  634: 	if (request->flags & ADR_F_DMA_USED &&
  635: 	    adp->device->channel->error & ATA_E_ICRC) {
  636: 	    untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
  637: 	    ad_invalidatequeue(adp, request);
  638: 
  639: 	    if (request->retries++ < AD_MAX_RETRIES)
  640: 		printf(" retrying\n");
  641: 	    else {
  642: 		ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
  643: 		printf(" falling back to PIO mode\n");
  644: 	    }
  645: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  646: 	    return ATA_OP_FINISHED;
  647: 	}
  648: 
  649: 	/* if using DMA, try once again in PIO mode */
  650: 	if (request->flags & ADR_F_DMA_USED) {
  651: 	    untimeout((timeout_t *)ad_timeout, request,request->timeout_handle);
  652: 	    ad_invalidatequeue(adp, request);
  653: 	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
  654: 	    request->flags |= ADR_F_FORCE_PIO;
  655: 	    printf(" trying PIO mode\n");
  656: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  657: 	    return ATA_OP_FINISHED;
  658: 	}
  659: 
  660: 	request->flags |= ADR_F_ERROR;
  661: 	printf(" status=%02x error=%02x\n", 
  662: 	       adp->device->channel->status, adp->device->channel->error);
  663:     }
  664: 
  665:     /* if we arrived here with forced PIO mode, DMA doesn't work right */
  666:     if (request->flags & ADR_F_FORCE_PIO && !(request->flags & ADR_F_ERROR))
   667: 	ata_prtdev(adp->device, "DMA problem, falling back to PIO mode\n");
  668: 
  669:     /* if this was a PIO read operation, get the data */
  670:     if (!(request->flags & ADR_F_DMA_USED) &&
  671: 	(request->flags & (ADR_F_READ | ADR_F_ERROR)) == ADR_F_READ) {
  672: 
  673: 	/* ready to receive data? */
  674: 	if ((adp->device->channel->status & ATA_S_READY) == 0) {
  675: 	    ata_prtdev(adp->device, "read interrupt arrived early %08x\n",
  676: 		(int)adp->device->channel->status);
  677: 	}
  678: 
  679: 	if (ata_wait(adp->device, (ATA_S_READY | ATA_S_DSC | ATA_S_DRQ)) != 0) {
  680: 	    ata_prtdev(adp->device, "read error detected (too) late");
  681: 	    request->flags |= ADR_F_ERROR;
  682: 	}
  683: 	else {
  684: 	    /* data ready, read in */
  685: 	    if (adp->device->channel->flags & ATA_USE_16BIT)
  686: 		ATA_INSW(adp->device->channel->r_io, ATA_DATA,
  687: 			 (void*)((uintptr_t)request->data + request->donecount),
  688: 			 request->currentsize / sizeof(int16_t));
  689: 	    else
  690: 		ATA_INSL(adp->device->channel->r_io, ATA_DATA,
  691: 			 (void*)((uintptr_t)request->data + request->donecount),
  692: 			 request->currentsize / sizeof(int32_t));
  693: 	}
  694:     }
  695: 
  696:     /* finish up transfer */
  697:     if (request->flags & ADR_F_ERROR) {
  698: 	request->bp->b_error = EIO;
  699: 	request->bp->b_flags |= B_ERROR;
  700:     } 
  701:     else {
  702: 	request->bytecount -= request->currentsize;
  703: 	request->donecount += request->currentsize;
  704: 	if (request->bytecount > 0) {
  705: 	    ad_transfer(request);
  706: 	    return ATA_OP_CONTINUES;
  707: 	}
  708:     }
  709: 
  710:     /* disarm timeout for this transfer */
  711:     untimeout((timeout_t *)ad_timeout, request, request->timeout_handle);
  712: 
  713:     request->bp->b_resid = request->bytecount;
  714: 
  715:     devstat_end_transaction_buf(&adp->stats, request->bp);
  716:     biodone(request->bp);
  717:     ad_free(request);
  718:     adp->outstanding--;
  719: 
  720:     /* check for SERVICE (tagged operations only) */
  721:     return ad_service(adp, 1);
  722: }
  723: 
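/*
 * Handle the SERVICE protocol used by tagged queueing: possibly switch
 * to the other drive on the channel if it also has tagged commands
 * outstanding, and when the selected drive asserts SERVICE, restart the
 * DMA transfer for the tag it wants serviced.
 */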
  724: int
  725: ad_service(struct ad_softc *adp, int change)
  726: {
  727:     /* do we have to check the other device on this channel ? */
  728:     if (adp->device->channel->flags & ATA_QUEUED && change) {
  729: 	int device = adp->device->unit;
  730: 
  731: 	if (adp->device->unit == ATA_MASTER) {
  732: 	    if ((adp->device->channel->devices & ATA_ATA_SLAVE) &&
  733: 		(adp->device->channel->device[SLAVE].driver) &&
  734: 		((struct ad_softc *) (adp->device->channel->
  735: 		 device[SLAVE].driver))->flags & AD_F_TAG_ENABLED)
  736: 		device = ATA_SLAVE;
  737: 	}
  738: 	else {
  739: 	    if ((adp->device->channel->devices & ATA_ATA_MASTER) &&
  740: 		(adp->device->channel->device[MASTER].driver) &&
  741: 		((struct ad_softc *) (adp->device->channel->
  742: 		 device[MASTER].driver))->flags & AD_F_TAG_ENABLED)
  743: 		device = ATA_MASTER;
  744: 	}
  745: 	if (device != adp->device->unit &&
  746: 	    ((struct ad_softc *)
  747: 	     (adp->device->channel->
  748: 	      device[ATA_DEV(device)].driver))->outstanding > 0) {
  749: 	    ATA_OUTB(adp->device->channel->r_io, ATA_DRIVE, ATA_D_IBM | device);
  750: 	    adp = adp->device->channel->device[ATA_DEV(device)].driver;
  751: 	    DELAY(1);
  752: 	}
  753:     }
  754:     adp->device->channel->status =
  755: 	ATA_INB(adp->device->channel->r_altio, ATA_ALTSTAT);
  756:  
  757:     /* do we have a SERVICE request from the drive ? */
  758:     if (adp->flags & AD_F_TAG_ENABLED &&
  759: 	adp->outstanding > 0 &&
  760: 	adp->device->channel->status & ATA_S_SERVICE) {
  761: 	struct ad_request *request;
  762: 	int tag;
  763: 
  764: 	/* check for error */
  765: 	if (adp->device->channel->status & ATA_S_ERROR) {
  766: 	    ata_prtdev(adp->device, "Oops! controller says s=0x%02x e=0x%02x\n",
  767: 		       adp->device->channel->status,
  768: 		       adp->device->channel->error);
  769: 	    ad_invalidatequeue(adp, NULL);
  770: 	    return ATA_OP_FINISHED;
  771: 	}
  772: 
  773: 	/* issue SERVICE cmd */
  774: 	if (ata_command(adp->device, ATA_C_SERVICE, 0, 0, 0, ATA_IMMEDIATE)) {
  775: 	    ata_prtdev(adp->device, "problem executing SERVICE cmd\n");
  776: 	    ad_invalidatequeue(adp, NULL);
  777: 	    return ATA_OP_FINISHED;
  778: 	}
  779: 
  780: 	/* setup the transfer environment when ready */
  781: 	if (ata_wait(adp->device, ATA_S_READY)) {
  782: 	    ata_prtdev(adp->device, "SERVICE timeout tag=%d s=%02x e=%02x\n",
  783: 		       ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3,
  784: 		       adp->device->channel->status,
  785: 		       adp->device->channel->error);
  786: 	    ad_invalidatequeue(adp, NULL);
  787: 	    return ATA_OP_FINISHED;
  788: 	}
  789: 	tag = ATA_INB(adp->device->channel->r_io, ATA_COUNT) >> 3;
  790: 	if (!(request = adp->tags[tag])) {
  791: 	    ata_prtdev(adp->device, "no request for tag=%d\n", tag);	
  792: 	    ad_invalidatequeue(adp, NULL);
  793: 	    return ATA_OP_FINISHED;
  794: 	}
  795: 	ATA_FORCELOCK_CH(adp->device->channel, ATA_ACTIVE_ATA);
  796: 	adp->device->channel->running = request;
  797: 	request->serv++;
  798: 
  799: 	/* start DMA transfer when ready */
  800: 	if (ata_wait(adp->device, ATA_S_READY | ATA_S_DRQ)) {
  801: 	    ata_prtdev(adp->device, "timeout starting DMA s=%02x e=%02x\n",
  802: 		       adp->device->channel->status,
  803: 		       adp->device->channel->error);
  804: 	    ad_invalidatequeue(adp, NULL);
  805: 	    return ATA_OP_FINISHED;
  806: 	}
  807: 	ata_dmastart(adp->device->channel, adp->device->unit,
  808: 		     request->dmatab, request->flags & ADR_F_READ);
  809: 	return ATA_OP_CONTINUES;
  810:     }
  811:     return ATA_OP_FINISHED;
  812: }
  813: 
  814: static void
  815: ad_free(struct ad_request *request)
  816: {
  817:     int s = splbio();
  818: 
  819:     if (request->dmatab)
  820: 	ata_dmafree(request->softc->device->channel, request->dmatab);
  821:     request->softc->tags[request->tag] = NULL;
  822:     mpipe_free(&request->softc->device->channel->req_mpipe, request);
  823:     splx(s);
  824: }
  825: 
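/*
 * When tagged queueing is active, requeue every outstanding tagged
 * request except 'request' and flush the drive's internal queue with a
 * NOP/FLUSHQUEUE command.
 */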
  826: static void
  827: ad_invalidatequeue(struct ad_softc *adp, struct ad_request *request)
  828: {
  829:     /* if tags used invalidate all other tagged transfers */
  830:     if (adp->flags & AD_F_TAG_ENABLED) {
  831: 	struct ad_request *tmpreq;
  832: 	int tag;
  833: 
  834: 	ata_prtdev(adp->device, "invalidating queued requests\n");
  835: 	for (tag = 0; tag <= adp->num_tags; tag++) {
  836: 	    tmpreq = adp->tags[tag];
  837: 	    adp->tags[tag] = NULL;
  838: 	    if (tmpreq == request || tmpreq == NULL)
  839: 		continue;
  840: 	    untimeout((timeout_t *)ad_timeout, tmpreq, tmpreq->timeout_handle);
  841: 	    TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, tmpreq, chain);
  842: 	}
  843: 	if (ata_command(adp->device, ATA_C_NOP,
  844: 			0, 0, ATA_C_F_FLUSHQUEUE, ATA_WAIT_READY))
  845: 	    ata_prtdev(adp->device, "flush queue failed\n");
  846: 	adp->outstanding = 0;
  847:     }
  848: }
  849: 
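/*
 * Decide whether tagged queueing should be used: the controller must not
 * be a pre-TX2 Promise, the drive must run DMA with queued commands
 * supported and enabled, and the model must match one of the known-good
 * IBM drive names.
 */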
  850: static int
  851: ad_tagsupported(struct ad_softc *adp)
  852: {
  853:     const char *good[] = {"IBM-DPTA", "IBM-DTLA", NULL};
  854:     int i = 0;
  855: 
  856:     switch (adp->device->channel->chiptype) {
   857:     case 0x4d33105a: /* pre-TX2 Promise chips don't work with tagged queueing */
  858:     case 0x4d38105a:
  859:     case 0x0d30105a:
  860:     case 0x4d30105a:  
  861: 	return 0;
  862:     }
  863: 
  864:     /* check that drive does DMA, has tags enabled, and is one we know works */
  865:     if (adp->device->mode >= ATA_DMA && adp->device->param->support.queued && 
  866: 	adp->device->param->enabled.queued) {
  867: 	while (good[i] != NULL) {
  868: 	    if (!strncmp(adp->device->param->model, good[i], strlen(good[i])))
  869: 		return 1;
  870: 	    i++;
  871: 	}
  872: 	/* 
   873: 	 * Check IBM's new, obscure way of naming drives: we want "IC"
   874: 	 * (IBM Corp) and "AT" or "AV" (ATA interface), but don't care
   875: 	 * about the other info (size, capacity etc).
  876: 	 */
  877: 	if (!strncmp(adp->device->param->model, "IC", 2) &&
  878: 	    (!strncmp(adp->device->param->model + 8, "AT", 2) ||
  879: 	     !strncmp(adp->device->param->model + 8, "AV", 2)))
  880: 		return 1;
  881:     }
  882:     return 0;
  883: }
  884: 
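/*
 * Command timeout.  Invalidate the queue, drop back to PIO after
 * repeated DMA failures, and either requeue the request or fail the buf
 * with EIO before reinitializing the channel.
 */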
  885: static void
  886: ad_timeout(struct ad_request *request)
  887: {
  888:     struct ad_softc *adp = request->softc;
  889: 
  890:     adp->device->channel->running = NULL;
  891:     ata_prtdev(adp->device, "%s command timeout tag=%d serv=%d - resetting\n",
  892: 	       (request->flags & ADR_F_READ) ? "READ" : "WRITE",
  893: 	       request->tag, request->serv);
  894: 
  895:     if (request->flags & ADR_F_DMA_USED) {
  896: 	ata_dmadone(adp->device);
  897: 	ad_invalidatequeue(adp, request);
  898: 	if (request->retries == AD_MAX_RETRIES) {
  899: 	    ata_dmainit(adp->device, ata_pmode(adp->device->param), -1, -1);
  900: 	    ata_prtdev(adp->device, "trying fallback to PIO mode\n");
  901: 	    request->retries = 0;
  902: 	}
  903:     }
  904: 
  905:     /* if retries still permit, reinject this request */
  906:     if (request->retries++ < AD_MAX_RETRIES) {
  907: 	TAILQ_INSERT_HEAD(&adp->device->channel->ata_queue, request, chain);
  908:     }
  909:     else {
  910: 	/* retries all used up, return error */
  911: 	request->bp->b_error = EIO;
  912: 	request->bp->b_flags |= B_ERROR;
  913: 	devstat_end_transaction_buf(&adp->stats, request->bp);
  914: 	biodone(request->bp);
  915: 	ad_free(request);
  916:     }
  917:     ata_reinit(adp->device->channel);
  918: }
  919: 
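/*
 * Called after a channel reset: invalidate any queued tagged requests
 * and restore the multi-sector count and DMA/PIO transfer mode.
 */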
  920: void
  921: ad_reinit(struct ata_device *atadev)
  922: {
  923:     struct ad_softc *adp = atadev->driver;
  924: 
  925:     /* reinit disk parameters */
  926:     ad_invalidatequeue(atadev->driver, NULL);
  927:     ata_command(atadev, ATA_C_SET_MULTI, 0,
  928: 		adp->transfersize / DEV_BSIZE, 0, ATA_WAIT_READY);
  929:     if (adp->device->mode >= ATA_DMA)
  930: 	ata_dmainit(atadev, ata_pmode(adp->device->param),
  931: 		    ata_wmode(adp->device->param),
  932: 		    ata_umode(adp->device->param));
  933:     else
  934: 	ata_dmainit(atadev, ata_pmode(adp->device->param), -1, -1);
  935: }
  936: 
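/*
 * Announce the disk at probe time: capacity, geometry and transfer mode,
 * with extra detail when booting verbose.
 */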
  937: void
  938: ad_print(struct ad_softc *adp) 
  939: {
  940:     if (bootverbose) {
  941: 	ata_prtdev(adp->device, "<%.40s/%.8s> ATA-%d disk at ata%d-%s\n", 
  942: 		   adp->device->param->model, adp->device->param->revision,
  943: 		   ad_version(adp->device->param->version_major), 
  944: 		   device_get_unit(adp->device->channel->dev),
  945: 		   (adp->device->unit == ATA_MASTER) ? "master" : "slave");
  946: 
  947: 	ata_prtdev(adp->device,
  948: 		   "%lluMB (%llu sectors), %llu C, %u H, %u S, %u B\n",
  949: 		   (unsigned long long)(adp->total_secs /
  950: 		   ((1024L*1024L)/DEV_BSIZE)),
  951: 		   (unsigned long long) adp->total_secs,
  952: 		   (unsigned long long) (adp->total_secs /
  953: 		    (adp->heads * adp->sectors)),
  954: 		   adp->heads, adp->sectors, DEV_BSIZE);
  955: 
  956: 	ata_prtdev(adp->device, "%d secs/int, %d depth queue, %s%s\n", 
  957: 		   adp->transfersize / DEV_BSIZE, adp->num_tags + 1,
  958: 		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
  959: 		   ata_mode2str(adp->device->mode));
  960: 
  961: 	ata_prtdev(adp->device, "piomode=%d dmamode=%d udmamode=%d cblid=%d\n",
  962: 		   ata_pmode(adp->device->param), ata_wmode(adp->device->param),
  963: 		   ata_umode(adp->device->param), 
  964: 		   adp->device->param->hwres_cblid);
  965: 
  966:     }
  967:     else
  968: 	ata_prtdev(adp->device,"%lluMB <%.40s> [%lld/%d/%d] at ata%d-%s %s%s\n",
  969: 		   (unsigned long long)(adp->total_secs /
  970: 		   ((1024L * 1024L) / DEV_BSIZE)),
  971: 		   adp->device->param->model,
  972: 		   (unsigned long long)(adp->total_secs /
  973: 		    (adp->heads*adp->sectors)),
  974: 		   adp->heads, adp->sectors,
  975: 		   device_get_unit(adp->device->channel->dev),
  976: 		   (adp->device->unit == ATA_MASTER) ? "master" : "slave",
  977: 		   (adp->flags & AD_F_TAG_ENABLED) ? "tagged " : "",
  978: 		   ata_mode2str(adp->device->mode));
  979: }
  980: 
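/*
 * Return the highest ATA version bit set in the identify data's major
 * version word, or 0 if the word is empty or invalid (0xffff).
 */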
  981: static int
  982: ad_version(u_int16_t version)
  983: {
  984:     int bit;
  985: 
  986:     if (version == 0xffff)
  987: 	return 0;
  988:     for (bit = 15; bit >= 0; bit--)
  989: 	if (version & (1<<bit))
  990: 	    return bit;
  991:     return 0;
  992: }