File:  [DragonFly] / src / sys / dev / raid / mly / mly.c
Revision 1.9
Wed May 19 22:52:48 2004 UTC (10 years, 4 months ago) by dillon
Branches: MAIN
CVS tags: HEAD
Device layer rollup commit.

* cdevsw_add() is now required.  cdevsw_add() and cdevsw_remove() may specify
  a mask/match indicating the range of supported minor numbers.  Multiple
  cdevsw_add() calls using the same major number, but with distinct ranges,
  may be issued.  All drivers that previously failed to call cdevsw_add()
  now do so.
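
  For illustration only (this snippet is not part of the diff; 'unit' stands
  in for the value of device_get_unit() as used by the mly driver further
  down in this file):

	cdevsw_add(&mly_cdevsw, -1, unit);	/* mask -1, match unit: claim a single minor */
	/* ... and later, on detach ... */
	cdevsw_remove(&mly_cdevsw, -1, unit);	/* devices in the range are marked destroyed */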

* cdevsw_remove() now automatically marks all devices within its supported
  range as being destroyed.

* vnode->v_rdev is no longer resolved when the vnode is created.  Instead,
  only v_udev (a newly added field) is resolved.  v_rdev is resolved when
  the vnode is opened and cleared on the last close.

* A great deal of code was making rather dubious assumptions with regard
  to the validity of devices associated with vnodes, primarily because a
  device structure persisted by virtue of being indexed by (major, minor)
  instead of by (cdevsw, major, minor).  For example, if you run a program
  which connects to a USB device and then you pull the USB device and plug
  it back in, the vnode subsystem will continue to believe that the device
  is open when, in fact, it isn't (because it was destroyed and recreated).

  In particular, note that all the VFS mount procedures now check devices
  via v_udev instead of v_rdev prior to calling VOP_OPEN(), since v_rdev
  is NULL until the first open.
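
  One plausible shape of that check (a sketch only; the per-filesystem mount
  code varies, and 'devvp' plus the second udev2dev() argument are
  assumptions here):

	dev_t dev;

	dev = udev2dev(devvp->v_udev, 0);	/* v_rdev may still be NULL at this point */
	if (dev == NODEV)			/* no active cdevsw covers this device */
		return (ENXIO);
	/* ... VOP_OPEN() follows and resolves devvp->v_rdev ... */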

* The disk layer's device interaction has been rewritten.  The disk layer
  (i.e. the slice and disklabel management layer) no longer overloads
  its data onto the device structure representing the underlying physical
  disk.  Instead, the disk layer uses the new cdevsw_add() functionality
  to register its own cdevsw using the underlying device's major number,
  and simply does NOT register the underlying device's cdevsw.  No
  confusion is created because the device hash is now based on
  (cdevsw, major, minor) rather than (major, minor).

  NOTE: This also means that underlying raw disk devices may use the entire
  device minor number instead of having to reserve the bits used by the disk
  layer, and also means that we can (theoretically) stack a fully
  disklabel-supported 'disk' on top of any block device.

* The new reference counting scheme prevents this stale-device problem by
  associating a device with a cdevsw and disconnecting the device from its
  cdevsw when the cdevsw
  is removed.  Additionally, all udev2dev() lookups run through the cdevsw
  mask/match and only successfully find devices still associated with an
  active cdevsw.

* Major work on MFS:  MFS no longer shortcuts vnode and device creation.  It
  now creates a real vnode and a real device and implements real open and
  close VOPs.  Additionally, due to the disk layer changes, MFS is no longer
  limited to 255 mounts.  The new limit is 16 million.  Since MFS creates a
  real device node, mount_mfs will now create a real /dev/mfs<PID> device
  that can be read from userland (e.g. so you can dump an MFS filesystem).

* BUF AND DEVICE STRATEGY changes.  The struct buf contains a b_dev field.
  In order to properly handle stacked devices we now require that the b_dev
  field be initialized before the device strategy routine is called.  This
  required some additional work in various VFS implementations.  To enforce
  this requirement, biodone() now sets b_dev to NODEV.  The new disk layer
  will adjust b_dev before forwarding a request to the actual physical
  device.
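
  Sketched out (this is not code from the commit; 'devvp' and the use of
  VOP_STRATEGY() as the dispatch point are stand-ins for whatever a given
  VFS actually uses):

	bp->b_dev = devvp->v_rdev;	/* must be valid; biodone() resets it to NODEV */
	VOP_STRATEGY(devvp, bp);	/* the disk layer re-points b_dev before forwarding */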

* A bug in the ISO CD boot sequence which resulted in a panic has been fixed.

Testing by: lots of people, but David Rhodus found the most egregious bugs.

    1: /*-
    2:  * Copyright (c) 2000, 2001 Michael Smith
    3:  * Copyright (c) 2000 BSDi
    4:  * All rights reserved.
    5:  *
    6:  * Redistribution and use in source and binary forms, with or without
    7:  * modification, are permitted provided that the following conditions
    8:  * are met:
    9:  * 1. Redistributions of source code must retain the above copyright
   10:  *    notice, this list of conditions and the following disclaimer.
   11:  * 2. Redistributions in binary form must reproduce the above copyright
   12:  *    notice, this list of conditions and the following disclaimer in the
   13:  *    documentation and/or other materials provided with the distribution.
   14:  *
   15:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25:  * SUCH DAMAGE.
   26:  *
   27:  *	$FreeBSD: src/sys/dev/mly/mly.c,v 1.3.2.3 2001/03/05 20:17:24 msmith Exp $
   28:  *	$DragonFly: src/sys/dev/raid/mly/mly.c,v 1.9 2004/05/19 22:52:48 dillon Exp $
   29:  */
   30: 
   31: #include <sys/param.h>
   32: #include <sys/systm.h>
   33: #include <sys/malloc.h>
   34: #include <sys/kernel.h>
   35: #include <sys/bus.h>
   36: #include <sys/conf.h>
   37: #include <sys/ctype.h>
   38: #include <sys/ioccom.h>
   39: #include <sys/stat.h>
   40: 
   41: #include <machine/bus_memio.h>
   42: #include <machine/bus.h>
   43: #include <machine/resource.h>
   44: #include <sys/rman.h>
   45: 
   46: #include <bus/cam/scsi/scsi_all.h>
   47: 
   48: #include "mlyreg.h"
   49: #include "mlyio.h"
   50: #include "mlyvar.h"
   51: #define MLY_DEFINE_TABLES
   52: #include "mly_tables.h"
   53: 
   54: static int	mly_get_controllerinfo(struct mly_softc *sc);
   55: static void	mly_scan_devices(struct mly_softc *sc);
   56: static void	mly_rescan_btl(struct mly_softc *sc, int bus, int target);
   57: static void	mly_complete_rescan(struct mly_command *mc);
   58: static int	mly_get_eventstatus(struct mly_softc *sc);
   59: static int	mly_enable_mmbox(struct mly_softc *sc);
   60: static int	mly_flush(struct mly_softc *sc);
   61: static int	mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, 
   62: 			  size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
   63: static void	mly_fetch_event(struct mly_softc *sc);
   64: static void	mly_complete_event(struct mly_command *mc);
   65: static void	mly_process_event(struct mly_softc *sc, struct mly_event *me);
   66: static void	mly_periodic(void *data);
   67: 
   68: static int	mly_immediate_command(struct mly_command *mc);
   69: static int	mly_start(struct mly_command *mc);
   70: static void	mly_complete(void *context, int pending);
   71: 
   72: static void	mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
   73: static int	mly_alloc_commands(struct mly_softc *sc);
   74: static void	mly_map_command(struct mly_command *mc);
   75: static void	mly_unmap_command(struct mly_command *mc);
   76: 
   77: static int	mly_fwhandshake(struct mly_softc *sc);
   78: 
   79: static void	mly_describe_controller(struct mly_softc *sc);
   80: #ifdef MLY_DEBUG
   81: static void	mly_printstate(struct mly_softc *sc);
   82: static void	mly_print_command(struct mly_command *mc);
   83: static void	mly_print_packet(struct mly_command *mc);
   84: static void	mly_panic(struct mly_softc *sc, char *reason);
   85: #endif
   86: void		mly_print_controller(int controller);
   87: 
   88: static d_open_t		mly_user_open;
   89: static d_close_t	mly_user_close;
   90: static d_ioctl_t	mly_user_ioctl;
   91: static int	mly_user_command(struct mly_softc *sc, struct mly_user_command *uc);
   92: static int	mly_user_health(struct mly_softc *sc, struct mly_user_health *uh);
   93: 
   94: #define MLY_CDEV_MAJOR  158
   95: 
   96: static struct cdevsw mly_cdevsw = {
   97:     /* name */	"mly",
   98:     /* cmaj */	MLY_CDEV_MAJOR,
   99:     /* flags */	0,
  100:     /* port */	NULL,
  101:     /* clone */	NULL,
  102: 
  103:     mly_user_open,
  104:     mly_user_close,
  105:     noread,
  106:     nowrite,
  107:     mly_user_ioctl,
  108:     nopoll,
  109:     nommap,
  110:     nostrategy,
  111:     nodump,
  112:     nopsize
  113: };
  114: 
  115: /********************************************************************************
  116:  ********************************************************************************
  117:                                                                  Device Interface
  118:  ********************************************************************************
  119:  ********************************************************************************/
  120: 
  121: /********************************************************************************
  122:  * Initialise the controller and softc
  123:  */
  124: int
  125: mly_attach(struct mly_softc *sc)
  126: {
  127:     int		error;
  128: 
  129:     debug_called(1);
  130: 
  131:     /*
  132:      * Initialise per-controller queues.
  133:      */
  134:     mly_initq_free(sc);
  135:     mly_initq_ready(sc);
  136:     mly_initq_busy(sc);
  137:     mly_initq_complete(sc);
  138: 
  139: #if defined(__FreeBSD__) && __FreeBSD_version >= 500005
  140:     /*
  141:      * Initialise command-completion task.
  142:      */
  143:     TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
  144: #endif
  145: 
  146:     /* disable interrupts before we start talking to the controller */
  147:     MLY_MASK_INTERRUPTS(sc);
  148: 
  149:     /* 
  150:      * Wait for the controller to come ready, handshake with the firmware if required.
  151:      * This is typically only necessary on platforms where the controller BIOS does not
  152:      * run.
  153:      */
  154:     if ((error = mly_fwhandshake(sc)))
  155: 	return(error);
  156: 
  157:     /*
  158:      * Allocate command buffers
  159:      */
  160:     if ((error = mly_alloc_commands(sc)))
  161: 	return(error);
  162: 
  163:     /* 
  164:      * Obtain controller feature information
  165:      */
  166:     if ((error = mly_get_controllerinfo(sc)))
  167: 	return(error);
  168: 
  169:     /*
  170:      * Get the current event counter for health purposes, populate the initial
  171:      * health status buffer.
  172:      */
  173:     if ((error = mly_get_eventstatus(sc)))
  174: 	return(error);
  175: 
  176:     /*
  177:      * Enable memory-mailbox mode
  178:      */
  179:     if ((error = mly_enable_mmbox(sc)))
  180: 	return(error);
  181: 
  182:     /*
  183:      * Attach to CAM.
  184:      */
  185:     if ((error = mly_cam_attach(sc)))
  186: 	return(error);
  187: 
  188:     /* 
  189:      * Print a little information about the controller 
  190:      */
  191:     mly_describe_controller(sc);
  192: 
  193:     /*
  194:      * Mark all attached devices for rescan
  195:      */
  196:     mly_scan_devices(sc);
  197: 
  198:     /*
  199:      * Instigate the first status poll immediately.  Rescan completions won't
  200:      * happen until interrupts are enabled, which should still be before
  201:      * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
  202:      * discovery here...)
  203:      */
  204:     mly_periodic((void *)sc);
  205: 
  206:     /*
  207:      * Create the control device.
  208:      */
  209:     cdevsw_add(&mly_cdevsw, -1, device_get_unit(sc->mly_dev));
  210:     sc->mly_dev_t = make_dev(&mly_cdevsw, device_get_unit(sc->mly_dev),
  211:     				UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, 
  212: 				"mly%d", device_get_unit(sc->mly_dev));
  213:     sc->mly_dev_t->si_drv1 = sc;
  214: 
  215:     /* enable interrupts now */
  216:     MLY_UNMASK_INTERRUPTS(sc);
  217: 
  218:     return(0);
  219: }
  220: 
  221: /********************************************************************************
  222:  * Bring the controller to a state where it can be safely left alone.
  223:  */
  224: void
  225: mly_detach(struct mly_softc *sc)
  226: {
  227: 
  228:     debug_called(1);
  229: 
  230:     /* kill the periodic event */
  231:     untimeout(mly_periodic, sc, sc->mly_periodic);
  232: 
  233:     sc->mly_state |= MLY_STATE_SUSPEND;
  234: 
  235:     /* flush controller */
  236:     mly_printf(sc, "flushing cache...");
  237:     printf("%s\n", mly_flush(sc) ? "failed" : "done");
  238: 
  239:     MLY_MASK_INTERRUPTS(sc);
  240: }
  241: 
  242: /********************************************************************************
  243:  ********************************************************************************
  244:                                                                  Command Wrappers
  245:  ********************************************************************************
  246:  ********************************************************************************/
  247: 
  248: /********************************************************************************
  249:  * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
  250:  */
  251: static int
  252: mly_get_controllerinfo(struct mly_softc *sc)
  253: {
  254:     struct mly_command_ioctl	mci;
  255:     u_int8_t			status;
  256:     int				error;
  257: 
  258:     debug_called(1);
  259: 
  260:     if (sc->mly_controllerinfo != NULL)
  261: 	free(sc->mly_controllerinfo, M_DEVBUF);
  262: 
  263:     /* build the getcontrollerinfo ioctl and send it */
  264:     bzero(&mci, sizeof(mci));
  265:     sc->mly_controllerinfo = NULL;
  266:     mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
  267:     if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
  268: 			   &status, NULL, NULL)))
  269: 	return(error);
  270:     if (status != 0)
  271: 	return(EIO);
  272: 
  273:     if (sc->mly_controllerparam != NULL)
  274: 	free(sc->mly_controllerparam, M_DEVBUF);
  275: 
  276:     /* build the getcontrollerparameter ioctl and send it */
  277:     bzero(&mci, sizeof(mci));
  278:     sc->mly_controllerparam = NULL;
  279:     mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
  280:     if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
  281: 			   &status, NULL, NULL)))
  282: 	return(error);
  283:     if (status != 0)
  284: 	return(EIO);
  285: 
  286:     return(0);
  287: }
  288: 
  289: /********************************************************************************
  290:  * Schedule all possible devices for a rescan.
  291:  *
  292:  */
  293: static void
  294: mly_scan_devices(struct mly_softc *sc)
  295: {
  296:     int		bus, target, nchn;
  297: 
  298:     debug_called(1);
  299: 
  300:     /*
  301:      * Clear any previous BTL information.
  302:      */
  303:     bzero(&sc->mly_btl, sizeof(sc->mly_btl));
  304: 
  305:     /*
  306:      * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
  307:      */
  308:     nchn = sc->mly_controllerinfo->physical_channels_present +
  309: 	sc->mly_controllerinfo->virtual_channels_present;
  310:     for (bus = 0; bus < nchn; bus++)
  311: 	for (target = 0; target < MLY_MAX_TARGETS; target++)
  312: 	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
  313: 
  314: }
  315: 
  316: /********************************************************************************
  317:  * Rescan a device, possibly as a consequence of getting an event which suggests
  318:  * that it may have changed.
  319:  */
  320: static void
  321: mly_rescan_btl(struct mly_softc *sc, int bus, int target)
  322: {
  323:     struct mly_command		*mc;
  324:     struct mly_command_ioctl	*mci;
  325: 
  326:     debug_called(2);
  327: 
  328:     /* get a command */
  329:     mc = NULL;
  330:     if (mly_alloc_command(sc, &mc))
  331: 	return;				/* we'll be retried soon */
  332: 
  333:     /* set up the data buffer */
  334:     if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT)) == NULL) {
  335: 	mly_release_command(mc);
  336: 	return;				/* we'll get retried the next time a command completes */
  337:     }
  338:     bzero(mc->mc_data, sizeof(union mly_devinfo));
  339:     mc->mc_flags |= MLY_CMD_DATAIN;
  340:     mc->mc_complete = mly_complete_rescan;
  341: 
  342:     sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
  343: 
  344:     /* 
  345:      * Build the ioctl.
  346:      *
  347:      * At this point we are committed to sending this request, as it
  348:      * will be the only one constructed for this particular update.
  349:      */
  350:     mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
  351:     mci->opcode = MDACMD_IOCTL;
  352:     mci->addr.phys.controller = 0;
  353:     mci->timeout.value = 30;
  354:     mci->timeout.scale = MLY_TIMEOUT_SECONDS;
  355:     if (bus >= sc->mly_controllerinfo->physical_channels_present) {
  356: 	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
  357: 	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
  358: 	mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS) 
  359: 	    + target;
  360: 	debug(2, "logical device %d", mci->addr.log.logdev);
  361:     } else {
  362: 	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
  363: 	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
  364: 	mci->addr.phys.lun = 0;
  365: 	mci->addr.phys.target = target;
  366: 	mci->addr.phys.channel = bus;
  367: 	debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
  368:     }
  369:     
  370:     /*
  371:      * Use the ready queue to get this command dispatched.
  372:      */
  373:     mly_enqueue_ready(mc);
  374:     mly_startio(sc);
  375: }
  376: 
  377: /********************************************************************************
  378:  * Handle the completion of a rescan operation
  379:  */
  380: static void
  381: mly_complete_rescan(struct mly_command *mc)
  382: {
  383:     struct mly_softc				*sc = mc->mc_sc;
  384:     struct mly_ioctl_getlogdevinfovalid		*ldi;
  385:     struct mly_ioctl_getphysdevinfovalid	*pdi;
  386:     int						bus, target;
  387: 
  388:     debug_called(2);
  389: 
  390:     /* iff the command completed OK, we should use the result to update our data */
  391:     if (mc->mc_status == 0) {
  392: 	if (mc->mc_length == sizeof(*ldi)) {
  393: 	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
  394: 	    bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
  395: 	    target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
  396: 	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;	/* clears all other flags */
  397: 	    sc->mly_btl[bus][target].mb_type = ldi->raid_level;
  398: 	    sc->mly_btl[bus][target].mb_state = ldi->state;
  399: 	    debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number, 
  400: 		  mly_describe_code(mly_table_device_type, ldi->raid_level),
  401: 		  mly_describe_code(mly_table_device_state, ldi->state));
  402: 	} else if (mc->mc_length == sizeof(*pdi)) {
  403: 	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
  404: 	    bus = pdi->channel;
  405: 	    target = pdi->target;
  406: 	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;	/* clears all other flags */
  407: 	    sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
  408: 	    sc->mly_btl[bus][target].mb_state = pdi->state;
  409: 	    sc->mly_btl[bus][target].mb_speed = pdi->speed;
  410: 	    sc->mly_btl[bus][target].mb_width = pdi->width;
  411: 	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
  412: 		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
  413: 	    debug(2, "BTL rescan for %d:%d returns %s", bus, target, 
  414: 		  mly_describe_code(mly_table_device_state, pdi->state));
  415: 	} else {
  416: 	    mly_printf(sc, "BTL rescan result corrupted\n");
  417: 	}
  418:     } else {
  419: 	/*
  420: 	 * A request sent for a device beyond the last device present will fail.
  421: 	 * We don't care about this, so we do nothing about it.
  422: 	 */
  423:     }
  424:     free(mc->mc_data, M_DEVBUF);
  425:     mly_release_command(mc);
  426: }
  427: 
  428: /********************************************************************************
  429:  * Get the current health status and set the 'next event' counter to suit.
  430:  */
  431: static int
  432: mly_get_eventstatus(struct mly_softc *sc)
  433: {
  434:     struct mly_command_ioctl	mci;
  435:     struct mly_health_status	*mh;
  436:     u_int8_t			status;
  437:     int				error;
  438: 
  439:     /* build the gethealthstatus ioctl and send it */
  440:     bzero(&mci, sizeof(mci));
  441:     mh = NULL;
  442:     mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
  443: 
  444:     if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
  445: 	return(error);
  446:     if (status != 0)
  447: 	return(EIO);
  448: 
  449:     /* get the event counter */
  450:     sc->mly_event_change = mh->change_counter;
  451:     sc->mly_event_waiting = mh->next_event;
  452:     sc->mly_event_counter = mh->next_event;
  453: 
  454:     /* save the health status into the memory mailbox */
  455:     bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
  456: 
  457:     debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
  458:     
  459:     free(mh, M_DEVBUF);
  460:     return(0);
  461: }
  462: 
  463: /********************************************************************************
  464:  * Enable the memory mailbox mode.
  465:  */
  466: static int
  467: mly_enable_mmbox(struct mly_softc *sc)
  468: {
  469:     struct mly_command_ioctl	mci;
  470:     u_int8_t			*sp, status;
  471:     int				error;
  472: 
  473:     debug_called(1);
  474: 
  475:     /* build the ioctl and send it */
  476:     bzero(&mci, sizeof(mci));
  477:     mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
  478:     /* set buffer addresses */
  479:     mci.param.setmemorymailbox.command_mailbox_physaddr = 
  480: 	sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
  481:     mci.param.setmemorymailbox.status_mailbox_physaddr = 
  482: 	sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
  483:     mci.param.setmemorymailbox.health_buffer_physaddr = 
  484: 	sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
  485: 
  486:     /* set buffer sizes - abuse of data_size field is revolting */
  487:     sp = (u_int8_t *)&mci.data_size;
  488:     sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
  489:     sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
  490:     mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;
  491: 
  492:     debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d)", sc->mly_mmbox,
  493: 	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
  494: 	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
  495: 	  mci.param.setmemorymailbox.health_buffer_physaddr, 
  496: 	  mci.param.setmemorymailbox.health_buffer_size);
  497: 
  498:     if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
  499: 	return(error);
  500:     if (status != 0)
  501: 	return(EIO);
  502:     sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
  503:     debug(1, "memory mailbox active");
  504:     return(0);
  505: }
  506: 
  507: /********************************************************************************
  508:  * Flush all pending I/O from the controller.
  509:  */
  510: static int
  511: mly_flush(struct mly_softc *sc)
  512: {
  513:     struct mly_command_ioctl	mci;
  514:     u_int8_t			status;
  515:     int				error;
  516: 
  517:     debug_called(1);
  518: 
  519:     /* build the ioctl */
  520:     bzero(&mci, sizeof(mci));
  521:     mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
  522:     mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
  523: 
  524:     /* pass it off to the controller */
  525:     if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
  526: 	return(error);
  527: 
  528:     return((status == 0) ? 0 : EIO);
  529: }
  530: 
  531: /********************************************************************************
  532:  * Perform an ioctl command.
  533:  *
  534:  * If (data) is not NULL, the command requires data transfer.  If (*data) is NULL
  535:  * the command requires data transfer from the controller, and we will allocate
  536:  * a buffer for it.  If (*data) is not NULL, the command requires data transfer
  537:  * to the controller.
  538:  *
  539:  * XXX passing in the whole ioctl structure is ugly.  Better ideas?
  540:  *
  541:  * XXX we don't even try to handle the case where datasize > 4k.  We should.
  542:  */
  543: static int
  544: mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, 
  545: 	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
  546: {
  547:     struct mly_command		*mc;
  548:     struct mly_command_ioctl	*mci;
  549:     int				error;
  550: 
  551:     debug_called(1);
  552: 
  553:     mc = NULL;
  554:     if (mly_alloc_command(sc, &mc)) {
  555: 	error = ENOMEM;
  556: 	goto out;
  557:     }
  558: 
  559:     /* copy the ioctl structure, but save some important fields and then fixup */
  560:     mci = &mc->mc_packet->ioctl;
  561:     ioctl->sense_buffer_address = mci->sense_buffer_address;
  562:     ioctl->maximum_sense_size = mci->maximum_sense_size;
  563:     *mci = *ioctl;
  564:     mci->opcode = MDACMD_IOCTL;
  565:     mci->timeout.value = 30;
  566:     mci->timeout.scale = MLY_TIMEOUT_SECONDS;
  567:     
  568:     /* handle the data buffer */
  569:     if (data != NULL) {
  570: 	if (*data == NULL) {
  571: 	    /* allocate data buffer */
  572: 	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
  573: 		error = ENOMEM;
  574: 		goto out;
  575: 	    }
  576: 	    mc->mc_flags |= MLY_CMD_DATAIN;
  577: 	} else {
  578: 	    mc->mc_data = *data;
  579: 	    mc->mc_flags |= MLY_CMD_DATAOUT;
  580: 	}
  581: 	mc->mc_length = datasize;
  582: 	mc->mc_packet->generic.data_size = datasize;
  583:     }
  584:     
  585:     /* run the command */
  586:     if ((error = mly_immediate_command(mc)))
  587: 	goto out;
  588:     
  589:     /* clean up and return any data */
  590:     *status = mc->mc_status;
  591:     if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
  592: 	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
  593: 	*sense_length = mc->mc_sense;
  594: 	goto out;
  595:     }
  596: 
  597:     /* should we return a data pointer? */
  598:     if ((data != NULL) && (*data == NULL))
  599: 	*data = mc->mc_data;
  600: 
  601:     /* command completed OK */
  602:     error = 0;
  603: 
  604: out:
  605:     if (mc != NULL) {
  606: 	/* do we need to free a data buffer we allocated? */
  607: 	if (error && (mc->mc_data != NULL) && (*data == NULL))
  608: 	    free(mc->mc_data, M_DEVBUF);
  609: 	mly_release_command(mc);
  610:     }
  611:     return(error);
  612: }
  613: 
  614: /********************************************************************************
  615:  * Fetch one event from the controller.
  616:  */
  617: static void
  618: mly_fetch_event(struct mly_softc *sc)
  619: {
  620:     struct mly_command		*mc;
  621:     struct mly_command_ioctl	*mci;
  622:     int				s;
  623:     u_int32_t			event;
  624: 
  625:     debug_called(2);
  626: 
  627:     /* get a command */
  628:     mc = NULL;
  629:     if (mly_alloc_command(sc, &mc))
  630: 	return;				/* we'll get retried the next time a command completes */
  631: 
  632:     /* set up the data buffer */
  633:     if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT)) == NULL) {
  634: 	mly_release_command(mc);
  635: 	return;				/* we'll get retried the next time a command completes */
  636:     }
  637:     bzero(mc->mc_data, sizeof(struct mly_event));
  638:     mc->mc_length = sizeof(struct mly_event);
  639:     mc->mc_flags |= MLY_CMD_DATAIN;
  640:     mc->mc_complete = mly_complete_event;
  641: 
  642:     /*
  643:      * Get an event number to fetch.  It's possible that we've raced with another
  644:      * context for the last event, in which case there will be no more events.
  645:      */
  646:     s = splcam();
  647:     if (sc->mly_event_counter == sc->mly_event_waiting) {
  648: 	mly_release_command(mc);
  649: 	splx(s);
  650: 	return;
  651:     }
  652:     event = sc->mly_event_counter++;
  653:     splx(s);
  654: 
  655:     /* 
  656:      * Build the ioctl.
  657:      *
  658:      * At this point we are committed to sending this request, as it
  659:      * will be the only one constructed for this particular event number.
  660:      */
  661:     mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
  662:     mci->opcode = MDACMD_IOCTL;
  663:     mci->data_size = sizeof(struct mly_event);
  664:     mci->addr.phys.lun = (event >> 16) & 0xff;
  665:     mci->addr.phys.target = (event >> 24) & 0xff;
  666:     mci->addr.phys.channel = 0;
  667:     mci->addr.phys.controller = 0;
  668:     mci->timeout.value = 30;
  669:     mci->timeout.scale = MLY_TIMEOUT_SECONDS;
  670:     mci->sub_ioctl = MDACIOCTL_GETEVENT;
  671:     mci->param.getevent.sequence_number_low = event & 0xffff;
  672: 
  673:     debug(2, "fetch event %u", event);
  674: 
  675:     /*
  676:      * Use the ready queue to get this command dispatched.
  677:      */
  678:     mly_enqueue_ready(mc);
  679:     mly_startio(sc);
  680: }
  681: 
  682: /********************************************************************************
  683:  * Handle the completion of an event poll.
  684:  *
  685:  * Note that we don't actually have to instigate another poll; the completion of
  686:  * this command will trigger that if there are any more events to poll for.
  687:  */
  688: static void
  689: mly_complete_event(struct mly_command *mc)
  690: {
  691:     struct mly_softc	*sc = mc->mc_sc;
  692:     struct mly_event	*me = (struct mly_event *)mc->mc_data;
  693: 
  694:     debug_called(2);
  695: 
  696:     /* 
  697:      * If the event was successfully fetched, process it.
  698:      */
  699:     if (mc->mc_status == SCSI_STATUS_OK)
  700: 	mly_process_event(sc, me);
  701:     /* free the event buffer even if the fetch failed */
  702:     free(me, M_DEVBUF);
  703:     mly_release_command(mc);
  704: }
  705: 
  706: /********************************************************************************
  707:  * Process a controller event.
  708:  */
  709: static void
  710: mly_process_event(struct mly_softc *sc, struct mly_event *me)
  711: {
  712:     struct scsi_sense_data	*ssd = (struct scsi_sense_data *)&me->sense[0];
  713:     char			*fp, *tp;
  714:     int				bus, target, event, class, action;
  715: 
  716:     /* 
  717:      * Errors can be reported using vendor-unique sense data.  In this case, the
  718:      * event code will be 0x1c (Request sense data present), the sense key will
  719:      * be 0x09 (vendor specific), the MSB of the ASC will be set, and the 
  720:      * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
  721:      * and low seven bits of the ASC (low seven bits of the high byte).
  722:      */
  723:     if ((me->code == 0x1c) && 
  724: 	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
  725: 	(ssd->add_sense_code & 0x80)) {
  726: 	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
  727:     } else {
  728: 	event = me->code;
  729:     }
  730: 
  731:     /* look up event, get codes */
  732:     fp = mly_describe_code(mly_table_event, event);
  733: 
  734:     debug(2, "Event %d  code 0x%x", me->sequence_number, me->code);
  735: 
  736:     /* quiet event? */
  737:     class = fp[0];
  738:     if (isupper(class) && bootverbose)
  739: 	class = tolower(class);
  740: 
  741:     /* get action code, text string */
  742:     action = fp[1];
  743:     tp = &fp[2];
  744: 
  745:     /*
  746:      * Print some information about the event.
  747:      *
  748:      * This code uses a table derived from the corresponding portion of the Linux
  749:      * driver, and thus the parser is very similar.
  750:      */
  751:     switch(class) {
  752:     case 'p':		/* error on physical device */
  753: 	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
  754: 	if (action == 'r')
  755: 	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
  756: 	break;
  757:     case 'l':		/* error on logical unit */
  758:     case 'm':		/* message about logical unit */
  759: 	bus = MLY_LOGDEV_BUS(sc, me->lun);
  760: 	target = MLY_LOGDEV_TARGET(me->lun);
  761: 	mly_name_device(sc, bus, target);
  762: 	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
  763: 	if (action == 'r')
  764: 	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
  765: 	break;
  766:       break;
  767:     case 's':		/* report of sense data */
  768: 	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
  769: 	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) && 
  770: 	     (ssd->add_sense_code == 0x04) && 
  771: 	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
  772: 	    break;	/* ignore NO_SENSE or NOT_READY in one case */
  773: 
  774: 	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
  775: 	mly_printf(sc, "  sense key %d  asc %02x  ascq %02x\n", 
  776: 		      ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
  777: 	mly_printf(sc, "  info %4D  csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
  778: 	if (action == 'r')
  779: 	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
  780: 	break;
  781:     case 'e':
  782: 	mly_printf(sc, tp, me->target, me->lun);
  783: 	break;
  784:     case 'c':
  785: 	mly_printf(sc, "controller %s\n", tp);
  786: 	break;
  787:     case '?':
  788: 	mly_printf(sc, "%s - %d\n", tp, me->code);
  789: 	break;
  790:     default:	/* probably a 'noisy' event being ignored */
  791: 	break;
  792:     }
  793: }
  794: 
  795: /********************************************************************************
  796:  * Perform periodic activities.
  797:  */
  798: static void
  799: mly_periodic(void *data)
  800: {
  801:     struct mly_softc	*sc = (struct mly_softc *)data;
  802:     int			nchn, bus, target;
  803: 
  804:     debug_called(2);
  805: 
  806:     /*
  807:      * Scan devices.
  808:      */
  809:     nchn = sc->mly_controllerinfo->physical_channels_present +
  810: 	sc->mly_controllerinfo->virtual_channels_present;
  811:     for (bus = 0; bus < nchn; bus++) {
  812: 	for (target = 0; target < MLY_MAX_TARGETS; target++) {
  813: 
  814: 	    /* ignore the controller in this scan */
  815: 	    if (target == sc->mly_controllerparam->initiator_id)
  816: 		continue;
  817: 
  818: 	    /* perform device rescan? */
  819: 	    if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
  820: 		mly_rescan_btl(sc, bus, target);
  821: 	}
  822:     }
  823: 
  824:     sc->mly_periodic = timeout(mly_periodic, sc, hz);
  825: }
  826: 
  827: /********************************************************************************
  828:  ********************************************************************************
  829:                                                                Command Processing
  830:  ********************************************************************************
  831:  ********************************************************************************/
  832: 
  833: /********************************************************************************
  834:  * Run a command and wait for it to complete.
  835:  *
  836:  */
  837: static int
  838: mly_immediate_command(struct mly_command *mc)
  839: {
  840:     struct mly_softc	*sc = mc->mc_sc;
  841:     int			error, s;
  842: 
  843:     debug_called(2);
  844: 
  845:     /* spinning at splcam is ugly, but we're only used during controller init */
  846:     s = splcam();
  847:     if ((error = mly_start(mc))) {
  848: 	splx(s); return(error); }	/* don't leak the raised spl on failure */
  849: 
  850:     if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
  851: 	/* sleep on the command */
  852: 	while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
  853: 	    tsleep(mc, 0, "mlywait", 0);
  854: 	}
  855:     } else {
  856: 	/* spin and collect status while we do */
  857: 	while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
  858: 	    mly_done(mc->mc_sc);
  859: 	}
  860:     }
  861:     splx(s);
  862:     return(0);
  863: }
  864: 
  865: /********************************************************************************
  866:  * Start as much queued I/O as possible on the controller
  867:  */
  868: void
  869: mly_startio(struct mly_softc *sc)
  870: {
  871:     struct mly_command	*mc;
  872: 
  873:     debug_called(2);
  874: 
  875:     for (;;) {
  876: 
  877: 	/* try for a ready command */
  878: 	mc = mly_dequeue_ready(sc);
  879: 
  880: 	/* try to build a command from a queued ccb */
  881: 	if (!mc)
  882: 	    mly_cam_command(sc, &mc);
  883: 
  884: 	/* no command == nothing to do */
  885: 	if (!mc)
  886: 	    break;
  887: 
  888: 	/* try to post the command */
  889: 	if (mly_start(mc)) {
  890: 	    /* controller busy, or no resources - defer for later */
  891: 	    mly_requeue_ready(mc);
  892: 	    break;
  893: 	}
  894:     }
  895: }
  896: 
  897: /********************************************************************************
  898:  * Deliver a command to the controller; allocate controller resources at the
  899:  * last moment.
  900:  */
  901: static int
  902: mly_start(struct mly_command *mc)
  903: {
  904:     struct mly_softc		*sc = mc->mc_sc;
  905:     union mly_command_packet	*pkt;
  906:     int				s;
  907: 
  908:     debug_called(2);
  909: 
  910:     /* 
  911:      * Set the command up for delivery to the controller. 
  912:      */
  913:     mly_map_command(mc);
  914:     mc->mc_packet->generic.command_id = mc->mc_slot;
  915: 
  916:     s = splcam();
  917: 
  918:     /*
  919:      * Do we have to use the hardware mailbox?
  920:      */
  921:     if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
  922: 	/*
  923: 	 * Check to see if the controller is ready for us.
  924: 	 */
  925: 	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
  926: 	    splx(s);
  927: 	    return(EBUSY);
  928: 	}
  929: 	mc->mc_flags |= MLY_CMD_BUSY;
  930: 	
  931: 	/*
  932: 	 * It's ready, send the command.
  933: 	 */
  934: 	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
  935: 	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);
  936: 
  937:     } else {	/* use memory-mailbox mode */
  938: 
  939: 	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];
  940: 
  941: 	/* check to see if the next index is free yet */
  942: 	if (pkt->mmbox.flag != 0) {
  943: 	    splx(s);
  944: 	    return(EBUSY);
  945: 	}
  946: 	mc->mc_flags |= MLY_CMD_BUSY;
  947: 	
  948: 	/* copy in new command */
  949: 	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
  950: 	/* barrier to ensure completion of previous write before we write the flag */
  951: 	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
  952: 	/* copy flag last */
  953: 	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
  954: 	/* barrier to ensure completion of previous write before we notify the controller */
  955: 	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */
  956: 
  957: 	/* signal controller, update index */
  958: 	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
  959: 	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
  960:     }
  961: 
  962:     mly_enqueue_busy(mc);
  963:     splx(s);
  964:     return(0);
  965: }
  966: 
  967: /********************************************************************************
  968:  * Pick up command status from the controller, schedule a completion event
  969:  */
  970: void
  971: mly_done(struct mly_softc *sc) 
  972: {
  973:     struct mly_command		*mc;
  974:     union mly_status_packet	*sp;
  975:     u_int16_t			slot;
  976:     int				s, worked;
  977: 
  978:     s = splcam();
  979:     worked = 0;
  980: 
  981:     /* pick up hardware-mailbox commands */
  982:     if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
  983: 	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
  984: 	if (slot < MLY_SLOT_MAX) {
  985: 	    mc = &sc->mly_command[slot - MLY_SLOT_START];
  986: 	    mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
  987: 	    mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
  988: 	    mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
  989: 	    mly_remove_busy(mc);
  990: 	    mc->mc_flags &= ~MLY_CMD_BUSY;
  991: 	    mly_enqueue_complete(mc);
  992: 	    worked = 1;
  993: 	} else {
  994: 	    /* slot 0xffff may mean "extremely bogus command" */
  995: 	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
  996: 	}
  997: 	/* unconditionally acknowledge status */
  998: 	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
  999: 	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
 1000:     }
 1001: 
 1002:     /* pick up memory-mailbox commands */
 1003:     if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
 1004: 	for (;;) {
 1005: 	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];
 1006: 
 1007: 	    /* check for more status */
 1008: 	    if (sp->mmbox.flag == 0)
 1009: 		break;
 1010: 
 1011: 	    /* get slot number */
 1012: 	    slot = sp->status.command_id;
 1013: 	    if (slot < MLY_SLOT_MAX) {
 1014: 		mc = &sc->mly_command[slot - MLY_SLOT_START];
 1015: 		mc->mc_status = sp->status.status;
 1016: 		mc->mc_sense = sp->status.sense_length;
 1017: 		mc->mc_resid = sp->status.residue;
 1018: 		mly_remove_busy(mc);
 1019: 		mc->mc_flags &= ~MLY_CMD_BUSY;
 1020: 		mly_enqueue_complete(mc);
 1021: 		worked = 1;
 1022: 	    } else {
 1023: 		/* slot 0xffff may mean "extremely bogus command" */
 1024: 		mly_printf(sc, "got AM completion for illegal slot %u at %d\n", 
 1025: 			   slot, sc->mly_mmbox_status_index);
 1026: 	    }
 1027: 
 1028: 	    /* clear and move to next index */
 1029: 	    sp->mmbox.flag = 0;
 1030: 	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
 1031: 	}
 1032: 	/* acknowledge that we have collected status value(s) */
 1033: 	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
 1034:     }
 1035: 
 1036:     splx(s);
 1037:     if (worked) {
 1038: #if defined(__FreeBSD__) && __FreeBSD_version >= 500005
 1039: 	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
 1040: 	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
 1041: 	else
 1042: #endif
 1043: 	    mly_complete(sc, 0);
 1044:     }
 1045: }
 1046: 
 1047: /********************************************************************************
 1048:  * Process completed commands
 1049:  */
 1050: static void
 1051: mly_complete(void *context, int pending)
 1052: {
 1053:     struct mly_softc	*sc = (struct mly_softc *)context;
 1054:     struct mly_command	*mc;
 1055:     void	        (* mc_complete)(struct mly_command *mc);
 1056: 
 1057: 
 1058:     debug_called(2);
 1059: 
 1060:     /* 
 1061:      * Spin pulling commands off the completed queue and processing them.
 1062:      */
 1063:     while ((mc = mly_dequeue_complete(sc)) != NULL) {
 1064: 
 1065: 	/*
 1066: 	 * Free controller resources, mark command complete.
 1067: 	 *
 1068: 	 * Note that as soon as we mark the command complete, it may be freed
 1069: 	 * out from under us, so we need to save the mc_complete field in
 1070: 	 * order to later avoid dereferencing mc.  (We would not expect to
 1071: 	 * have a polling/sleeping consumer with mc_complete != NULL).
 1072: 	 */
 1073: 	mly_unmap_command(mc);
 1074: 	mc_complete = mc->mc_complete;
 1075: 	mc->mc_flags |= MLY_CMD_COMPLETE;
 1076: 
 1077: 	/* 
 1078: 	 * Call completion handler or wake up sleeping consumer.
 1079: 	 */
 1080: 	if (mc_complete != NULL) {
 1081: 	    mc_complete(mc);
 1082: 	} else {
 1083: 	    wakeup(mc);
 1084: 	}
 1085:     }
 1086: 
 1087:     /*
 1088:      * We may have freed up controller resources which would allow us
 1089:      * to push more commands onto the controller, so we check here.
 1090:      */
 1091:     mly_startio(sc);
 1092: 
 1093:     /*
 1094:      * The controller may have updated the health status information,
 1095:      * so check for it here.
 1096:      *
 1097:      * Note that we only check for health status after a completed command.  It
 1098:      * might be wise to ping the controller occasionally if it's been idle for
 1099:      * a while just to check up on it.  While a filesystem is mounted, or I/O is
 1100:      * active this isn't really an issue.
 1101:      */
 1102:     if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
 1103: 	sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
 1104: 	debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
 1105: 	      sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
 1106: 	sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
 1107: 
 1108: 	/* wake up anyone that might be interested in this */
 1109: 	wakeup(&sc->mly_event_change);
 1110:     }
 1111:     if (sc->mly_event_counter != sc->mly_event_waiting)
 1112: 	mly_fetch_event(sc);
 1113: }
 1114: 
 1115: /********************************************************************************
 1116:  ********************************************************************************
 1117:                                                         Command Buffer Management
 1118:  ********************************************************************************
 1119:  ********************************************************************************/
 1120: 
 1121: /********************************************************************************
 1122:  * Allocate a command.
 1123:  */
 1124: int
 1125: mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
 1126: {
 1127:     struct mly_command	*mc;
 1128: 
 1129:     debug_called(3);
 1130: 
 1131:     if ((mc = mly_dequeue_free(sc)) == NULL)
 1132: 	return(ENOMEM);
 1133: 
 1134:     *mcp = mc;
 1135:     return(0);
 1136: }
 1137: 
 1138: /********************************************************************************
 1139:  * Release a command back to the freelist.
 1140:  */
 1141: void
 1142: mly_release_command(struct mly_command *mc)
 1143: {
 1144:     debug_called(3);
 1145: 
 1146:     /*
 1147:      * Reset fields that could confuse a later consumer if they were
 1148:      * left stale when this command is handed out again.
 1149:      */
 1150:     mc->mc_data = NULL;
 1151:     mc->mc_flags = 0;
 1152:     mc->mc_complete = NULL;
 1153:     mc->mc_private = NULL;
 1154: 
 1155:     /*
 1156:      * By default, we set up to overwrite the command packet with
 1157:      * sense information.
 1158:      */
 1159:     mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
 1160:     mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
 1161: 
 1162:     mly_enqueue_free(mc);
 1163: }
 1164: 
 1165: /********************************************************************************
 1166:  * Map helper for command allocation.
 1167:  */
 1168: static void
 1169: mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 1170: {
 1171:     struct mly_softc	*sc = (struct mly_softc *)arg
 1172: 
 1173:     debug_called(2);
 1174: 
 1175:     sc->mly_packetphys = segs[0].ds_addr;
 1176: }
 1177: 
 1178: /********************************************************************************
 1179:  * Allocate and initialise command and packet structures.
 1180:  */
 1181: static int
 1182: mly_alloc_commands(struct mly_softc *sc)
 1183: {
 1184:     struct mly_command		*mc;
 1185:     int				i;
 1186:  
 1187:     /*
 1188:      * Allocate enough space for all the command packets in one chunk and
 1189:      * map them permanently into controller-visible space.
 1190:      */
 1191:     if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet, 
 1192: 			 BUS_DMA_NOWAIT, &sc->mly_packetmap)) {
 1193: 	return(ENOMEM);
 1194:     }
 1195:     bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet, 
 1196: 		    MLY_MAXCOMMANDS * sizeof(union mly_command_packet), 
 1197: 		    mly_alloc_commands_map, sc, 0);
 1198: 
 1199:     for (i = 0; i < MLY_MAXCOMMANDS; i++) {
 1200: 	mc = &sc->mly_command[i];
 1201: 	bzero(mc, sizeof(*mc));
 1202: 	mc->mc_sc = sc;
 1203: 	mc->mc_slot = MLY_SLOT_START + i;
 1204: 	mc->mc_packet = sc->mly_packet + i;
 1205: 	mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet));
 1206: 	if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
 1207: 	    mly_release_command(mc);
 1208:     }
 1209:     return(0);
 1210: }
 1211: 
 1212: /********************************************************************************
 1213:  * Command-mapping helper function - populate this command's s/g table
 1214:  * with the s/g entries for its data.
 1215:  */
 1216: static void
 1217: mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 1218: {
 1219:     struct mly_command		*mc = (struct mly_command *)arg;
 1220:     struct mly_softc		*sc = mc->mc_sc;
 1221:     struct mly_command_generic	*gen = &(mc->mc_packet->generic);
 1222:     struct mly_sg_entry		*sg;
 1223:     int				i, tabofs;
 1224: 
 1225:     debug_called(3);
 1226: 
 1227:     /* can we use the transfer structure directly? */
 1228:     if (nseg <= 2) {
 1229: 	sg = &gen->transfer.direct.sg[0];
 1230: 	gen->command_control.extended_sg_table = 0;
 1231:     } else {
 1232: 	tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES);
 1233: 	sg = sc->mly_sg_table + tabofs;
 1234: 	gen->transfer.indirect.entries[0] = nseg;
 1235: 	gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
 1236: 	gen->command_control.extended_sg_table = 1;
 1237:     }
 1238: 
 1239:     /* copy the s/g table */
 1240:     for (i = 0; i < nseg; i++) {
 1241: 	sg[i].physaddr = segs[i].ds_addr;
 1242: 	sg[i].length = segs[i].ds_len;
 1243:     }
 1244: 
 1245: }
 1246: 
 1247: #if 0
 1248: /********************************************************************************
 1249:  * Command-mapping helper function - save the cdb's physical address.
 1250:  *
 1251:  * We don't support 'large' SCSI commands at this time, so this is unused.
 1252:  */
 1253: static void
 1254: mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 1255: {
 1256:     struct mly_command			*mc = (struct mly_command *)arg;
 1257: 
 1258:     debug_called(3);
 1259: 
 1260:     /* XXX can we safely assume that a CDB will never cross a page boundary? */
 1261:     if ((segs[0].ds_addr % PAGE_SIZE) > 
 1262: 	((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
 1263: 	panic("cdb crosses page boundary");
 1264: 
 1265:     /* fix up fields in the command packet */
 1266:     mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
 1267: }
 1268: #endif
 1269: 
 1270: /********************************************************************************
 1271:  * Map a command into controller-visible space
 1272:  */
 1273: static void
 1274: mly_map_command(struct mly_command *mc)
 1275: {
 1276:     struct mly_softc	*sc = mc->mc_sc;
 1277: 
 1278:     debug_called(2);
 1279: 
 1280:     /* don't map more than once */
 1281:     if (mc->mc_flags & MLY_CMD_MAPPED)
 1282: 	return;
 1283: 
 1284:     /* does the command have a data buffer? */
 1285:     if (mc->mc_data != NULL)
 1286: 	bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length, 
 1287: 			mly_map_command_sg, mc, 0);
 1288: 	
 1289:     if (mc->mc_flags & MLY_CMD_DATAIN)
 1290: 	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
 1291:     if (mc->mc_flags & MLY_CMD_DATAOUT)
 1292: 	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
 1293: 
 1294:     mc->mc_flags |= MLY_CMD_MAPPED;
 1295: }
 1296: 
 1297: /********************************************************************************
 1298:  * Unmap a command from controller-visible space
 1299:  */
 1300: static void
 1301: mly_unmap_command(struct mly_command *mc)
 1302: {
 1303:     struct mly_softc	*sc = mc->mc_sc;
 1304: 
 1305:     debug_called(2);
 1306: 
 1307:     if (!(mc->mc_flags & MLY_CMD_MAPPED))
 1308: 	return;
 1309: 
 1310:     if (mc->mc_flags & MLY_CMD_DATAIN)
 1311: 	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
 1312:     if (mc->mc_flags & MLY_CMD_DATAOUT)
 1313: 	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
 1314: 
 1315:     /* does the command have a data buffer? */
 1316:     if (mc->mc_data != NULL)
 1317: 	bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
 1318: 
 1319:     mc->mc_flags &= ~MLY_CMD_MAPPED;
 1320: }
 1321: 
 1322: /********************************************************************************
 1323:  ********************************************************************************
 1324:                                                                  Hardware Control
 1325:  ********************************************************************************
 1326:  ********************************************************************************/
 1327: 
 1328: /********************************************************************************
 1329:  * Handshake with the firmware while the card is being initialised.
 1330:  */
 1331: static int
 1332: mly_fwhandshake(struct mly_softc *sc) 
 1333: {
 1334:     u_int8_t	error, param0, param1;
 1335:     int		spinup = 0;
 1336: 
 1337:     debug_called(1);
 1338: 
 1339:     /* set HM_STSACK and let the firmware initialise */
 1340:     MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
 1341:     DELAY(1000);	/* too short? */
 1342: 
 1343:     /* if HM_STSACK is still true, the controller is initialising */
 1344:     if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
 1345: 	return(0);
 1346:     mly_printf(sc, "controller initialisation started\n");
 1347: 
 1348:     /* spin waiting for initialisation to finish, or for a message to be delivered */
 1349:     while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
 1350: 	/* check for a message */
 1351: 	if (MLY_ERROR_VALID(sc)) {
 1352: 	    error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
 1353: 	    param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
 1354: 	    param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);
 1355: 
 1356: 	    switch(error) {
 1357: 	    case MLY_MSG_SPINUP:
 1358: 		if (!spinup) {
 1359: 		    mly_printf(sc, "drive spinup in progress\n");
 1360: 		    spinup = 1;			/* only print this once (should print drive being spun?) */
 1361: 		}
 1362: 		break;
 1363: 	    case MLY_MSG_RACE_RECOVERY_FAIL:
 1364: 		mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
 1365: 		break;
 1366: 	    case MLY_MSG_RACE_IN_PROGRESS:
 1367: 		mly_printf(sc, "mirror race recovery in progress\n");
 1368: 		break;
 1369: 	    case MLY_MSG_RACE_ON_CRITICAL:
 1370: 		mly_printf(sc, "mirror race recovery on a critical drive\n");
 1371: 		break;
 1372: 	    case MLY_MSG_PARITY_ERROR:
 1373: 		mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
 1374: 		return(ENXIO);
 1375: 	    default:
 1376: 		mly_printf(sc, "unknown initialisation code 0x%x\n", error);
 1377: 	    }
 1378: 	}
 1379:     }
 1380:     return(0);
 1381: }
 1382: 
 1383: /********************************************************************************
 1384:  ********************************************************************************
 1385:                                                         Debugging and Diagnostics
 1386:  ********************************************************************************
 1387:  ********************************************************************************/
 1388: 
 1389: /********************************************************************************
 1390:  * Print some information about the controller.
 1391:  */
 1392: static void
 1393: mly_describe_controller(struct mly_softc *sc)
 1394: {
 1395:     struct mly_ioctl_getcontrollerinfo	*mi = sc->mly_controllerinfo;
 1396: 
 1397:     mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n", 
 1398: 	       mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
 1399: 	       mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,	/* XXX turn encoding? */
 1400: 	       mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
 1401: 	       mi->memory_size);
 1402: 
 1403:     if (bootverbose) {
 1404: 	mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n", 
 1405: 		   mly_describe_code(mly_table_oemname, mi->oem_information), 
 1406: 		   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
 1407: 		   mi->interface_speed, mi->interface_width, mi->interface_name);
 1408: 	mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
 1409: 		   mi->memory_size, mi->memory_speed, mi->memory_width, 
 1410: 		   mly_describe_code(mly_table_memorytype, mi->memory_type),
 1411: 		   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
 1412: 		   mi->cache_size);
 1413: 	mly_printf(sc, "CPU: %s @ %dMHZ\n", 
 1414: 		   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
 1415: 	if (mi->l2cache_size != 0)
 1416: 	    mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
 1417: 	if (mi->exmemory_size != 0)
 1418: 	    mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
 1419: 		       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
 1420: 		       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
 1421: 		       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
 1422: 	mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
 1423: 	mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
 1424: 		   mi->maximum_block_count, mi->maximum_sg_entries);
 1425: 	mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
 1426: 		   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
 1427: 	mly_printf(sc, "physical devices present %d\n",
 1428: 		   mi->physical_devices_present);
 1429: 	mly_printf(sc, "physical disks present/offline %d/%d\n",
 1430: 		   mi->physical_disks_present, mi->physical_disks_offline);
 1431: 	mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
 1432: 		   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
 1433: 		   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
 1434: 		   mi->virtual_channels_possible);
 1435: 	mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
 1436: 	mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
 1437: 		   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
 1438:     }
 1439: }
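/*
 * mly_describe_code() above maps a numeric code to a human-readable string
 * via tables such as mly_table_oemname.  The shape of such a table-driven
 * lookup is sketched below purely for illustration; the driver's real tables
 * and lookup routine live elsewhere and may differ in detail.
 */
#if 0
struct example_code_entry {
    u_int8_t	code;
    char	*label;
};

static char *
example_describe_code(struct example_code_entry *table, u_int8_t code)
{
    int		i;

    for (i = 0; table[i].label != NULL; i++)
	if (table[i].code == code)
	    return(table[i].label);
    return("unknown");
}
#endif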
 1440: 
 1441: #ifdef MLY_DEBUG
 1442: /********************************************************************************
 1443:  * Print some controller state
 1444:  */
 1445: static void
 1446: mly_printstate(struct mly_softc *sc)
 1447: {
 1448:     mly_printf(sc, "IDBR %02x  ODBR %02x  ERROR %02x  (%x %x %x)\n",
 1449: 		  MLY_GET_REG(sc, sc->mly_idbr),
 1450: 		  MLY_GET_REG(sc, sc->mly_odbr),
 1451: 		  MLY_GET_REG(sc, sc->mly_error_status),
 1452: 		  sc->mly_idbr,
 1453: 		  sc->mly_odbr,
 1454: 		  sc->mly_error_status);
 1455:     mly_printf(sc, "IMASK %02x  ISTATUS %02x\n",
 1456: 		  MLY_GET_REG(sc, sc->mly_interrupt_mask),
 1457: 		  MLY_GET_REG(sc, sc->mly_interrupt_status));
 1458:     mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
 1459: 		  MLY_GET_REG(sc, sc->mly_command_mailbox),
 1460: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
 1461: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
 1462: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
 1463: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
 1464: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
 1465: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
 1466: 		  MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
 1467:     mly_printf(sc, "STATUS  %02x %02x %02x %02x %02x %02x %02x %02x\n",
 1468: 		  MLY_GET_REG(sc, sc->mly_status_mailbox),
 1469: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
 1470: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
 1471: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
 1472: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
 1473: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
 1474: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
 1475: 		  MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
 1476:     mly_printf(sc, "        %04x        %08x\n",
 1477: 		  MLY_GET_REG2(sc, sc->mly_status_mailbox),
 1478: 		  MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
 1479: }
 1480: 
 1481: struct mly_softc	*mly_softc0 = NULL;
 1482: void
 1483: mly_printstate0(void)
 1484: {
 1485:     if (mly_softc0 != NULL)
 1486: 	mly_printstate(mly_softc0);
 1487: }
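/*
 * mly_softc0 is a debugger convenience: pointing it at a live controller's
 * softc (typically a one-time guarded assignment such as
 * "if (mly_softc0 == NULL) mly_softc0 = sc;" somewhere in the attach path;
 * the exact placement is an assumption here) lets the controller state be
 * dumped from DDB with no arguments:
 *
 *	call mly_printstate0
 */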
 1488: 
 1489: /********************************************************************************
 1490:  * Print a command
 1491:  */
 1492: static void
 1493: mly_print_command(struct mly_command *mc)
 1494: {
 1495:     struct mly_softc	*sc = mc->mc_sc;
 1496:     
 1497:     mly_printf(sc, "COMMAND @ %p\n", mc);
 1498:     mly_printf(sc, "  slot      %d\n", mc->mc_slot);
 1499:     mly_printf(sc, "  status    0x%x\n", mc->mc_status);
 1500:     mly_printf(sc, "  sense len %d\n", mc->mc_sense);
 1501:     mly_printf(sc, "  resid     %d\n", mc->mc_resid);
 1502:     mly_printf(sc, "  packet    %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
 1503:     if (mc->mc_packet != NULL)
 1504: 	mly_print_packet(mc);
 1505:     mly_printf(sc, "  data      %p/%d\n", mc->mc_data, mc->mc_length);
 1506:     mly_printf(sc, "  flags     %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n");
 1507:     mly_printf(sc, "  complete  %p\n", mc->mc_complete);
 1508:     mly_printf(sc, "  private   %p\n", mc->mc_private);
 1509: }
 1510: 
 1511: /********************************************************************************
 1512:  * Print a command packet
 1513:  */
 1514: static void
 1515: mly_print_packet(struct mly_command *mc)
 1516: {
 1517:     struct mly_softc			*sc = mc->mc_sc;
 1518:     struct mly_command_generic		*ge = (struct mly_command_generic *)mc->mc_packet;
 1519:     struct mly_command_scsi_small	*ss = (struct mly_command_scsi_small *)mc->mc_packet;
 1520:     struct mly_command_scsi_large	*sl = (struct mly_command_scsi_large *)mc->mc_packet;
 1521:     struct mly_command_ioctl		*io = (struct mly_command_ioctl *)mc->mc_packet;
 1522:     int					transfer;
 1523: 
 1524:     mly_printf(sc, "   command_id           %d\n", ge->command_id);
 1525:     mly_printf(sc, "   opcode               %d\n", ge->opcode);
 1526:     mly_printf(sc, "   command_control      fua %d  dpo %d  est %d  dd %s  nas %d ddis %d\n",
 1527: 		  ge->command_control.force_unit_access,
 1528: 		  ge->command_control.disable_page_out,
 1529: 		  ge->command_control.extended_sg_table,
 1530: 		  (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
 1531: 		  ge->command_control.no_auto_sense,
 1532: 		  ge->command_control.disable_disconnect);
 1533:     mly_printf(sc, "   data_size            %d\n", ge->data_size);
 1534:     mly_printf(sc, "   sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
 1535:     mly_printf(sc, "   lun                  %d\n", ge->addr.phys.lun);
 1536:     mly_printf(sc, "   target               %d\n", ge->addr.phys.target);
 1537:     mly_printf(sc, "   channel              %d\n", ge->addr.phys.channel);
 1538:     mly_printf(sc, "   logical device       %d\n", ge->addr.log.logdev);
 1539:     mly_printf(sc, "   controller           %d\n", ge->addr.phys.controller);
 1540:     mly_printf(sc, "   timeout              %d %s\n", 
 1541: 		  ge->timeout.value,
 1542: 		  (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" : 
 1543: 		  ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
 1544:     mly_printf(sc, "   maximum_sense_size   %d\n", ge->maximum_sense_size);
 1545:     switch(ge->opcode) {
 1546:     case MDACMD_SCSIPT:
 1547:     case MDACMD_SCSI:
 1548: 	mly_printf(sc, "   cdb length           %d\n", ss->cdb_length);
 1549: 	mly_printf(sc, "   cdb                  %*D\n", ss->cdb_length, ss->cdb, " ");
 1550: 	transfer = 1;
 1551: 	break;
 1552:     case MDACMD_SCSILC:
 1553:     case MDACMD_SCSILCPT:
 1554: 	mly_printf(sc, "   cdb length           %d\n", sl->cdb_length);
 1555: 	mly_printf(sc, "   cdb                  0x%llx\n", sl->cdb_physaddr);
 1556: 	transfer = 1;
 1557: 	break;
 1558:     case MDACMD_IOCTL:
 1559: 	mly_printf(sc, "   sub_ioctl            0x%x\n", io->sub_ioctl);
 1560: 	switch(io->sub_ioctl) {
 1561: 	case MDACIOCTL_SETMEMORYMAILBOX:
 1562: 	    mly_printf(sc, "   health_buffer_size   %d\n", 
 1563: 			  io->param.setmemorymailbox.health_buffer_size);
 1564: 	    mly_printf(sc, "   health_buffer_phys   0x%llx\n",
 1565: 			  io->param.setmemorymailbox.health_buffer_physaddr);
 1566: 	    mly_printf(sc, "   command_mailbox      0x%llx\n",
 1567: 			  io->param.setmemorymailbox.command_mailbox_physaddr);
 1568: 	    mly_printf(sc, "   status_mailbox       0x%llx\n",
 1569: 			  io->param.setmemorymailbox.status_mailbox_physaddr);
 1570: 	    transfer = 0;
 1571: 	    break;
 1572: 
 1573: 	case MDACIOCTL_SETREALTIMECLOCK:
 1574: 	case MDACIOCTL_GETHEALTHSTATUS:
 1575: 	case MDACIOCTL_GETCONTROLLERINFO:
 1576: 	case MDACIOCTL_GETLOGDEVINFOVALID:
 1577: 	case MDACIOCTL_GETPHYSDEVINFOVALID:
 1578: 	case MDACIOCTL_GETPHYSDEVSTATISTICS:
 1579: 	case MDACIOCTL_GETLOGDEVSTATISTICS:
 1580: 	case MDACIOCTL_GETCONTROLLERSTATISTICS:
 1581: 	case MDACIOCTL_GETBDT_FOR_SYSDRIVE:	    
 1582: 	case MDACIOCTL_CREATENEWCONF:
 1583: 	case MDACIOCTL_ADDNEWCONF:
 1584: 	case MDACIOCTL_GETDEVCONFINFO:
 1585: 	case MDACIOCTL_GETFREESPACELIST:
 1586: 	case MDACIOCTL_MORE:
 1587: 	case MDACIOCTL_SETPHYSDEVPARAMETER:
 1588: 	case MDACIOCTL_GETPHYSDEVPARAMETER:
 1589: 	case MDACIOCTL_GETLOGDEVPARAMETER:
 1590: 	case MDACIOCTL_SETLOGDEVPARAMETER:
 1591: 	    mly_printf(sc, "   param                %10D\n", io->param.data.param, " ");
 1592: 	    transfer = 1;
 1593: 	    break;
 1594: 
 1595: 	case MDACIOCTL_GETEVENT:
 1596: 	    mly_printf(sc, "   event                %d\n", 
 1597: 		       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
 1598: 	    transfer = 1;
 1599: 	    break;
 1600: 
 1601: 	case MDACIOCTL_SETRAIDDEVSTATE:
 1602: 	    mly_printf(sc, "   state                %d\n", io->param.setraiddevstate.state);
 1603: 	    transfer = 0;
 1604: 	    break;
 1605: 
 1606: 	case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
 1607: 	    mly_printf(sc, "   raid_device          %d\n", io->param.xlatephysdevtoraiddev.raid_device);
 1608: 	    mly_printf(sc, "   controller           %d\n", io->param.xlatephysdevtoraiddev.controller);
 1609: 	    mly_printf(sc, "   channel              %d\n", io->param.xlatephysdevtoraiddev.channel);
 1610: 	    mly_printf(sc, "   target               %d\n", io->param.xlatephysdevtoraiddev.target);
 1611: 	    mly_printf(sc, "   lun                  %d\n", io->param.xlatephysdevtoraiddev.lun);
 1612: 	    transfer = 0;
 1613: 	    break;
 1614: 
 1615: 	case MDACIOCTL_GETGROUPCONFINFO:
 1616: 	    mly_printf(sc, "   group                %d\n", io->param.getgroupconfinfo.group);
 1617: 	    transfer = 1;
 1618: 	    break;
 1619: 
 1620: 	case MDACIOCTL_GET_SUBSYSTEM_DATA:
 1621: 	case MDACIOCTL_SET_SUBSYSTEM_DATA:
 1622: 	case MDACIOCTL_STARTDISOCVERY:
 1623: 	case MDACIOCTL_INITPHYSDEVSTART:
 1624: 	case MDACIOCTL_INITPHYSDEVSTOP:
 1625: 	case MDACIOCTL_INITRAIDDEVSTART:
 1626: 	case MDACIOCTL_INITRAIDDEVSTOP:
 1627: 	case MDACIOCTL_REBUILDRAIDDEVSTART:
 1628: 	case MDACIOCTL_REBUILDRAIDDEVSTOP:
 1629: 	case MDACIOCTL_MAKECONSISTENTDATASTART:
 1630: 	case MDACIOCTL_MAKECONSISTENTDATASTOP:
 1631: 	case MDACIOCTL_CONSISTENCYCHECKSTART:
 1632: 	case MDACIOCTL_CONSISTENCYCHECKSTOP:
 1633: 	case MDACIOCTL_RESETDEVICE:
 1634: 	case MDACIOCTL_FLUSHDEVICEDATA:
 1635: 	case MDACIOCTL_PAUSEDEVICE:
 1636: 	case MDACIOCTL_UNPAUSEDEVICE:
 1637: 	case MDACIOCTL_LOCATEDEVICE:
 1638: 	case MDACIOCTL_SETMASTERSLAVEMODE:
 1639: 	case MDACIOCTL_DELETERAIDDEV:
 1640: 	case MDACIOCTL_REPLACEINTERNALDEV:
 1641: 	case MDACIOCTL_CLEARCONF:
 1642: 	case MDACIOCTL_GETCONTROLLERPARAMETER:
 1643: 	case MDACIOCTL_SETCONTRLLERPARAMETER:
 1644: 	case MDACIOCTL_CLEARCONFSUSPMODE:
 1645: 	case MDACIOCTL_STOREIMAGE:
 1646: 	case MDACIOCTL_READIMAGE:
 1647: 	case MDACIOCTL_FLASHIMAGES:
 1648: 	case MDACIOCTL_RENAMERAIDDEV:
 1649: 	default:			/* no idea what to print */
 1650: 	    transfer = 0;
 1651: 	    break;
 1652: 	}
 1653: 	break;
 1654: 
 1655:     case MDACMD_IOCTLCHECK:
 1656:     case MDACMD_MEMCOPY:
 1657:     default:
 1658: 	transfer = 0;
 1659: 	break;	/* print nothing */
 1660:     }
 1661:     if (transfer) {
 1662: 	if (ge->command_control.extended_sg_table) {
 1663: 	    mly_printf(sc, "   sg table             0x%llx/%d\n",
 1664: 			  ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
 1665: 	} else {
 1666: 	    mly_printf(sc, "   0000                 0x%llx/%lld\n",
 1667: 			  ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
 1668: 	    mly_printf(sc, "   0001                 0x%llx/%lld\n",
 1669: 			  ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
 1670: 	}
 1671:     }
 1672: }
 1673: 
 1674: /********************************************************************************
 1675:  * Panic in a slightly informative fashion
 1676:  */
 1677: static void
 1678: mly_panic(struct mly_softc *sc, char *reason)
 1679: {
 1680:     mly_printstate(sc);
 1681:     panic(reason);
 1682: }
 1683: #endif
 1684: 
 1685: /********************************************************************************
 1686:  * Print queue statistics, callable from DDB.
 1687:  */
 1688: void
 1689: mly_print_controller(int controller)
 1690: {
 1691:     struct mly_softc	*sc;
 1692:     
 1693:     if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) {
 1694: 	printf("mly: controller %d invalid\n", controller);
 1695:     } else {
 1696: 	device_printf(sc->mly_dev, "queue    curr max\n");
 1697: 	device_printf(sc->mly_dev, "free     %04d/%04d\n", 
 1698: 		      sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max);
 1699: 	device_printf(sc->mly_dev, "ready    %04d/%04d\n", 
 1700: 		      sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max);
 1701: 	device_printf(sc->mly_dev, "busy     %04d/%04d\n", 
 1702: 		      sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max);
 1703: 	device_printf(sc->mly_dev, "complete %04d/%04d\n", 
 1704: 		      sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max);
 1705:     }
 1706: }
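/*
 * Example DDB usage (the function is deliberately non-static so the debugger
 * can reach it):
 *
 *	call mly_print_controller(0)
 *
 * where 0 is the unit number of the controller to dump.
 */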
 1707: 
 1708: 
 1709: /********************************************************************************
 1710:  ********************************************************************************
 1711:                                                          Control device interface
 1712:  ********************************************************************************
 1713:  ********************************************************************************/
 1714: 
 1715: /********************************************************************************
 1716:  * Accept an open operation on the control device.
 1717:  */
 1718: static int
 1719: mly_user_open(dev_t dev, int flags, int fmt, d_thread_t *td)
 1720: {
 1721:     int			unit = minor(dev);
 1722:     struct mly_softc	*sc = devclass_get_softc(devclass_find("mly"), unit);
 1723: 
 1724:     sc->mly_state |= MLY_STATE_OPEN;
 1725:     return(0);
 1726: }
 1727: 
 1728: /********************************************************************************
 1729:  * Accept the last close on the control device.
 1730:  */
 1731: static int
 1732: mly_user_close(dev_t dev, int flags, int fmt, d_thread_t *td)
 1733: {
 1734:     int			unit = minor(dev);
 1735:     struct mly_softc	*sc = devclass_get_softc(devclass_find("mly"), unit);
 1736: 
 1737:     sc->mly_state &= ~MLY_STATE_OPEN;
 1738:     return (0);
 1739: }
 1740: 
 1741: /********************************************************************************
 1742:  * Handle controller-specific control operations.
 1743:  */
 1744: static int
 1745: mly_user_ioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
 1746: {
 1747:     struct mly_softc		*sc = (struct mly_softc *)dev->si_drv1;
 1748:     struct mly_user_command	*uc = (struct mly_user_command *)addr;
 1749:     struct mly_user_health	*uh = (struct mly_user_health *)addr;
 1750:     
 1751:     switch(cmd) {
 1752:     case MLYIO_COMMAND:
 1753: 	return(mly_user_command(sc, uc));
 1754:     case MLYIO_HEALTH:
 1755: 	return(mly_user_health(sc, uh));
 1756:     default:
 1757: 	return(ENOIOCTL);
 1758:     }
 1759: }
 1760: 
 1761: /********************************************************************************
 1762:  * Execute a command passed in from userspace.
 1763:  *
 1764:  * The control structure contains the actual command for the controller, as well
 1765:  * as the user-space data pointer and data size, and an optional sense buffer
 1766:  * size/pointer.  On completion, the data size is adjusted to the command
 1767:  * residual, and the sense buffer size to the size of the returned sense data.
 1768:  * 
 1769:  */
 1770: static int
 1771: mly_user_command(struct mly_softc *sc, struct mly_user_command *uc)
 1772: {
 1773:     struct mly_command			*mc = NULL;
 1774:     int					error, s;
 1775: 
 1776:     /* allocate a command */
 1777:     if (mly_alloc_command(sc, &mc)) {
 1778: 	error = ENOMEM;
 1779: 	goto out;		/* XXX Linux version will wait for a command */
 1780:     }
 1781: 
 1782:     /* handle data size/direction */
 1783:     mc->mc_length = (uc->DataTransferLength >= 0) ? uc->DataTransferLength : -uc->DataTransferLength;
 1784:     if (mc->mc_length > 0) {
 1785: 	if ((mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_NOWAIT)) == NULL) {
 1786: 	    error = ENOMEM;
 1787: 	    goto out;
 1788: 	}
 1789:     }
 1790:     if (uc->DataTransferLength > 0) {
 1791: 	mc->mc_flags |= MLY_CMD_DATAIN;
 1792: 	bzero(mc->mc_data, mc->mc_length);
 1793:     }
 1794:     if (uc->DataTransferLength < 0) {
 1795: 	mc->mc_flags |= MLY_CMD_DATAOUT;
 1796: 	if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0)
 1797: 	    goto out;
 1798:     }
 1799: 
 1800:     /* copy the controller command */
 1801:     bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox));
 1802: 
 1803:     /* clear command completion handler so that we get woken up */
 1804:     mc->mc_complete = NULL;
 1805: 
 1806:     /* execute the command */
 1807:     s = splcam();
 1808:     mly_requeue_ready(mc);
 1809:     mly_startio(sc);
 1810:     while (!(mc->mc_flags & MLY_CMD_COMPLETE))
 1811: 	tsleep(mc, 0, "mlyioctl", 0);
 1812:     splx(s);
 1813: 
 1814:     /* return the data to userspace */
 1815:     if (uc->DataTransferLength > 0)
 1816: 	if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0)
 1817: 	    goto out;
 1818:     
 1819:     /* return the sense buffer to userspace */
 1820:     if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) {
 1821: 	if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, 
 1822: 			     min(uc->RequestSenseLength, mc->mc_sense))) != 0)
 1823: 	    goto out;
 1824:     }
 1825:     
 1826:     /* return command results to userspace (caller will copy out) */
 1827:     uc->DataTransferLength = mc->mc_resid;
 1828:     uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
 1829:     uc->CommandStatus = mc->mc_status;
 1830:     error = 0;
 1831: 
 1832:  out:
 1833:     if ((mc != NULL) && (mc->mc_data != NULL))
 1834: 	free(mc->mc_data, M_DEVBUF);
 1835:     if (mc != NULL)
 1836: 	mly_release_command(mc);
 1837:     return(error);
 1838: }
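/*
 * For illustration only: roughly how a userland management tool might drive
 * the MLYIO_COMMAND ioctl handled above.  The device path and the header
 * providing struct mly_user_command / MLYIO_COMMAND are assumptions; the
 * field names are those used in mly_user_command() above.  A positive
 * DataTransferLength asks the controller to return that many bytes, a
 * negative value supplies |length| bytes to the controller, and on return
 * DataTransferLength holds the residual and CommandStatus the command status.
 *
 *	struct mly_user_command uc;
 *	u_int8_t sense[32], databuf[512];
 *	int fd;
 *
 *	bzero(&uc, sizeof(uc));
 *	... fill in uc.CommandMailbox with a controller command packet ...
 *	uc.DataTransferBuffer = databuf;
 *	uc.DataTransferLength = sizeof(databuf);
 *	uc.RequestSenseBuffer = sense;
 *	uc.RequestSenseLength = sizeof(sense);
 *
 *	fd = open("/dev/mly0", O_RDWR);		(path is an assumption)
 *	if (fd >= 0 && ioctl(fd, MLYIO_COMMAND, &uc) == 0)
 *		printf("status 0x%x, residual %d, sense %d bytes\n",
 *		    uc.CommandStatus, uc.DataTransferLength,
 *		    uc.RequestSenseLength);
 *
 * MLYIO_HEALTH (handled below) is similar: the caller passes a struct
 * mly_user_health whose HealthStatusBuffer supplies the last change counter
 * seen and receives the controller's current health status.
 */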
 1839: 
 1840: /********************************************************************************
 1841:  * Return health status to userspace.  If the health change index in the user
 1842:  * structure does not match that currently exported by the controller, we
 1843:  * return the current status immediately.  Otherwise, we block until either
 1844:  * interrupted or new status is delivered.
 1845:  */
 1846: static int
 1847: mly_user_health(struct mly_softc *sc, struct mly_user_health *uh)
 1848: {
 1849:     struct mly_health_status		mh;
 1850:     int					error, s;
 1851:     
 1852:     /* fetch the current health status from userspace */
 1853:     if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0)
 1854: 	return(error);
 1855: 
 1856:     /* spin waiting for a status update */
 1857:     s = splcam();
 1858:     error = EWOULDBLOCK;
 1859:     while ((error != 0) && (sc->mly_event_change == mh.change_counter))
 1860: 	error = tsleep(&sc->mly_event_change, PCATCH, "mlyhealth", 0);
 1861:     splx(s);
 1862:     
 1863:     /* copy the controller's health status buffer out (there is a race here if it changes again) */
 1864:     error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer, 
 1865: 		    sizeof(sc->mly_mmbox->mmm_health.status));
 1866:     return(error);
 1867: }