File:  [DragonFly] / src / sys / i386 / i386 / Attic / mplock.s
Revision 1.10
Thu Sep 25 23:49:03 2003 UTC by dillon
Branches: MAIN
CVS tags: HEAD
Fix a number of mp_lock issues.  I had outsmarted myself trying to deal with
td->td_mpcount / mp_lock races.  The new rule is: first modify
td->td_mpcount, then deal with mp_lock assuming that an interrupt might
have already dealt with it for you; various other pieces of code
handle the race if an interrupt occurs between those two data
accesses.

/*
 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
 * $DragonFly: src/sys/i386/i386/mplock.s,v 1.10 2003/09/25 23:49:03 dillon Exp $
 *
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				DragonFly MPLOCK operation
 *
 * Each thread has an MP lock count, td_mpcount, and there is a shared
 * global called mp_lock.  mp_lock is the physical MP lock and contains either
 * -1 or the cpuid of the cpu owning the lock.  The count is *NOT* integrated
 * into mp_lock but instead resides in each thread's td_mpcount.
 *
 * When obtaining or releasing the MP lock the td_mpcount is PREDISPOSED
 * to the desired count *PRIOR* to operating on the mp_lock itself.  MP
 * lock operations can occur outside a critical section with interrupts
 * enabled, with the proviso (which the routines below handle) that an
 * interrupt may come along and preempt us, racing our cmpxchgl instruction
 * to perform the operation we have requested by pre-disposing td_mpcount.
 *
 * Additionally, the LWKT threading system manages the MP lock and
 * lwkt_switch(), in particular, may be called after pre-disposing td_mpcount
 * to handle 'blocking' on the MP lock.
 *
 *
 * Recoded from the FreeBSD original:
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */
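As a reading aid, the predispose-then-acquire rule described above can be
restated in C.  This is an illustrative sketch only, not the kernel's code:
the predispose_then_acquire() and thread_sketch names and the use of GCC's
__sync_bool_compare_and_swap() builtin are assumptions for illustration,
while mp_lock and td_mpcount mirror the assembly below.

    /* Sketch only: the ordering rule, not kernel code.  td_mpcount is
     * always written BEFORE mp_lock is examined or modified, so a
     * preempting interrupt always sees the count already predisposed. */
    struct thread_sketch { int td_mpcount; };
    static volatile int mp_lock_sketch = -1;   /* -1 or owning cpuid */

    static void
    predispose_then_acquire(struct thread_sketch *td, int my_cpuid)
    {
    	td->td_mpcount++;          /* step 1: predispose the count */
    	/* step 2: only now touch the physical lock; the swap may fail
    	 * because an interrupt already acquired it on our behalf. */
    	__sync_bool_compare_and_swap(&mp_lock_sketch, -1, my_cpuid);
    }

The essential invariant is the write ordering: td_mpcount first, mp_lock
second, with other code cleaning up if an interrupt lands in between.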

#include <machine/asmacros.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>

#include "assym.s"

/*
 * YYY Debugging only.  Define this to be paranoid about invalidating the
 * TLB when we get giant.
 */
#undef PARANOID_INVLTLB

	.data
	ALIGN_DATA
#ifdef SMP
	.globl	mp_lock
mp_lock:
	.long	-1			/* initialized to not held */
#endif

	.text
	SUPERALIGN_TEXT

	/*
	 * Note on cmpxchgl... exchanges ecx with mem if mem matches eax.
	 * Z=1 (jz) on success.   A lock prefix is required for MP.
	 */
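For readers less familiar with the instruction, a minimal GCC
inline-assembly wrapper with the semantics the note describes might look
like the following.  The try_cas() name and the operand constraints are
illustrative assumptions for an i386 target, not code from this file.

    /* Sketch: cmpxchgl compares EAX with *mem; on a match it stores the
     * new value and sets ZF, otherwise it loads *mem into EAX and
     * clears ZF.  The lock prefix makes the operation atomic on MP. */
    static inline int
    try_cas(volatile int *mem, int expect, int newval)
    {
    	char ok;
    	__asm__ __volatile__(
    		"lock; cmpxchgl %3,%1; setz %0"
    		: "=q" (ok), "+m" (*mem), "+a" (expect)
    		: "r" (newval)
    		: "cc");
    	return (ok);		/* 1 on success, 0 on failure */
    }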
NON_GPROF_ENTRY(cpu_get_initial_mplock)
	movl	PCPU(curthread),%ecx
	movl	$1,TD_MPCOUNT(%ecx)	/* curthread has mpcount of 1 */
	movl	$0,mp_lock		/* owned by cpu 0 */
	NON_GPROF_RET

	/*
	 * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
	 * only adjusts mp_lock, it does not touch td_mpcount.  Callers
	 * should always increment td_mpcount *before* trying to acquire
	 * the actual lock, predisposing td_mpcount to the desired state of
	 * the lock.
	 *
	 * NOTE! Only call cpu_try_mplock() inside a critical section.  If
	 * you don't, an interrupt can come along and get and release
	 * the lock before our cmpxchgl instruction, causing us to fail
	 * but resulting in the lock being held by our cpu.
	 */
NON_GPROF_ENTRY(cpu_try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock	/* ecx<->mem if eax matches */
	jnz	1f
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3	/* YYY check and remove */
#endif
	movl	$1,%eax
	NON_GPROF_RET
1:
	subl	%eax,%eax
	NON_GPROF_RET

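In C terms, cpu_try_mplock() amounts to a single compare-and-swap on
mp_lock with no td_mpcount bookkeeping.  A minimal sketch, again assuming
GCC's __sync builtin rather than the kernel's actual code:

    /* Sketch of cpu_try_mplock(): attempt -1 -> cpuid, nothing more. */
    static int
    cpu_try_mplock_sketch(volatile int *lk, int my_cpuid)
    {
    	/* Caller must be in a critical section: otherwise an interrupt
    	 * can get and release the lock before our compare-and-swap,
    	 * making it fail while the lock ends up held by our cpu. */
    	return __sync_bool_compare_and_swap(lk, -1, my_cpuid);
    }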
	/*
	 * get_mplock() obtains the MP lock and may switch away if it cannot
	 * get it.  This routine may be called WITHOUT a critical section
	 * and with cpu interrupts enabled.
	 *
	 * To handle races in a sane fashion we predispose TD_MPCOUNT,
	 * which prevents us from losing the lock in a race if we already
	 * have it or happen to get it.  It also means that we might get
	 * the lock in an interrupt race before we have a chance to execute
	 * our cmpxchgl instruction, so we have to handle that case.
	 * Fortunately, simply calling lwkt_switch() handles the situation
	 * for us and also 'blocks' us until the MP lock can be obtained.
	 */
NON_GPROF_ENTRY(get_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	incl	TD_MPCOUNT(%edx)	/* predispose */
	cmpl	%ecx,mp_lock
	jne	1f
	NON_GPROF_RET			/* success! */

	/*
	 * We don't already own the mp_lock, use cmpxchgl to try to get
	 * it.
	 */
1:
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock
	jnz	2f
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
	NON_GPROF_RET			/* success */

	/*
	 * Failure, but we could end up owning mp_lock anyway due to
	 * an interrupt race.  lwkt_switch() will clean up the mess
	 * and 'block' until the mp_lock is obtained.
	 */
2:
	call	lwkt_switch
#ifdef INVARIANTS
	movl	PCPU(cpuid),%eax	/* failure */
	cmpl	%eax,mp_lock
	jne	4f
#endif
	NON_GPROF_RET
#ifdef INVARIANTS
4:
	cmpl	$0,panicstr		/* don't double panic */
	je	badmp_get2
	NON_GPROF_RET
#endif

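The overall control flow of get_mplock() can be restated in C.  A hedged
sketch under the same assumptions as the earlier ones (GCC __sync builtin;
lwkt_switch() declared extern as the blocking fallback); this is a reading
aid, not the routine itself:

    struct thread_sketch { int td_mpcount; };
    extern volatile int mp_lock;	/* -1 or owning cpuid */
    extern void lwkt_switch(void);	/* 'blocks' until lock obtained */

    static void
    get_mplock_sketch(struct thread_sketch *td, int my_cpuid)
    {
    	td->td_mpcount++;		/* predispose */
    	if (mp_lock == my_cpuid)
    		return;			/* already owned: recursive hold */
    	if (__sync_bool_compare_and_swap(&mp_lock, -1, my_cpuid))
    		return;			/* acquired outright */
    	/* Failed, but an interrupt race may have handed us the lock
    	 * anyway; lwkt_switch() cleans up and otherwise blocks. */
    	lwkt_switch();
    }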
	/*
	 * try_mplock() attempts to obtain the MP lock.  1 is returned on
	 * success, 0 on failure.  We do not have to be in a critical section
	 * and interrupts are almost certainly enabled.
	 *
	 * We must pre-dispose TD_MPCOUNT in order to deal with races in
	 * a reasonable way.
	 */
NON_GPROF_ENTRY(try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	incl	TD_MPCOUNT(%edx)		/* pre-dispose for race */
	cmpl	%ecx,mp_lock
	je	1f				/* trivial success */
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock
	jnz	2f
	/*
	 * Success
	 */
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3	/* YYY check and remove */
#endif
1:
	movl	$1,%eax				/* success (cmpxchgl good!) */
	NON_GPROF_RET

	/*
	 * The cmpxchgl failed but we might have raced.  Undo the mess by
	 * pre-disposing TD_MPCOUNT and then checking.  If TD_MPCOUNT is
	 * still non-zero we don't care what state the lock is in (since
	 * we obviously didn't own it above), just return failure even if
	 * we won the lock in an interrupt race.  If TD_MPCOUNT is zero
	 * make sure we don't own the lock in case we did win it in a race.
	 */
2:
	decl	TD_MPCOUNT(%edx)
	cmpl	$0,TD_MPCOUNT(%edx)
	jne	3f
	movl	PCPU(cpuid),%eax
	movl	$-1,%ecx
	lock cmpxchgl %ecx,mp_lock
3:
	subl	%eax,%eax
	NON_GPROF_RET

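The failure path above is the subtle part.  A C sketch of the same undo
logic, under the assumptions used in the earlier sketches (illustrative
names, GCC __sync builtin):

    /* Sketch of try_mplock()'s failure path: back out the predisposed
     * count; if it drops to zero, release a lock that an interrupt race
     * may have won for us (cpuid -> -1), then report failure. */
    struct thread_sketch { int td_mpcount; };
    static int
    try_mplock_fail_sketch(struct thread_sketch *td, volatile int *lk,
    		       int my_cpuid)
    {
    	if (--td->td_mpcount == 0)
    		__sync_bool_compare_and_swap(lk, my_cpuid, -1);
    	return 0;		/* failure is reported regardless */
    }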
	/*
	 * rel_mplock() releases a previously obtained MP lock.
	 *
	 * In order to release the MP lock we pre-dispose TD_MPCOUNT for
	 * the release and basically repeat the release portion of try_mplock
	 * above.
	 */
NON_GPROF_ENTRY(rel_mplock)
	movl	PCPU(curthread),%edx
	movl	TD_MPCOUNT(%edx),%eax
#ifdef INVARIANTS
	cmpl	$0,%eax
	je	badmp_rel
#endif
	subl	$1,%eax
	movl	%eax,TD_MPCOUNT(%edx)
	cmpl	$0,%eax
	jne	3f
	movl	PCPU(cpuid),%eax
	movl	$-1,%ecx
	lock cmpxchgl %ecx,mp_lock
3:
	NON_GPROF_RET

#ifdef INVARIANTS

badmp_get:
	pushl	$bmpsw1
	call	panic
badmp_get2:
	pushl	$bmpsw1a
	call	panic
badmp_rel:
	pushl	$bmpsw2
	call	panic

	.data

bmpsw1:
	.asciz	"try/get_mplock(): already have lock! %d %p"

bmpsw1a:
	.asciz	"try/get_mplock(): failed on count or switch %d %p"

bmpsw2:
	.asciz	"rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"

#endif

#if 0
/* after 1st acquire of lock we grab all hardware INTs */
#ifdef GRAB_LOPRIO
#define GRAB_HWI	movl	$ALLHWI_LEVEL, lapic_tpr

/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
#define ARB_HWI		movl	$LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
#endif
#endif