/*
 *	SMP lock primitives for building ix86 locks
 *	(not yet used).
 *
 *		Alan Cox, alan@redhat.com, 1995
 */
 
/*
 *	This would be much simpler, but far less clear and far harder
 *	to borrow for other processors, if it were just assembler.
 */
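
/*
 *	For reference, a minimal sketch (guarded out, illustrative only)
 *	of the structure layout and helpers this file assumes. The real
 *	definitions live elsewhere; the field names below are simply the
 *	ones used by the code in this file, and the types are guesses.
 */
#if 0
struct spinlock {
	volatile unsigned long lock;	/* bit 0 is the lock bit */
	int cpu;			/* owning CPU, or NO_PROC_ID when free */
	int users;			/* recursion depth on the owning CPU */
	unsigned long spins;		/* contention counter, for statistics */
	int priority;			/* lock ordering level for sanity checks */
	int oldpri;			/* caller's lock_order, saved on acquire */
	const char *name;		/* printed by the ordering panics */
};

/* Atomically set/clear a bit, returning its previous value */
extern int lock_set_bit(int nr, volatile unsigned long *addr);
extern int lock_clear_bit(int nr, volatile unsigned long *addr);
#endif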

static __inline__ int prim_spin_lock(struct spinlock *sp)
{
	int processor=smp_processor_id();
	
	/*
	 *	Grab the lock bit
	 */
	 
	while(lock_set_bit(0,&sp->lock))
	{
		/*
		 *	Failed; check whether we already own the lock
		 */
		 
		if(sp->cpu==processor)
		{
			sp->users++;
			return 0;
		}
		/*
		 *	Spin in the cache S state if possible
		 */
		while(sp->lock)
		{
			/*
			 *	Wait for any invalidates to go off
			 */
			 
			if(smp_invalidate_needed&(1<<processor))
				while(lock_clear_bit(processor,&smp_invalidate_needed))
					local_flush_tlb();
			sp->spins++;
		}
		/*
		 *	Someone wrote the line, we go 'I' and get
		 *	the cache entry. Now try to regrab
		 */
	}
	sp->users++;
	sp->cpu=processor;
	return 1;
}

/*
 *	Release a spin lock
 */
 
static __inline__ int prim_spin_unlock(struct spinlock *sp)
{
	/*
	 *	This is safe. The decrement is still guarded by the lock.
	 *	A multilock would not be safe this way.
	 */
	if(!--sp->users)
	{
		sp->cpu=NO_PROC_ID;
		lock_clear_bit(0,&sp->lock);
		return 1;
	}
	return 0;
}
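
/*
 *	Illustrative only: how the two primitives pair up. Acquisition is
 *	recursive per CPU, so lock and unlock calls must balance; only the
 *	final unlock actually clears the lock bit. demo_lock is a
 *	hypothetical example, assumed initialised with the lock bit clear
 *	and cpu set to NO_PROC_ID.
 */
#if 0
static struct spinlock demo_lock;

static void demo_recursion(void)
{
	prim_spin_lock(&demo_lock);	/* returns 1: fresh acquisition */
	prim_spin_lock(&demo_lock);	/* returns 0: already ours, users is 2 */
	prim_spin_unlock(&demo_lock);	/* returns 0: still held, users is 1 */
	prim_spin_unlock(&demo_lock);	/* returns 1: lock bit cleared */
}
#endif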


/*
 *	Non blocking lock grab
 */
 
static __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
	if(lock_set_bit(0,&sp->lock))
		return 0;		/* Locked already */
	sp->users++;
	return 1;			/* We got the lock */
}
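
/*
 *	Illustrative only: the non-blocking form lets a caller back off
 *	and do other work instead of spinning. demo_poll is hypothetical.
 */
#if 0
static int demo_poll(struct spinlock *sp)
{
	if(!prim_spin_lock_nb(sp))
		return 0;		/* busy: try again later */
	/* ... short critical section ... */
	prim_spin_unlock(sp);
	return 1;
}
#endif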


/*
 *	These wrap the locking primitives up for usage
 */
 
static __inline__ void spinlock(struct spinlock *sp)
{
	if(sp->priority<current->lock_order)
		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
	if(prim_spin_lock(sp))
	{
		/*
		 *	We got a new lock. Update the priority chain
		 */
		sp->oldpri=current->lock_order;
		current->lock_order=sp->priority;
	}
}

static __inline__ void spinunlock(struct spinlock *sp)
{
	int pri;
	if(current->lock_order!=sp->priority)
		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
	pri=sp->oldpri;
	if(prim_spin_unlock(sp))
	{
		/*
		 *	Update the debugging lock priority chain. We have
		 *	dropped our last claim on the lock.
		 */
		current->lock_order=pri;
	}
}

static __inline__ int spintestlock(struct spinlock *sp)
{
	/*
	 *	We do no sanity checks, it's legal to optimistically
	 *	get a lower lock.
	 */
	return prim_spin_lock_nb(sp);
}

static __inline__ void spintestunlock(struct spinlock *sp)
{
	/*
	 *	A testlock doesn't update the lock chain so we
	 *	must not update it on free
	 */
	prim_spin_unlock(sp);
}
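
/*
 *	Illustrative only: the ordering discipline the wrappers enforce.
 *	Locks must be taken in non-decreasing priority order (taking a
 *	lock below current->lock_order panics) and released in reverse
 *	order of acquisition. Both locks and their priorities below are
 *	hypothetical.
 */
#if 0
static struct spinlock demo_low;	/* assume priority 5 */
static struct spinlock demo_high;	/* assume priority 10 */

static void demo_ordering(void)
{
	spinlock(&demo_low);		/* ok while lock_order <= 5 */
	spinlock(&demo_high);		/* ok: 10 >= current lock_order (5) */
	spinunlock(&demo_high);		/* release in reverse order */
	spinunlock(&demo_low);
}
#endif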