Commit 5ed26a4f authored by Steven Cole's avatar Steven Cole Committed by David Mosberger

[PATCH] ia64: spelling fixes

parent 39d4a447
...@@ -122,7 +122,7 @@ ...@@ -122,7 +122,7 @@
#endif #endif
/* /*
** The number of pdir entries to "free" before issueing ** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes. ** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries ** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this ** allocated and free'd/purged at a time might make this
......
...@@ -505,7 +505,7 @@ simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs) ...@@ -505,7 +505,7 @@ simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs)
} }
/* /*
* very simple loop because we get interrupts only when receving * very simple loop because we get interrupts only when receiving
*/ */
while (simeth_rx(dev)); while (simeth_rx(dev));
} }
......
...@@ -103,7 +103,7 @@ ia32_exception (struct pt_regs *regs, unsigned long isr) ...@@ -103,7 +103,7 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
* C1 reg you need in case of a stack fault, 0x040 is the stack * C1 reg you need in case of a stack fault, 0x040 is the stack
* fault bit. We should only be taking one exception at a time, * fault bit. We should only be taking one exception at a time,
* so if this combination doesn't produce any single exception, * so if this combination doesn't produce any single exception,
* then we have a bad program that isn't syncronizing its FPU usage * then we have a bad program that isn't synchronizing its FPU usage
* and it will suffer the consequences since we won't be able to * and it will suffer the consequences since we won't be able to
* fully reproduce the context of the exception * fully reproduce the context of the exception
*/ */
......
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
* Linux has a controller-independent x86 interrupt architecture. * Linux has a controller-independent x86 interrupt architecture.
* every controller has a 'controller-template', that is used * every controller has a 'controller-template', that is used
* by the main code to do the right thing. Each driver-visible * by the main code to do the right thing. Each driver-visible
* interrupt source is transparently wired to the apropriate * interrupt source is transparently wired to the appropriate
* controller. Thus drivers need not be aware of the * controller. Thus drivers need not be aware of the
* interrupt-controller. * interrupt-controller.
* *
...@@ -705,7 +705,7 @@ unsigned int probe_irq_mask(unsigned long val) ...@@ -705,7 +705,7 @@ unsigned int probe_irq_mask(unsigned long val)
* The interrupt probe logic state is returned to its previous * The interrupt probe logic state is returned to its previous
* value. * value.
* *
* BUGS: When used in a module (which arguably shouldnt happen) * BUGS: When used in a module (which arguably shouldn't happen)
* nothing prevents two IRQ probe callers from overlapping. The * nothing prevents two IRQ probe callers from overlapping. The
* results of this are non-optimal. * results of this are non-optimal.
*/ */
......
...@@ -1138,7 +1138,7 @@ ia64_mca_cpe_int_caller(void *dummy) ...@@ -1138,7 +1138,7 @@ ia64_mca_cpe_int_caller(void *dummy)
* ia64_mca_cpe_poll * ia64_mca_cpe_poll
* *
* Poll for Corrected Platform Errors (CPEs), dynamically adjust * Poll for Corrected Platform Errors (CPEs), dynamically adjust
* polling interval based on occurance of an event. * polling interval based on occurrence of an event.
* *
* Inputs : dummy(unused) * Inputs : dummy(unused)
* Outputs : None * Outputs : None
......
...@@ -116,7 +116,7 @@ ia64_sal_init (struct ia64_sal_systab *systab) ...@@ -116,7 +116,7 @@ ia64_sal_init (struct ia64_sal_systab *systab)
p = (char *) (systab + 1); p = (char *) (systab + 1);
for (i = 0; i < systab->entry_count; i++) { for (i = 0; i < systab->entry_count; i++) {
/* /*
* The first byte of each entry type contains the type desciptor. * The first byte of each entry type contains the type descriptor.
*/ */
switch (*p) { switch (*p) {
case SAL_DESC_ENTRY_POINT: case SAL_DESC_ENTRY_POINT:
......
...@@ -221,7 +221,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) ...@@ -221,7 +221,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do { do {
/* /*
* If we're too close to the next clock tick for comfort, we increase the * If we're too close to the next clock tick for comfort, we increase the
* saftey margin by intentionally dropping the next tick(s). We do NOT update * safety margin by intentionally dropping the next tick(s). We do NOT update
* itm.next because that would force us to call do_timer() which in turn would * itm.next because that would force us to call do_timer() which in turn would
* let our clock run too fast (with the potentially devastating effect of * let our clock run too fast (with the potentially devastating effect of
* losing monotonicity of time). * losing monotonicity of time).
......
...@@ -789,7 +789,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs) ...@@ -789,7 +789,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
* *
* ldX.a (advanced load): * ldX.a (advanced load):
* - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the * - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
* address doesn't match requested size alignement. This means that we would * address doesn't match requested size alignment. This means that we would
* possibly need more than one load to get the result. * possibly need more than one load to get the result.
* *
* The load part can be handled just like a normal load, however the difficult * The load part can be handled just like a normal load, however the difficult
......
...@@ -316,7 +316,7 @@ GLOBAL_ENTRY(__copy_user) ...@@ -316,7 +316,7 @@ GLOBAL_ENTRY(__copy_user)
// Beginning of long memcpy (i.e. > 16 bytes) // Beginning of long memcpy (i.e. > 16 bytes)
// //
.long_copy_user: .long_copy_user:
tbit.nz p6,p7=src1,0 // odd alignement tbit.nz p6,p7=src1,0 // odd alignment
and tmp=7,tmp and tmp=7,tmp
;; ;;
cmp.eq p10,p8=r0,tmp cmp.eq p10,p8=r0,tmp
......
...@@ -137,7 +137,7 @@ GLOBAL_ENTRY(do_csum) ...@@ -137,7 +137,7 @@ GLOBAL_ENTRY(do_csum)
mov saved_pr=pr // preserve predicates (rotation) mov saved_pr=pr // preserve predicates (rotation)
(p6) br.ret.spnt.many rp // return if zero or negative length (p6) br.ret.spnt.many rp // return if zero or negative length
mov hmask=-1 // intialize head mask mov hmask=-1 // initialize head mask
tbit.nz p15,p0=buf,0 // is buf an odd address? tbit.nz p15,p0=buf,0 // is buf an odd address?
and first1=-8,buf // 8-byte align down address of first1 element and first1=-8,buf // 8-byte align down address of first1 element
......
...@@ -239,7 +239,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) ...@@ -239,7 +239,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
for (i = index + nslots - 1; i >= index; i--) for (i = index + nslots - 1; i >= index; i--)
io_tlb_list[i] = ++count; io_tlb_list[i] = ++count;
/* /*
* Step 2: merge the returned slots with the preceeding slots, if * Step 2: merge the returned slots with the preceding slots, if
* available (non zero) * available (non zero)
*/ */
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) &&
...@@ -399,7 +399,7 @@ swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, in ...@@ -399,7 +399,7 @@ swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, in
/* /*
* Map a set of buffers described by scatterlist in streaming mode for DMA. This is the * Map a set of buffers described by scatterlist in streaming mode for DMA. This is the
* scather-gather version of the above swiotlb_map_single interface. Here the scatter * scatter-gather version of the above swiotlb_map_single interface. Here the scatter
* gather list elements are each tagged with the appropriate dma address and length. They * gather list elements are each tagged with the appropriate dma address and length. They
* are obtained via sg_dma_{address,length}(SG). * are obtained via sg_dma_{address,length}(SG).
* *
......
...@@ -226,7 +226,7 @@ enum { ...@@ -226,7 +226,7 @@ enum {
/* Encodings for machine check parameter types */ /* Encodings for machine check parameter types */
enum { enum {
SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezevous interrupt */ SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */
SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */ SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */ SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */
}; };
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* addresses. Thus, we need to be careful not to let the user * addresses. Thus, we need to be careful not to let the user
* trick us into accessing kernel memory that would normally be * trick us into accessing kernel memory that would normally be
* inaccessible. This code is also fairly performance sensitive, * inaccessible. This code is also fairly performance sensitive,
* so we want to spend as little time doing saftey checks as * so we want to spend as little time doing safety checks as
* possible. * possible.
* *
* To make matters a bit more interesting, these macros sometimes also * To make matters a bit more interesting, these macros sometimes also
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment