Commit 027dfac6 authored by Michael Ellerman

powerpc: Various typo fixes

Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent e289086f
@@ -2,7 +2,7 @@ IBM OPAL real-time clock
 ------------------------
 Required properties:
-- comapatible: Should be "ibm,opal-rtc"
+- compatible: Should be "ibm,opal-rtc"
 Optional properties:
 - wakeup-source: Decides if the wakeup is supported or not
...
@@ -18,7 +18,7 @@
 #define rLN r7 /* length of data to be processed */
 #define rIP r8 /* potiner to IV (CBC/CTR/XTS modes) */
 #define rKT r9 /* pointer to tweak key (XTS mode) */
-#define rT0 r11 /* pointers to en-/decrpytion tables */
+#define rT0 r11 /* pointers to en-/decryption tables */
 #define rT1 r10
 #define rD0 r9 /* data */
 #define rD1 r14
...
@@ -434,7 +434,7 @@ extern void slb_set_size(u16 size);
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
- * rt = register continaing the proto-VSID and into which the
+ * rt = register containing the proto-VSID and into which the
 * VSID will be stored
 * rx = scratch register (clobbered)
 *
...
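The hunk above quotes the proto-VSID scramble used by the hash MMU. As a rough illustration of the formula it cites, a minimal C sketch might look like the following; the multiplier and modulus are placeholders, not the kernel's real constants, and the in-kernel assembly also avoids the division by exploiting a 2^n - 1 modulus.

#include <stdint.h>

/* Sketch only: placeholder constants, not the powerpc hash MMU values. */
#define VSID_MULTIPLIER 200730139ULL        /* placeholder prime multiplier */
#define VSID_MODULUS    ((1ULL << 36) - 1)  /* placeholder 2^n - 1 modulus */

static uint64_t vsid_scramble(uint64_t protovsid)
{
	/* vsid = (protovsid * VSID_MULTIPLIER) % VSID_MODULUS */
	return (protovsid * VSID_MULTIPLIER) % VSID_MODULUS;
}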
@@ -57,7 +57,7 @@ struct pci_dn;
 /*
 * The struct is used to trace PE related EEH functionality.
 * In theory, there will have one instance of the struct to
- * be created against particular PE. In nature, PEs corelate
+ * be created against particular PE. In nature, PEs correlate
 * to each other. the struct has to reflect that hierarchy in
 * order to easily pick up those affected PEs when one particular
 * PE has EEH errors.
...
@@ -32,7 +32,7 @@
 * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
 *
 * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
- * TLB2 storage attibute fields. Those are:
+ * TLB2 storage attribute fields. Those are:
 *
 * TLB2:
 * 0...10 11 12 13 14 15 16...31
...
@@ -802,7 +802,7 @@ struct opal_sg_entry {
 };
 /*
- * Candiate image SG list.
+ * Candidate image SG list.
 *
 * length = VER | length
 */
@@ -852,7 +852,7 @@ struct opal_i2c_request {
 * with individual elements being 16 bits wide to fetch the system
 * wide EPOW status. Each element in the buffer will contain the
 * EPOW status in it's bit representation for a particular EPOW sub
- * class as defiend here. So multiple detailed EPOW status bits
+ * class as defined here. So multiple detailed EPOW status bits
 * specific for any sub class can be represented in a single buffer
 * element as it's bit representation.
 */
...
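The comment fixed in the second hunk above describes a buffer of 16-bit elements, one per EPOW sub class, each carrying that sub class's status as individual bits. A hypothetical sketch of reading one such bit; the names and bit positions are invented for illustration, not the OPAL definitions.

#include <stdbool.h>
#include <stdint.h>

#define EPOW_SUBCLASS_POWER	0		/* placeholder sub class index */
#define EPOW_POWER_ON_UPS	(1 << 0)	/* placeholder status bit */

static bool epow_running_on_ups(const uint16_t *epow_buf, int n_elems)
{
	if (EPOW_SUBCLASS_POWER >= n_elems)
		return false;
	/* each 16-bit element packs the status bits for one sub class */
	return epow_buf[EPOW_SUBCLASS_POWER] & EPOW_POWER_ON_UPS;
}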
@@ -210,7 +210,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node,
 /* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value)
 * enable/disable the sound chip, whatever it is and provided it can
- * acually be controlled
+ * actually be controlled
 */
 #define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9)
...
@@ -224,7 +224,7 @@ struct thread_struct {
 unsigned int align_ctl; /* alignment handling control */
 #ifdef CONFIG_PPC64
 unsigned long start_tb; /* Start purr when proc switched in */
-unsigned long accum_tb; /* Total accumilated purr for process */
+unsigned long accum_tb; /* Total accumulated purr for process */
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 struct perf_event *ptrace_bps[HBP_NUM];
 /*
...
@@ -104,7 +104,7 @@
 #define PS3AV_CMD_AV_INPUTLEN_16 0x02
 #define PS3AV_CMD_AV_INPUTLEN_20 0x0a
 #define PS3AV_CMD_AV_INPUTLEN_24 0x0b
-/* alayout */
+/* av_layout */
 #define PS3AV_CMD_AV_LAYOUT_32 (1 << 0)
 #define PS3AV_CMD_AV_LAYOUT_44 (1 << 1)
 #define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
...
@@ -96,7 +96,7 @@ static inline bool pte_user(pte_t pte)
 #define PTE_RPN_SHIFT (PAGE_SHIFT)
 #endif
-/* The mask convered by the RPN must be a ULL on 32-bit platforms with
+/* The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs
 */
 #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
...
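The comment touched above makes a small but easy-to-miss point: with CONFIG_PTE_64BIT on a 32-bit platform the RPN mask has to be built from a ULL constant, or the high bits are silently truncated. A hedged sketch of the idea; the shift value below is a placeholder, not the real definition.

#define PTE_RPN_SHIFT	12	/* placeholder; the real value depends on PAGE_SHIFT */

#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
/* 64-bit PTEs on a 32-bit platform: the mask must be a ULL constant */
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif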
@@ -185,7 +185,7 @@
 * x = processor mask
 * y = op. point index
 * z = processor freq. step index
- * I haven't yet decyphered result codes
+ * I haven't yet deciphered result codes
 *
 */
 #define SMU_CMD_POWER_COMMAND 0xaa
...
@@ -77,7 +77,7 @@
 * nodes if your board uses the Broadcom PHYs
 */
 #define TSI108_PHY_MV88E 0 /* Marvel 88Exxxx PHY */
-#define TSI108_PHY_BCM54XX 1 /* Broardcom BCM54xx PHY */
+#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
 /* Global variables */
...
@@ -156,7 +156,7 @@ setup_7410_workarounds:
 blr
 /* 740/750/7400/7410
- * Enable Store Gathering (SGE), Address Brodcast (ABE),
+ * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
...
@@ -139,7 +139,7 @@ static void eeh_enable_irq(struct pci_dev *dev)
 * into it.
 *
 * That's just wrong.The warning in the core code is
- * there to tell people to fix their assymetries in
+ * there to tell people to fix their asymmetries in
 * their own code, not by abusing the core information
 * to avoid it.
 *
...
@@ -453,7 +453,7 @@ exc_##n##_bad_stack: \
 sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \
 b bad_stack_book3e; /* bad stack error */
-/* WARNING: If you change the layout of this stub, make sure you chcek
+/* WARNING: If you change the layout of this stub, make sure you check
 * the debug exception handler which handles single stepping
 * into exceptions from userspace, and the MM code in
 * arch/powerpc/mm/tlb_nohash.c which patches the branch here
...
@@ -82,7 +82,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 /* If this is not a PHB, we only flush the hash table over
 * the area mapped by this bridge. We don't play with the PTE
- * mappings since we might have to deal with sub-page alignemnts
+ * mappings since we might have to deal with sub-page alignments
 * so flushing the hash table is the only sane way to make sure
 * that no hash entries are covering that removed bridge area
 * while still allowing other busses overlapping those pages
...
@@ -802,7 +802,7 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 * this state.
 * We do this using the current MSR, rather tracking it in
 * some specific thread_struct bit, as it has the additional
- * benifit of checking for a potential TM bad thing exception.
+ * benefit of checking for a potential TM bad thing exception.
 */
 if (!MSR_TM_SUSPENDED(mfmsr()))
 return;
...
@@ -698,7 +698,7 @@ static void check_location(struct seq_file *m, const char *c)
 /*
 * Format:
 * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
- * the '.' may be an abbrevation
+ * the '.' may be an abbreviation
 */
 static void check_location_string(struct seq_file *m, const char *c)
 {
...
@@ -325,7 +325,7 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
 }
 EXPORT_SYMBOL_GPL(rh_init);
-/* Attach a free memory region, coalesces regions if adjuscent */
+/* Attach a free memory region, coalesces regions if adjacent */
 int rh_attach_region(rh_info_t * info, unsigned long start, int size)
 {
 rh_block_t *blk;
...
@@ -55,7 +55,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 * We need 14 to 65 bits of va for a tlibe of 4K page
 * With vpn we ignore the lower VPN_SHIFT bits already.
 * And top two bits are already ignored because we can
- * only accomadate 76 bits in a 64 bit vpn with a VPN_SHIFT
+ * only accomodate 76 bits in a 64 bit vpn with a VPN_SHIFT
 * of 12.
 */
 va = vpn << VPN_SHIFT;
@@ -605,7 +605,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 * crashdump and all bets are off anyway.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
- * athough there is the control page available...
+ * although there is the control page available...
 */
 static void native_hpte_clear(void)
 {
...
@@ -51,7 +51,7 @@ static void spu_buff_add(unsigned long int value, int spu)
 * That way we can tell the difference between the
 * buffer being full versus empty.
 *
- * ASSUPTION: the buffer_lock is held when this function
+ * ASSUMPTION: the buffer_lock is held when this function
 * is called to lock the buffer, head and tail.
 */
 int full = 1;
...
@@ -992,7 +992,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
 * than the previous value it will cause the delta and the counter to
 * have bogus values unless we rolled a counter over. If a coutner is
 * rolled back, it will be smaller, but within 256, which is the maximum
- * number of events to rollback at once. If we dectect a rollback
+ * number of events to rollback at once. If we detect a rollback
 * return 0. This can lead to a small lack of precision in the
 * counters.
 */
...
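For context, the comment fixed above belongs to check_and_compute_delta(); a minimal sketch of the rollback check it describes, assuming 32-bit counter reads and the 256-event window mentioned in the comment:

#include <stdint.h>

static uint64_t compute_delta_sketch(uint64_t prev, uint64_t val)
{
	uint64_t delta = (val - prev) & 0xfffffffful;	/* 32-bit wrap-around delta */

	/* a counter rolled back by fewer than 256 events is not a real delta */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}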
@@ -1298,7 +1298,7 @@ static void h_24x7_event_read(struct perf_event *event)
 __this_cpu_write(hv_24x7_txn_err, ret);
 } else {
 /*
- * Assoicate the event with the HCALL request index,
+ * Associate the event with the HCALL request index,
 * so ->commit_txn() can quickly find/update count.
 */
 i = request_buffer->num_requests - 1;
...
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
 /* -1 if @performance_domain does not refer to a virtual processor */
 __be32 lpar_cfg_instance_id;
-/* size = @result_element_data_size of cointaining result. */
+/* size = @result_element_data_size of containing result. */
 __u64 element_data[1];
 } __packed;
...
@@ -719,7 +719,7 @@ static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
 * most one of a mux, div, and gate each into one 'struct clk'
 * item
 * - PSC/MSCAN/SPDIF clock generation OTOH already is very
- * specific and cannot get mapped to componsites (at least not
+ * specific and cannot get mapped to composites (at least not
 * a single one, maybe two of them, but then some of these
 * intermediate clock signals get referenced elsewhere (e.g.
 * in the clock frequency measurement, CFM) and thus need
...
@@ -178,7 +178,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
 * default for now.*/
 #ifdef CELL_IOMMU_STRICT_PROTECTION
 /* to avoid referencing a global, we use a trick here to setup the
- * protection bit. "prot" is setup to be 3 fields of 4 bits apprended
+ * protection bit. "prot" is setup to be 3 fields of 4 bits appended
 * together for each of the 3 supported direction values. It is then
 * shifted left so that the fields matching the desired direction
 * lands on the appropriate bits, and other bits are masked out.
@@ -338,7 +338,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 start_seg = base >> IO_SEGMENT_SHIFT;
 segments = size >> IO_SEGMENT_SHIFT;
 pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
-/* PTEs for each segment must start on a 4K bounday */
+/* PTEs for each segment must start on a 4K boundary */
 pages_per_segment = max(pages_per_segment,
 (1 << 12) / sizeof(unsigned long));
...
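The first of the two hunks above refers to a trick where the protection bits for all three DMA directions are packed into one constant, four bits per direction, and the field for the requested direction is selected by shifting and masking. A hypothetical sketch of that idea; the bit values and names are placeholders, not the real Cell IOMMU PTE bits.

#define IOPTE_PROT_W	0x8			/* placeholder: device may write memory */
#define IOPTE_PROT_R	0x4			/* placeholder: device may read memory */
#define IOPTE_PROT_MASK	(IOPTE_PROT_W | IOPTE_PROT_R)

/* one 4-bit field per dma_data_direction value:
 * nibble 0: DMA_BIDIRECTIONAL -> R|W, nibble 1: DMA_TO_DEVICE -> R,
 * nibble 2: DMA_FROM_DEVICE -> W
 */
#define IOPTE_PROT_TABLE	0x84c

static unsigned long iopte_prot_bits(int direction)
{
	/* select the field for this direction, mask off the others */
	return (IOPTE_PROT_TABLE >> (4 * direction)) & IOPTE_PROT_MASK;
}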
@@ -217,7 +217,7 @@ static void spider_irq_cascade(struct irq_desc *desc)
 chip->irq_eoi(&desc->irq_data);
 }
-/* For hooking up the cascace we have a problem. Our device-tree is
+/* For hooking up the cascade we have a problem. Our device-tree is
 * crap and we don't know on which BE iic interrupt we are hooked on at
 * least not the "standard" way. We can reconstitute it based on two
 * informations though: which BE node we are connected to and whether
...
@@ -69,7 +69,7 @@ static DEFINE_SPINLOCK(spu_lock);
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
- * In addition spu_full_list_lock protects all assignmens to
+ * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
 static LIST_HEAD(spu_full_list);
@@ -253,7 +253,7 @@ static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
- * Because the lscsa and code may cross segment boundaires, we check to see
+ * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller that one segment - if not, something
 * is seriously wrong.
...
@@ -866,7 +866,7 @@ void spufs_wbox_callback(struct spu *spu)
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
- * space is availabyl, but return when we have been able to
+ * space is available, but return when we have been able to
 * write something.
 */
 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
...
@@ -435,7 +435,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 /* Note: we don't need to force_sig SIGTRAP on single-step
 * since we have TIF_SINGLESTEP set, thus the kernel will do
- * it upon return from the syscall anyawy
+ * it upon return from the syscall anyway.
 */
 if (unlikely(status & SPU_STATUS_SINGLE_STEP))
 ret = -ERESTARTSYS;
...
@@ -622,7 +622,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
 /**
 * find_victim - find a lower priority context to preempt
- * @ctx: canidate context for running
+ * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
...
@@ -15,7 +15,7 @@
 * This file thus provides a simple low level unified i2c interface for
 * powermac that covers the various types of i2c busses used in Apple machines.
 * For now, keywest, PMU and SMU, though we could add Cuda, or other bit
- * banging busses found on older chipstes in earlier machines if we ever need
+ * banging busses found on older chipsets in earlier machines if we ever need
 * one of them.
 *
 * The drivers in this file are synchronous/blocking. In addition, the
...
@@ -197,7 +197,7 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb)
 /*
 * Strip off the segment used by the reserved PE, which is
- * expected to be 0 or last one of PE capabicity.
+ * expected to be 0 or last one of PE capability.
 */
 r = &phb->hose->mem_resources[1];
 if (phb->ioda.reserved_pe_idx == 0)
...
@@ -2,7 +2,7 @@
 * The file intends to implement the platform dependent EEH operations on pseries.
 * Actually, the pseries platform is built based on RTAS heavily. That means the
 * pseries platform dependent EEH operations will be built on RTAS calls. The functions
- * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
+ * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
 * been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
...
@@ -113,7 +113,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
 * - The owner of an event is determined by combinations of scope,
 * event type, and sub-type. There is no easy way to pre-sort clients
 * by scope or event type alone. For example, Torrent ISR route change
- * event is reported with scope 0x00 (Not Applicatable) rather than
+ * event is reported with scope 0x00 (Not Applicable) rather than
 * 0x3B (Torrent-hub). It is better to let the clients to identify
 * who owns the event.
 */
...
@@ -367,7 +367,7 @@ static void pseries_lpar_idle(void)
 {
 /*
 * Default handler to go into low thread priority and possibly
- * low power mode by cedeing processor to hypervisor
+ * low power mode by ceding processor to hypervisor
 */
 /* Indicate to hypervisor that we are idle. */
...