
spelling fixes

acquired (aquired)
contiguous (contigious)
successful (succesful, succesfull)
surprise (suprise)
whether (weather)
some other misspellings

Signed-off-by: Andreas Mohr <andi@lisas.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Andreas Mohr 2006-06-26 18:35:02 +02:00 committed by Adrian Bunk
parent f18190bd34
commit d6e05edc59
57 changed files with 88 additions and 88 deletions

View File

@ -354,7 +354,7 @@ static void __init init_nsc(struct cpuinfo_x86 *c)
* This function only handles the GX processor, and kicks every
* thing else to the Cyrix init function above - that should
* cover any processors that might have been branded differently
- * after NSC aquired Cyrix.
+ * after NSC acquired Cyrix.
*
* If this breaks your GX1 horribly, please e-mail
* info-linux@ldcmail.amd.com to tell us.

View File

@ -175,7 +175,7 @@ static void mask_and_ack_8259A(unsigned int irq)
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
* of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs
* usually resulting from the 8259A-1|2 PICs) occur

View File

@ -59,7 +59,7 @@ void hook_irq_handler(int int_cause, int bit_num, void *isr_ptr)
* bit_num - Indicates which bit number in the cause register
*
* Outputs :
- * 1 if succesful, 0 if failure
+ * 1 if successful, 0 if failure
*/
int enable_galileo_irq(int int_cause, int bit_num)
{
@ -83,7 +83,7 @@ int enable_galileo_irq(int int_cause, int bit_num)
* bit_num - Indicates which bit number in the cause register
*
* Outputs :
- * 1 if succesful, 0 if failure
+ * 1 if successful, 0 if failure
*/
int disable_galileo_irq(int int_cause, int bit_num)
{

View File

@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(spu_save);
* @spu: pointer to SPU iomem structure.
*
* Perform harvest + restore, as we may not be coming
- * from a previous succesful save operation, and the
+ * from a previous successful save operation, and the
* hardware state is unknown.
*/
int spu_restore(struct spu_state *new, struct spu *spu)

View File

@ -287,7 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
* find the pci device that corresponds to a given address.
* This routine scans all pci busses to build the cache.
* Must be run late in boot process, after the pci controllers
- * have been scaned for devices (after all device resources are known).
+ * have been scanned for devices (after all device resources are known).
*/
void __init pci_addr_cache_build(void)
{

View File

@ -356,7 +356,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
set_vtimer(event->expires);
spin_unlock_irqrestore(&vt_list->lock, flags);
- /* release CPU aquired in prepare_vtimer or mod_virt_timer() */
+ /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}

View File

@ -1222,7 +1222,7 @@ int open_ubd_file(char *file, struct openflags *openflags, int shared,
}
}
- /* Succesful return case! */
+ /* Successful return case! */
if(backing_file_out == NULL)
return(fd);

View File

@ -278,7 +278,7 @@ static void mask_and_ack_8259A(unsigned int irq)
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
* of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecesserily.
+ * do without slowing down good hardware unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs
* usually resulting from the 8259A-1|2 PICs) occur

View File

@ -892,7 +892,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
}
/*
- * as_can_anticipate indicates weather we should either run arq
+ * as_can_anticipate indicates whether we should either run arq
* or keep anticipating a better request.
*/
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)

View File

@ -2745,7 +2745,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
return 0;
/*
- * not contigious
+ * not contiguous
*/
if (req->sector + req->nr_sectors != next->sector)
return 0;
@ -3415,7 +3415,7 @@ static struct notifier_block blk_cpu_notifier = {
*
* Description:
* Ends all I/O on a request. It does not handle partial completions,
- * unless the driver actually implements this in its completionc callback
+ * unless the driver actually implements this in its completion callback
* through requeueing. Theh actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().

View File

@ -951,7 +951,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
it most likely that the chip will notice it. It also prevents us
from having to wait for completion. On the other hand, we may
need to wait for completion anyway, to see if it completed
- succesfully. */
+ successfully. */
switch (atm_vcc->qos.aal) {
case ATM_AAL2:

View File

@ -118,7 +118,7 @@ static int amd_create_gatt_pages(int nr_tables)
return retval;
}
- /* Since we don't need contigious memory we just try
+ /* Since we don't need contiguous memory we just try
* to get the gatt table once
*/

View File

@ -261,7 +261,7 @@ static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
#endif
/*
- *Since we don't need contigious memory we just try
+ *Since we don't need contiguous memory we just try
* to get the gatt table once
*/

View File

@ -177,7 +177,7 @@ static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
/*
- * Since we don't need contigious memory we just try
+ * Since we don't need contiguous memory we just try
* to get the gatt table once
*/

View File

@ -546,7 +546,7 @@ static void RIOReceive(struct rio_info *p, struct Port *PortP)
** run out of space it will be set to the offset of the
** next byte to copy from the packet data area. The packet
** length field is decremented by the number of bytes that
- ** we succesfully removed from the packet. When this reaches
+ ** we successfully removed from the packet. When this reaches
** zero, we reset the offset pointer to be zero, and free
** the packet from the front of the queue.
*/

View File

@ -341,7 +341,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)

View File

@ -358,7 +358,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)

View File

@ -253,7 +253,7 @@ set_fan(min2, fan_min[1], LM80_REG_FAN_MIN(2), fan_div[1]);
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)

View File

@ -421,7 +421,7 @@ static void set_fan_min(struct device *dev, const char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan clock divider. This follows the principle
- of least suprise; the user doesn't expect the fan minimum to change just
+ of least surprise; the user doesn't expect the fan minimum to change just
because the divider changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)

View File

@ -380,7 +380,7 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)

View File

@ -207,7 +207,7 @@ static ssize_t set_fan_min(struct device *dev, const char *buf,
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan clock divider. This follows the principle
- of least suprise; the user doesn't expect the fan minimum to change just
+ of least surprise; the user doesn't expect the fan minimum to change just
because the divider changed. */
static ssize_t set_fan_div(struct device *dev, const char *buf,
size_t count, int nr)

View File

@ -781,7 +781,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t
store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)

View File

@ -630,7 +630,7 @@ show_fan_div_reg(struct device *dev, char *buf, int nr)
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t
store_fan_div_reg(struct device *dev, const char *buf, size_t count, int nr)

View File

@ -463,7 +463,7 @@ show_fan_div(struct device *dev, struct device_attribute *attr,
/* Note: we save and restore the fan minimum here, because its value is
determined in part by the fan divisor. This follows the principle of
- least suprise; the user doesn't expect the fan minimum to change just
+ least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
static ssize_t
store_fan_div(struct device *dev, struct device_attribute *attr,

View File

@ -37,7 +37,7 @@
* Version 1.15 convert all calls to ide_raw_taskfile
* since args will return register content.
* Version 1.16 added suspend-resume-checkpower
- * Version 1.17 do flush on standy, do flush on ATA < ATA6
+ * Version 1.17 do flush on standby, do flush on ATA < ATA6
* fix wcache setup.
*/

View File

@ -1665,7 +1665,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
* Initialize a request before we fill it in and send it down to
* ide_do_drive_cmd. Commands must be set up by this function. Right
* now it doesn't do a lot, but if that changes abusers will have a
- * nasty suprise.
+ * nasty surprise.
*/
void ide_init_drive_cmd (struct request *rq)

View File

@ -103,7 +103,7 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
* driver specific parts, enable the controller and make it available
* to the general subsystem using hpsb_add_host().
*
- * Return Value: a pointer to the &hpsb_host if succesful, %NULL if
+ * Return Value: a pointer to the &hpsb_host if successful, %NULL if
* no memory was available.
*/
static DEFINE_MUTEX(host_num_alloc);

View File

@ -139,7 +139,7 @@ int hpsb_bus_reset(struct hpsb_host *host);
/*
* Hand over received selfid packet to the core. Complement check (second
- * quadlet is complement of first) is expected to be done and succesful.
+ * quadlet is complement of first) is expected to be done and successful.
*/
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);

View File

@ -592,7 +592,7 @@ static int put_address(char *st, u_char *p, int len)
} /* put_address */
/*************************************/
- /* report a succesfull interrogation */
+ /* report a successful interrogation */
/*************************************/
static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
{ char *src = ic->parm.dss1_io.data;

View File

@ -4848,7 +4848,7 @@ static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input)
*
* The IVC120G security card has 4 i2c controlled TDA8540 matrix
* swichers to provide 16 channels to MUX0. The TDA8540's have
- * 4 indepedant outputs and as such the IVC120G also has the
+ * 4 independent outputs and as such the IVC120G also has the
* optional "Monitor Out" bus. This allows the card to be looking
* at one input while the monitor is looking at another.
*

View File

@ -508,11 +508,11 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
* speak of. We simply pull the packet out of its PIO buffer (which is slow)
* and queue it for the kernel. Then we reset the card for the next packet.
*
- * We sometimes get suprise interrupts late both because the SMP IRQ delivery
+ * We sometimes get surprise interrupts late both because the SMP IRQ delivery
* is message passing and because the card sometimes seems to deliver late. I
* think if it is part way through a receive and the mode is changed it carries
* on receiving and sends us an interrupt. We have to band aid all these cases
- * to get a sensible 150kbytes/second performance. Even then you want a small
+ * to get a sensible 150kBytes/second performance. Even then you want a small
* TCP window.
*/

View File

@ -386,7 +386,7 @@ static int __irport_change_speed(struct irda_task *task)
/* Locking notes : this function may be called from irq context with
* spinlock, via irport_write_wakeup(), or from non-interrupt without
* spinlock (from the task timer). Yuck !
- * This is ugly, and unsafe is the spinlock is not already aquired.
+ * This is ugly, and unsafe is the spinlock is not already acquired.
* This will be fixed when irda-task get rewritten.
* Jean II */
if (!spin_is_locked(&self->lock)) {

View File

@ -1883,7 +1883,7 @@ static void smc_reset(struct net_device *dev)
/* Set the Window 1 control, configuration and station addr registers.
No point in writing the I/O base register ;-> */
SMC_SELECT_BANK(1);
- /* Automatically release succesfully transmitted packets,
+ /* Automatically release successfully transmitted packets,
Accept link errors, counter and Tx error interrupts. */
outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
ioaddr + CONTROL);

View File

@ -1485,7 +1485,7 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
*
* Sending the PREPARE_FOR_POWER_DOWN will restrict the
* hardware from going into standby mode and will transition
- * out of D0-standy if it is already in that state.
+ * out of D0-standby if it is already in that state.
*
* STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
* driver upon completion. Once received, the driver can

View File

@ -369,7 +369,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
/*
* Give firmware a chance to be called, such as ACPI _PRx, _PSx
- * Firmware method after natice method ?
+ * Firmware method after native method ?
*/
if (platform_pci_set_power_state)
platform_pci_set_power_state(dev, state);

View File

@ -167,7 +167,7 @@ zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
* initiates adapter recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
@ -203,7 +203,7 @@ zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
* purpose: Wrappper for zfcp_erp_adapter_reopen_internal
* used to ensure the correct locking
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
@ -469,7 +469,7 @@ zfcp_test_link(struct zfcp_port *port)
* initiates Forced Reopen recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
static int
@ -509,7 +509,7 @@ zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, int clear_mask)
* purpose: Wrappper for zfcp_erp_port_forced_reopen_internal
* used to ensure the correct locking
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int
@ -536,7 +536,7 @@ zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask)
* initiates Reopen recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
static int
@ -605,7 +605,7 @@ zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask)
* initiates Reopen recovery which is done
* asynchronously
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
static int
@ -1805,7 +1805,7 @@ zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u32 mask, int set_or_clear)
* purpose: Wrappper for zfcp_erp_port_reopen_all_internal
* used to ensure the correct locking
*
- * returns: 0 - initiated action succesfully
+ * returns: 0 - initiated action successfully
* <0 - failed to initiate action
*/
int

View File

@ -500,7 +500,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
/*
* Function : int should_disconnect (unsigned char cmd)
*
- * Purpose : decide weather a command would normally disconnect or
+ * Purpose : decide whether a command would normally disconnect or
* not, since if it won't disconnect we should go to sleep.
*
* Input : cmd - opcode of SCSI command

View File

@ -12374,7 +12374,7 @@ AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
ASC_PRINT1(
"AscInitFromEEP: Failed to re-write EEPROM with %d errors.\n", i);
} else {
- ASC_PRINT("AscInitFromEEP: Succesfully re-wrote EEPROM.");
+ ASC_PRINT("AscInitFromEEP: Successfully re-wrote EEPROM.\n");
}
}
return (warn_code);

View File

@ -3771,7 +3771,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
* @target: The target for the new device.
* @lun: The lun for the new device.
*
- * Return the new device if succesfull or NULL on failure.
+ * Return the new device if successful or NULL on failure.
**/
static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
u8 target, u8 lun)

View File

@ -760,7 +760,7 @@ static int device_inquiry(int host_index, int ldn)
while (!got_interrupt(host_index))
barrier();
- /*if command succesful, break */
+ /*if command successful, break */
if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
@ -885,7 +885,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
while (!got_interrupt(host_index))
barrier();
- /*if command succesful, break */
+ /*if command successful, break */
if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
@ -921,7 +921,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
return 2;
} else
global_command_error_excuse = 0;
- /*if command succesful, break */
+ /*if command successful, break */
if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
@ -959,7 +959,7 @@ static int immediate_reset(int host_index, unsigned int ldn)
/* did not work, finish */
return 1;
}
- /*if command succesful, break */
+ /*if command successful, break */
if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}

View File

@ -6438,7 +6438,7 @@ ips_erase_bios(ips_ha_t * ha)
/* VPP failure */
return (1);
- /* check for succesful flash */
+ /* check for successful flash */
if (status & 0x30)
/* sequence error */
return (1);
@ -6550,7 +6550,7 @@ ips_erase_bios_memio(ips_ha_t * ha)
/* VPP failure */
return (1);
- /* check for succesful flash */
+ /* check for successful flash */
if (status & 0x30)
/* sequence error */
return (1);

View File

@ -2818,7 +2818,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
(cmdstatp->sense_hdr.sense_key == NO_SENSE ||
cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
undone == 0) {
- ioctl_result = 0; /* EOF written succesfully at EOM */
+ ioctl_result = 0; /* EOF written successfully at EOM */
if (fileno >= 0)
fileno++;
STps->drv_file = fileno;

View File

@ -712,7 +712,7 @@ static void v9fs_read_work(void *a)
* v9fs_send_request - send 9P request
* The function can sleep until the request is scheduled for sending.
* The function can be interrupted. Return from the function is not
- * a guarantee that the request is sent succesfully. Can return errors
+ * a guarantee that the request is sent successfully. Can return errors
* that can be retrieved by PTR_ERR macros.
*
* @m: mux data

View File

@ -641,7 +641,7 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb)
* invoked both for initial i/o submission and
* subsequent retries via the aio_kick_handler.
* Expects to be invoked with iocb->ki_ctx->lock
- * already held. The lock is released and reaquired
+ * already held. The lock is released and reacquired
* as needed during processing.
*
* Calls the iocb retry method (already setup for the

View File

@ -43,7 +43,7 @@ int jffs2_sum_init(struct jffs2_sb_info *c)
return -ENOMEM;
}
- dbg_summary("returned succesfully\n");
+ dbg_summary("returned successfully\n");
return 0;
}

View File

@ -126,7 +126,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, boolean_t abnr)
/* allocate the disk blocks for the extent. initially, extBalloc()
* will try to allocate disk blocks for the requested size (xlen).
- * if this fails (xlen contigious free blocks not avaliable), it'll
+ * if this fails (xlen contiguous free blocks not avaliable), it'll
* try to allocate a smaller number of blocks (producing a smaller
* extent), with this smaller number of blocks consisting of the
* requested number of blocks rounded down to the next smaller
@ -493,7 +493,7 @@ int extFill(struct inode *ip, xad_t * xp)
*
* initially, we will try to allocate disk blocks for the
* requested size (nblocks). if this fails (nblocks
- * contigious free blocks not avaliable), we'll try to allocate
+ * contiguous free blocks not avaliable), we'll try to allocate
* a smaller number of blocks (producing a smaller extent), with
* this smaller number of blocks consisting of the requested
* number of blocks rounded down to the next smaller power of 2
@ -529,7 +529,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
/* get the number of blocks to initially attempt to allocate.
* we'll first try the number of blocks requested unless this
- * number is greater than the maximum number of contigious free
+ * number is greater than the maximum number of contiguous free
* blocks in the map. in that case, we'll start off with the
* maximum free.
*/
@ -586,7 +586,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
* in place. if this fails, we'll try to move the extent
* to a new set of blocks. if moving the extent, we initially
* will try to allocate disk blocks for the requested size
- * (nnew). if this fails (nnew contigious free blocks not
+ * (nnew). if this fails (new contiguous free blocks not
* avaliable), we'll try to allocate a smaller number of
* blocks (producing a smaller extent), with this smaller
* number of blocks consisting of the requested number of

View File

@ -427,7 +427,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while(0)

View File

@ -318,7 +318,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)

View File

@ -126,7 +126,7 @@ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
/* Just increments the mechanism's reference count and returns its input: */
struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
- /* For every succesful gss_mech_get or gss_mech_get_by_* call there must be a
+ /* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
* corresponding call to gss_mech_put. */
void gss_mech_put(struct gss_api_mech *);

View File

@ -14,7 +14,7 @@
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*

View File

@ -266,7 +266,7 @@ static inline void rmv_page_order(struct page *page)
* satisfies the following equation:
* P = B & ~(1 << O)
*
- * Assumption: *_mem_map is contigious at least up to MAX_ORDER
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
*/
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)

View File

@ -390,8 +390,8 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
* Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
* is set wait till the read completes. Otherwise attempt to read without
* blocking.
- * Returns 1 meaning 'success' if read is succesfull without switching off
- * readhaead mode. Otherwise return failure.
+ * Returns 1 meaning 'success' if read is successful without switching off
+ * readahead mode. Otherwise return failure.
*/
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,

View File

@ -169,7 +169,7 @@ gss_import_sec_context_kerberos(const void *p,
}
ctx_id->internal_ctx_id = ctx;
- dprintk("RPC: Succesfully imported new context.\n");
+ dprintk("RPC: Successfully imported new context.\n");
return 0;
out_err_free_key2:

View File

@ -201,7 +201,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
ctx_id->internal_ctx_id = ctx;
- dprintk("Succesfully imported new spkm context.\n");
+ dprintk("Successfully imported new spkm context.\n");
return 0;
out_err_free_key2:

View File

@ -31,7 +31,7 @@ struct snd_seq_event_cell {
struct snd_seq_event_cell *next; /* next cell */
};
- /* design note: the pool is a contigious block of memory, if we dynamicly
+ /* design note: the pool is a contiguous block of memory, if we dynamicly
want to add additional cells to the pool be better store this in another
pool as we need to know the base address of the pool when releasing
memory. */

View File

@ -97,19 +97,19 @@
*
* The documentation is an adventure: it's close but not fully accurate. I
* found out that after a reset some registers are *NOT* reset, though the
- * docs say the would be. Interresting ones are 0x7f, 0x7d and 0x7a. They are
- * related to the Audio 2 channel. I also was suprised about the consequenses
+ * docs say the would be. Interesting ones are 0x7f, 0x7d and 0x7a. They are
+ * related to the Audio 2 channel. I also was surprised about the consequences
* of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves
* into ES1888 mode. This means that it claims IRQ 11, which happens to be my
* ISDN adapter. Needless to say it no longer worked. I now understand why
* after rebooting 0x7f already was 0x05, the value of my choice: the BIOS
* did it.
*
- * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is decribed
- * as if it's exactly the same as register 0xa1. This is *NOT* true. The
- * description of 0x70 in ES1869 docs is accurate however.
+ * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is
+ * described as if it's exactly the same as register 0xa1. This is *NOT* true.
+ * The description of 0x70 in ES1869 docs is accurate however.
* Well, the assumption about ES1869 was wrong: register 0x70 is very much
* like register 0xa1, except that bit 7 is allways 1, whatever you want
* like register 0xa1, except that bit 7 is always 1, whatever you want
* it to be.
*
* When using audio 2 mixer register 0x72 seems te be meaningless. Only 0xa2
@ -117,10 +117,10 @@
*
* Software reset not being able to reset all registers is great! Especially
* the fact that register 0x78 isn't reset is great when you wanna change back
- * to single dma operation (simplex): audio 2 is still operation, and uses the
- * same dma as audio 1: your ess changes into a funny echo machine.
+ * to single dma operation (simplex): audio 2 is still operational, and uses
+ * the same dma as audio 1: your ess changes into a funny echo machine.
*
- * Received the new that ES1688 is detected as a ES1788. Did some thinking:
+ * Received the news that ES1688 is detected as a ES1788. Did some thinking:
* the ES1887 detection scheme suggests in step 2 to try if bit 3 of register
* 0x64 can be changed. This is inaccurate, first I inverted the * check: "If
* can be modified, it's a 1688", which lead to a correct detection
@ -135,7 +135,7 @@
* About recognition of ESS chips
*
* The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in
- * a (preliminary ??) datasheet on ES1887. It's aim is to identify ES1887, but
+ * a (preliminary ??) datasheet on ES1887. Its aim is to identify ES1887, but
* during detection the text claims that "this chip may be ..." when a step
* fails. This scheme is used to distinct between the above chips.
* It appears however that some PnP chips like ES1868 are recognized as ES1788
@ -156,9 +156,9 @@
*
* The existing ES1688 support didn't take care of the ES1688+ recording
* levels very well. Whenever a device was selected (recmask) for recording
- * it's recording level was loud, and it couldn't be changed. The fact that
+ * its recording level was loud, and it couldn't be changed. The fact that
* internal register 0xb4 could take care of RECLEV, didn't work meaning until
- * it's value was restored every time the chip was reset; this reset the
+ * its value was restored every time the chip was reset; this reset the
* value of 0xb4 too. I guess that's what 4front also had (have?) trouble with.
*
* About ES1887 support:
@ -169,9 +169,9 @@
* the latter case the recording volumes are 0.
* Now recording levels of inputs can be controlled, by changing the playback
* levels. Futhermore several devices can be recorded together (which is not
- * possible with the ES1688.
+ * possible with the ES1688).
* Besides the separate recording level control for each input, the common
- * recordig level can also be controlled by RECLEV as described above.
+ * recording level can also be controlled by RECLEV as described above.
*
* Not only ES1887 have this recording mixer. I know the following from the
* documentation:

View File

@ -143,7 +143,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
if (dma->periods == periods && dma->period_bytes == period_bytes)
return 0;
- /* the u32 cast is okay because in snd*create we succesfully told
+ /* the u32 cast is okay because in snd*create we successfully told
pci alloc that we're only 32 bit capable so the uppper will be 0 */
addr = (u32) substream->runtime->dma_addr;
desc_addr = (u32) dma->desc_buf.addr;