Patches contributed by Eötvös Loránd University
commit d4f9af9dac4ecb75818f909168f87b441cc95653
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 03:00:30 2006 -0800
[PATCH] sem2mutex: inotify
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: John McCutchan <ttb@tentacle.dhs.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Acked-by: Robert Love <rml@novell.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
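The conversion script applies a fixed mapping: DECLARE_MUTEX()/sema_init(&sem, 1) become DEFINE_MUTEX()/mutex_init(), and down()/up() become mutex_lock()/mutex_unlock(). A minimal sketch of the before/after pattern (the lock name my_lock is illustrative, not from the patch):

    #include <linux/mutex.h>

    /*
     * Before: a counting semaphore initialized to 1, used as a sleeping
     * lock:
     *     static DECLARE_MUTEX(my_lock);   (despite the name, a semaphore)
     *     down(&my_lock);  ...critical section...  up(&my_lock);
     */

    /* After: a real mutex, with strict owner semantics and lockdep coverage. */
    static DEFINE_MUTEX(my_lock);

    static void touch_shared_state(void)
    {
            mutex_lock(&my_lock);
            /* critical section: exactly one task at a time */
            mutex_unlock(&my_lock);
    }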
diff --git a/fs/inode.c b/fs/inode.c
index d0be6159eb7f..603e93ef0c6f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode)
i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
INIT_LIST_HEAD(&inode->inotify_watches);
- sema_init(&inode->inotify_sem, 1);
+ mutex_init(&inode->inotify_mutex);
#endif
}
diff --git a/fs/inotify.c b/fs/inotify.c
index 3041503bde02..60d9653d55b7 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -56,8 +56,8 @@ int inotify_max_queued_events;
* dentry->d_lock (used to keep d_move() away from dentry->d_parent)
* iprune_sem (synchronize shrink_icache_memory())
* inode_lock (protects the super_block->s_inodes list)
- * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
- * inotify_dev->sem (protects inotify_device and watches->d_list)
+ * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
+ * inotify_dev->mutex (protects inotify_device and watches->d_list)
*/
/*
@@ -79,12 +79,12 @@ int inotify_max_queued_events;
/*
* struct inotify_device - represents an inotify instance
*
- * This structure is protected by the semaphore 'sem'.
+ * This structure is protected by the mutex 'mutex'.
*/
struct inotify_device {
wait_queue_head_t wq; /* wait queue for i/o */
struct idr idr; /* idr mapping wd -> watch */
- struct semaphore sem; /* protects this bad boy */
+ struct mutex mutex; /* protects this bad boy */
struct list_head events; /* list of queued events */
struct list_head watches; /* list of watches */
atomic_t count; /* reference count */
@@ -101,7 +101,7 @@ struct inotify_device {
* device. In read(), this list is walked and all events that can fit in the
* buffer are returned.
*
- * Protected by dev->sem of the device in which we are queued.
+ * Protected by dev->mutex of the device in which we are queued.
*/
struct inotify_kernel_event {
struct inotify_event event; /* the user-space event */
@@ -112,8 +112,8 @@ struct inotify_kernel_event {
/*
* struct inotify_watch - represents a watch request on a specific inode
*
- * d_list is protected by dev->sem of the associated watch->dev.
- * i_list and mask are protected by inode->inotify_sem of the associated inode.
+ * d_list is protected by dev->mutex of the associated watch->dev.
+ * i_list and mask are protected by inode->inotify_mutex of the associated inode.
* dev, inode, and wd are never written to once the watch is created.
*/
struct inotify_watch {
@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
/*
* inotify_dev_get_event - return the next event in the given dev's queue
*
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
*/
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev)
/*
* inotify_dev_queue_event - add a new event to the given device
*
- * Caller must hold dev->sem. Can sleep (calls kernel_event()).
+ * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
*/
static void inotify_dev_queue_event(struct inotify_device *dev,
struct inotify_watch *watch, u32 mask,
@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev,
/*
* remove_kevent - cleans up and ultimately frees the given kevent
*
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
*/
static void remove_kevent(struct inotify_device *dev,
struct inotify_kernel_event *kevent)
@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev,
/*
* inotify_dev_event_dequeue - destroy an event on the given device
*
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
*/
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
/*
* inotify_dev_get_wd - returns the next WD for use by the given dev
*
- * Callers must hold dev->sem. This function can sleep.
+ * Callers must hold dev->mutex. This function can sleep.
*/
static int inotify_dev_get_wd(struct inotify_device *dev,
struct inotify_watch *watch)
@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
/*
* create_watch - creates a watch on the given device.
*
- * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep.
+ * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
* Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
*/
static struct inotify_watch *create_watch(struct inotify_device *dev,
@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
/*
* inotify_find_dev - find the watch associated with the given inode and dev
*
- * Callers must hold inode->inotify_sem.
+ * Callers must hold inode->inotify_mutex.
*/
static struct inotify_watch *inode_find_dev(struct inode *inode,
struct inotify_device *dev)
@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
* the IN_IGNORED event to the given device signifying that the inode is no
* longer watched.
*
- * Callers must hold both inode->inotify_sem and dev->sem. We drop a
+ * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
* reference to the inode before returning.
*
* The inode is not iput() so as to remain atomic. If the inode needs to be
@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
if (!inotify_inode_watched(inode))
return;
- down(&inode->inotify_sem);
+ mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
u32 watch_mask = watch->mask;
if (watch_mask & mask) {
struct inotify_device *dev = watch->dev;
get_inotify_watch(watch);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
inotify_dev_queue_event(dev, watch, mask, cookie, name);
if (watch_mask & IN_ONESHOT)
remove_watch_no_event(watch, dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
put_inotify_watch(watch);
}
}
- up(&inode->inotify_sem);
+ mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list)
iput(need_iput_tmp);
/* for each watch, send IN_UNMOUNT and then remove it */
- down(&inode->inotify_sem);
+ mutex_lock(&inode->inotify_mutex);
watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) {
struct inotify_device *dev = watch->dev;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
remove_watch(watch, dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
}
- up(&inode->inotify_sem);
+ mutex_unlock(&inode->inotify_mutex);
iput(inode);
spin_lock(&inode_lock);
@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode)
{
struct inotify_watch *watch, *next;
- down(&inode->inotify_sem);
+ mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
struct inotify_device *dev = watch->dev;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
remove_watch(watch, dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
}
- up(&inode->inotify_sem);
+ mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
int ret = 0;
poll_wait(file, &dev->wq, wait);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
if (!list_empty(&dev->events))
ret = POLLIN | POLLRDNORM;
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return ret;
}
@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
events = !list_empty(&dev->events);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
if (events) {
ret = 0;
break;
@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
if (ret)
return ret;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
while (1) {
struct inotify_kernel_event *kevent;
@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
remove_kevent(dev, kevent);
}
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return ret;
}
@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file)
* Destroy all of the watches on this device. Unfortunately, not very
* pretty. We cannot do a simple iteration over the list, because we
* do not know the inode until we iterate to the watch. But we need to
- * hold inode->inotify_sem before dev->sem. The following works.
+ * hold inode->inotify_mutex before dev->mutex. The following works.
*/
while (1) {
struct inotify_watch *watch;
struct list_head *watches;
struct inode *inode;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
watches = &dev->watches;
if (list_empty(watches)) {
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
break;
}
watch = list_entry(watches->next, struct inotify_watch, d_list);
get_inotify_watch(watch);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
inode = watch->inode;
- down(&inode->inotify_sem);
- down(&dev->sem);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&dev->mutex);
remove_watch_no_event(watch, dev);
- up(&dev->sem);
- up(&inode->inotify_sem);
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
}
/* destroy all of the events on this device */
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
while (!list_empty(&dev->events))
inotify_dev_event_dequeue(dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
/* free this device: the put matching the get in inotify_init() */
put_inotify_dev(dev);
@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd)
struct inotify_watch *watch;
struct inode *inode;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
watch = idr_find(&dev->idr, wd);
if (unlikely(!watch)) {
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return -EINVAL;
}
get_inotify_watch(watch);
inode = watch->inode;
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
- down(&inode->inotify_sem);
- down(&dev->sem);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&dev->mutex);
/* make sure that we did not race */
watch = idr_find(&dev->idr, wd);
if (likely(watch))
remove_watch(watch, dev);
- up(&dev->sem);
- up(&inode->inotify_sem);
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
return 0;
@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void)
INIT_LIST_HEAD(&dev->events);
INIT_LIST_HEAD(&dev->watches);
init_waitqueue_head(&dev->wq);
- sema_init(&dev->sem, 1);
+ mutex_init(&dev->mutex);
dev->event_count = 0;
dev->queue_size = 0;
dev->max_events = inotify_max_queued_events;
@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
inode = nd.dentry->d_inode;
dev = filp->private_data;
- down(&inode->inotify_sem);
- down(&dev->sem);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&dev->mutex);
if (mask & IN_MASK_ADD)
mask_add = 1;
@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
list_add(&watch->i_list, &inode->inotify_watches);
ret = watch->wd;
out:
- up(&dev->sem);
- up(&inode->inotify_sem);
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&inode->inotify_mutex);
path_release(&nd);
fput_and_out:
fput_light(filp, fput_needed);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 009ac96053fe..9ed1f36b6d54 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -509,7 +509,7 @@ struct inode {
#ifdef CONFIG_INOTIFY
struct list_head inotify_watches; /* watches on this inode */
- struct semaphore inotify_sem; /* protects the watches list */
+ struct mutex inotify_mutex; /* protects the watches list */
#endif
unsigned long i_state;
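One detail worth pausing on: inotify_release() must take inode->inotify_mutex before dev->mutex, but it can only discover the inode by looking at a watch, which requires dev->mutex. The patch keeps the pre-existing solution: pin the watch under the inner lock, drop it, then retake both locks in the documented order. A condensed restatement of that loop body, using the functions from the diff:

    static void release_one_watch(struct inotify_device *dev)
    {
            struct inotify_watch *watch;
            struct inode *inode;

            mutex_lock(&dev->mutex);            /* "wrong" order, but brief */
            if (list_empty(&dev->watches)) {
                    mutex_unlock(&dev->mutex);
                    return;
            }
            watch = list_entry(dev->watches.next,
                               struct inotify_watch, d_list);
            get_inotify_watch(watch);           /* pin: survives the unlock */
            mutex_unlock(&dev->mutex);

            inode = watch->inode;
            mutex_lock(&inode->inotify_mutex);  /* now the documented order */
            mutex_lock(&dev->mutex);
            remove_watch_no_event(watch, dev);
            mutex_unlock(&dev->mutex);
            mutex_unlock(&inode->inotify_mutex);
            put_inotify_watch(watch);
    }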
commit d3be915fc5e7d19a2283ad9b0fe0782a74675d0a
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 03:00:29 2006 -0800
[PATCH] sem2mutex: quota
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Jan Kara <jack@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/fs/dquot.c b/fs/dquot.c
index 9376a4378988..acf07e581f8c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -103,12 +103,12 @@
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
* for altering the flag i_mutex is also needed). If operation is holding
* reference to dquot in other way (e.g. quotactl ops) it must be guarded by
- * dqonoff_sem.
+ * dqonoff_mutex.
* This locking assures that:
* a) update/access to dquot pointers in inode is serialized
* b) everyone is guarded against invalidate_dquots()
*
- * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
+ * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
* from inodes (dquot_alloc_space() and such don't check the dq_lock).
* Currently dquot is locked only when it is being read to memory (or space for
* it is being allocated) on the first dqget() and when it is being released on
@@ -118,8 +118,9 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > dqio_sem
- * i_mutex on quota files is special (it's below dqio_sem)
+ * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ * dqio_mutex
+ * i_mutex on quota files is special (it's below dqio_mutex)
*/
static DEFINE_SPINLOCK(dq_list_lock);
@@ -280,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot)
static void wait_on_dquot(struct dquot *dquot)
{
- down(&dquot->dq_lock);
- up(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_lock);
}
#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
@@ -320,8 +321,8 @@ int dquot_acquire(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
- down(&dquot->dq_lock);
- down(&dqopt->dqio_sem);
+ mutex_lock(&dquot->dq_lock);
+ mutex_lock(&dqopt->dqio_mutex);
if (!test_bit(DQ_READ_B, &dquot->dq_flags))
ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
if (ret < 0)
@@ -342,8 +343,8 @@ int dquot_acquire(struct dquot *dquot)
}
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
- up(&dqopt->dqio_sem);
- up(&dquot->dq_lock);
+ mutex_unlock(&dqopt->dqio_mutex);
+ mutex_unlock(&dquot->dq_lock);
return ret;
}
@@ -355,7 +356,7 @@ int dquot_commit(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
- down(&dqopt->dqio_sem);
+ mutex_lock(&dqopt->dqio_mutex);
spin_lock(&dq_list_lock);
if (!clear_dquot_dirty(dquot)) {
spin_unlock(&dq_list_lock);
@@ -372,7 +373,7 @@ int dquot_commit(struct dquot *dquot)
ret = ret2;
}
out_sem:
- up(&dqopt->dqio_sem);
+ mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
@@ -384,11 +385,11 @@ int dquot_release(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
- down(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
if (atomic_read(&dquot->dq_count) > 1)
goto out_dqlock;
- down(&dqopt->dqio_sem);
+ mutex_lock(&dqopt->dqio_mutex);
if (dqopt->ops[dquot->dq_type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
/* Write the info */
@@ -398,9 +399,9 @@ int dquot_release(struct dquot *dquot)
ret = ret2;
}
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
- up(&dqopt->dqio_sem);
+ mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
- up(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_lock);
return ret;
}
@@ -464,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
- down(&dqopt->dqonoff_sem);
+ mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
@@ -499,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
spin_lock(&dq_list_lock);
dqstats.syncs++;
spin_unlock(&dq_list_lock);
- up(&dqopt->dqonoff_sem);
+ mutex_unlock(&dqopt->dqonoff_mutex);
return 0;
}
@@ -540,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
/*
* Put reference to dquot
* NOTE: If you change this function please check whether dqput_blocks() works right...
- * MUST be called with either dqptr_sem or dqonoff_sem held
+ * MUST be called with either dqptr_sem or dqonoff_mutex held
*/
static void dqput(struct dquot *dquot)
{
@@ -605,7 +606,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
return NODQUOT;
memset((caddr_t)dquot, 0, sizeof(struct dquot));
- sema_init(&dquot->dq_lock, 1);
+ mutex_init(&dquot->dq_lock);
INIT_LIST_HEAD(&dquot->dq_free);
INIT_LIST_HEAD(&dquot->dq_inuse);
INIT_HLIST_NODE(&dquot->dq_hash);
@@ -620,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
/*
* Get reference to dquot
- * MUST be called with either dqptr_sem or dqonoff_sem held
+ * MUST be called with either dqptr_sem or dqonoff_mutex held
*/
static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
@@ -686,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type)
return 0;
}
-/* This routine is guarded by dqonoff_sem semaphore */
+/* This routine is guarded by dqonoff_mutex mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
struct list_head *p;
@@ -964,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type)
unsigned int id = 0;
int cnt, ret = 0;
- /* First test before acquiring semaphore - solves deadlocks when we
- * re-enter the quota code and are already holding the semaphore */
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return 0;
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1028,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
int cnt, ret = NO_QUOTA;
char warntype[MAXQUOTAS];
- /* First test before acquiring semaphore - solves deadlocks when we
- * re-enter the quota code and are already holding the semaphore */
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_add:
inode_add_bytes(inode, number);
@@ -1077,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number)
int cnt, ret = NO_QUOTA;
char warntype[MAXQUOTAS];
- /* First test before acquiring semaphore - solves deadlocks when we
- * re-enter the quota code and are already holding the semaphore */
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -1121,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number)
{
unsigned int cnt;
- /* First test before acquiring semaphore - solves deadlocks when we
- * re-enter the quota code and are already holding the semaphore */
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_sub:
inode_sub_bytes(inode, number);
@@ -1157,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
{
unsigned int cnt;
- /* First test before acquiring semaphore - solves deadlocks when we
- * re-enter the quota code and are already holding the semaphore */
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1197,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
char warntype[MAXQUOTAS];
- /* First test before acquiring semaphore - solves deadlocks when we
- * re-enter the quota code and are already holding the semaphore */
+ /* First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
/* Clear the arrays */
@@ -1292,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type)
int ret;
struct quota_info *dqopt = sb_dqopt(sb);
- down(&dqopt->dqio_sem);
+ mutex_lock(&dqopt->dqio_mutex);
ret = dqopt->ops[type]->write_file_info(sb, type);
- up(&dqopt->dqio_sem);
+ mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
@@ -1350,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type)
struct inode *toputinode[MAXQUOTAS];
/* We need to serialize quota_off() for device */
- down(&dqopt->dqonoff_sem);
+ mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
toputinode[cnt] = NULL;
if (type != -1 && cnt != type)
@@ -1379,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type)
dqopt->info[cnt].dqi_bgrace = 0;
dqopt->ops[cnt] = NULL;
}
- up(&dqopt->dqonoff_sem);
+ mutex_unlock(&dqopt->dqonoff_mutex);
/* Sync the superblock so that buffers with quota data are written to
* disk (and so userspace sees correct data afterwards). */
if (sb->s_op->sync_fs)
@@ -1392,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type)
* changes done by userspace on the next quotaon() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (toputinode[cnt]) {
- down(&dqopt->dqonoff_sem);
+ mutex_lock(&dqopt->dqonoff_mutex);
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_enabled(sb, cnt)) {
@@ -1404,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type)
mark_inode_dirty(toputinode[cnt]);
iput(toputinode[cnt]);
}
- up(&dqopt->dqonoff_sem);
+ mutex_unlock(&dqopt->dqonoff_mutex);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev, 0);
@@ -1445,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
/* And now flush the block cache so that kernel sees the changes */
invalidate_bdev(sb->s_bdev, 0);
mutex_lock(&inode->i_mutex);
- down(&dqopt->dqonoff_sem);
+ mutex_lock(&dqopt->dqonoff_mutex);
if (sb_has_quota_enabled(sb, type)) {
error = -EBUSY;
goto out_lock;
@@ -1470,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
- down(&dqopt->dqio_sem);
+ mutex_lock(&dqopt->dqio_mutex);
if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
- up(&dqopt->dqio_sem);
+ mutex_unlock(&dqopt->dqio_mutex);
goto out_file_init;
}
- up(&dqopt->dqio_sem);
+ mutex_unlock(&dqopt->dqio_mutex);
mutex_unlock(&inode->i_mutex);
set_enable_flags(dqopt, type);
add_dquot_ref(sb, type);
- up(&dqopt->dqonoff_sem);
+ mutex_unlock(&dqopt->dqonoff_mutex);
return 0;
@@ -1488,7 +1489,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
dqopt->files[type] = NULL;
iput(inode);
out_lock:
- up(&dqopt->dqonoff_sem);
+ mutex_unlock(&dqopt->dqonoff_mutex);
if (oldflags != -1) {
down_write(&dqopt->dqptr_sem);
/* Set the flags back (in the case of accidental quotaon()
@@ -1576,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
{
struct dquot *dquot;
- down(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!(dquot = dqget(sb, id, type))) {
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
do_get_dqblk(dquot, di);
dqput(dquot);
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
@@ -1645,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
{
struct dquot *dquot;
- down(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!(dquot = dqget(sb, id, type))) {
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
do_set_dqblk(dquot, di);
dqput(dquot);
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
@@ -1661,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
struct mem_dqinfo *mi;
- down(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!sb_has_quota_enabled(sb, type)) {
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
mi = sb_dqopt(sb)->info + type;
@@ -1673,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
ii->dqi_flags = mi->dqi_flags & DQF_MASK;
ii->dqi_valid = IIF_ALL;
spin_unlock(&dq_data_lock);
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
@@ -1682,9 +1683,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
struct mem_dqinfo *mi;
- down(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!sb_has_quota_enabled(sb, type)) {
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
mi = sb_dqopt(sb)->info + type;
@@ -1699,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
mark_info_dirty(sb, type);
/* Force write to disk */
sb->dq_op->write_info(sb, type);
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 56bf76586019..efa832059143 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
* Process 1 Process 2
* ext3_create() quota_sync()
* journal_start() write_dquot()
- * DQUOT_INIT() down(dqio_sem)
- * down(dqio_sem) journal_start()
+ * DQUOT_INIT() down(dqio_mutex)
+ * down(dqio_mutex) journal_start()
*
*/
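The comment added to ext3/super.c describes an ABBA inversion: ext3_create() enters the journal and then needs dqio_mutex, while quota_sync() takes dqio_mutex and then enters the journal. A schematic sketch of why that shape deadlocks (journal_mutex is a hypothetical stand-in for the journal's internal locking, not a real jbd symbol):

    static DEFINE_MUTEX(journal_mutex);      /* stand-in for journal_start() */
    static DEFINE_MUTEX(dqio_mutex);

    static void create_path(void)            /* shape of ext3_create() */
    {
            mutex_lock(&journal_mutex);      /* A */
            mutex_lock(&dqio_mutex);         /* then B: DQUOT_INIT() */
            /* ... */
            mutex_unlock(&dqio_mutex);
            mutex_unlock(&journal_mutex);
    }

    static void sync_path(void)              /* shape of quota_sync() */
    {
            mutex_lock(&dqio_mutex);         /* B: write_dquot() */
            mutex_lock(&journal_mutex);      /* then A */
            /* ... */
            mutex_unlock(&journal_mutex);
            mutex_unlock(&dqio_mutex);
    }

    /*
     * If create_path() holds A while sync_path() holds B, each blocks
     * forever waiting for the other's lock.
     */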
diff --git a/fs/quota.c b/fs/quota.c
index ba9e0bf32f67..d6a2be826e29 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type)
/* Now when everything is written we can discard the pagecache so
* that userspace sees the changes. We need i_mutex and so we could
- * not do it inside dqonoff_sem. Moreover we need to be carefull
+ * not do it inside dqonoff_mutex. Moreover we need to be carefull
* about races with quotaoff() (that is the reason why we have own
* reference to inode). */
- down(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
discard[cnt] = NULL;
if (type != -1 && cnt != type)
@@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
continue;
discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
}
- up(&sb_dqopt(sb)->dqonoff_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (discard[cnt]) {
mutex_lock(&discard[cnt]->i_mutex);
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
index b4199ec3ece4..c519a583e681 100644
--- a/fs/quota_v2.c
+++ b/fs/quota_v2.c
@@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot)
ssize_t ret;
struct v2_disk_dqblk ddquot, empty;
- /* dq_off is guarded by dqio_sem */
+ /* dq_off is guarded by dqio_mutex */
if (!dquot->dq_off)
if ((ret = dq_insert_tree(dquot)) < 0) {
printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret);
diff --git a/fs/super.c b/fs/super.c
index 8f9c9b3af70c..9cc6545dfa4c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -77,8 +77,8 @@ static struct super_block *alloc_super(void)
s->s_count = S_BIAS;
atomic_set(&s->s_active, 1);
sema_init(&s->s_vfs_rename_sem,1);
- sema_init(&s->s_dquot.dqio_sem, 1);
- sema_init(&s->s_dquot.dqonoff_sem, 1);
+ mutex_init(&s->s_dquot.dqio_mutex);
+ mutex_init(&s->s_dquot.dqonoff_mutex);
init_rwsem(&s->s_dquot.dqptr_sem);
init_waitqueue_head(&s->s_wait_unfrozen);
s->s_maxbytes = MAX_NON_LFS;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index f33aeb22c26a..8dc2d04a103f 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#define __DQUOT_VERSION__ "dquot_6.5.1"
#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
@@ -215,7 +216,7 @@ struct dquot {
struct list_head dq_inuse; /* List of all quotas */
struct list_head dq_free; /* Free list element */
struct list_head dq_dirty; /* List of dirty dquots */
- struct semaphore dq_lock; /* dquot IO lock */
+ struct mutex dq_lock; /* dquot IO lock */
atomic_t dq_count; /* Use count */
wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
@@ -285,8 +286,8 @@ struct quota_format_type {
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
- struct semaphore dqio_sem; /* lock device while I/O in progress */
- struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */
+ struct mutex dqio_mutex; /* lock device while I/O in progress */
+ struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
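wait_on_dquot() in this patch is worth a second look: locking and immediately unlocking dq_lock does no work of its own, but the mutex_lock() cannot return until whoever is currently reading the dquot in has dropped dq_lock, so the pair acts as a wait-for-completion. The idiom, with comments added:

    /* Wait until any in-flight first-time read of this dquot finishes. */
    static void wait_on_dquot(struct dquot *dquot)
    {
            mutex_lock(&dquot->dq_lock);     /* sleeps while the I/O holder */
            mutex_unlock(&dquot->dq_lock);   /* runs; we only wanted the wait */
    }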
commit 6f87f0deebaff2716a3ce9232f948d702690562a
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 03:00:27 2006 -0800
[PATCH] sem2mutex: drivers/char/
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Wim Van Sebroeck <wim@iguana.be>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 28c5a3193b81..ede128356af2 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -181,7 +181,6 @@ static struct tty_driver *stli_serial;
* is already swapping a shared buffer won't make things any worse.
*/
static char *stli_tmpwritebuf;
-static DECLARE_MUTEX(stli_tmpwritesem);
#define STLI_TXBUFSIZE 4096
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index ca41d62b1d9d..8865387d3448 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -27,6 +27,7 @@
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <asm/hardware/dec21285.h>
#include <asm/io.h>
@@ -56,7 +57,7 @@ static int gbWriteEnable;
static int gbWriteBase64Enable;
static volatile unsigned char *FLASH_BASE;
static int gbFlashSize = KFLASH_SIZE;
-static DECLARE_MUTEX(nwflash_sem);
+static DEFINE_MUTEX(nwflash_mutex);
extern spinlock_t gpio_lock;
@@ -140,7 +141,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
/*
* We now lock against reads and writes. --rmk
*/
- if (down_interruptible(&nwflash_sem))
+ if (mutex_lock_interruptible(&nwflash_mutex))
return -ERESTARTSYS;
ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count);
@@ -149,7 +150,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
*ppos += count;
} else
ret = -EFAULT;
- up(&nwflash_sem);
+ mutex_unlock(&nwflash_mutex);
}
return ret;
}
@@ -188,7 +189,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
/*
* We now lock against reads and writes. --rmk
*/
- if (down_interruptible(&nwflash_sem))
+ if (mutex_lock_interruptible(&nwflash_mutex))
return -ERESTARTSYS;
written = 0;
@@ -277,7 +278,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
*/
leds_event(led_release);
- up(&nwflash_sem);
+ mutex_unlock(&nwflash_mutex);
return written;
}
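flash_read() and flash_write() above preserve the interruptible behaviour of down_interruptible(): mutex_lock_interruptible() returns non-zero if a signal arrives while sleeping, and the caller bounces the syscall with -ERESTARTSYS so it is transparently restarted. A minimal sketch of the pattern for a hypothetical character device (do_locked_read() is an illustrative helper, not part of the driver):

    #include <linux/fs.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(flash_mutex);

    static ssize_t flash_read_sketch(struct file *file, char __user *buf,
                                     size_t count, loff_t *ppos)
    {
            ssize_t ret;

            if (mutex_lock_interruptible(&flash_mutex))
                    return -ERESTARTSYS;     /* signal: let the VFS retry */

            ret = do_locked_read(buf, count, ppos);  /* hypothetical helper */

            mutex_unlock(&flash_mutex);
            return ret;
    }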
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index bdaab6992109..3f5d6077f39c 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -148,7 +148,6 @@ static struct tty_driver *stl_serial;
* is already swapping a shared buffer won't make things any worse.
*/
static char *stl_tmpwritebuf;
-static DECLARE_MUTEX(stl_tmpwritesem);
/*
* Define a local default termios struct. All ports will be created
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 1533f56baa42..2700c5c45b8a 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <asm/uaccess.h>
#include <linux/usb.h>
+#include <linux/mutex.h>
#ifdef CONFIG_USB_DEBUG
@@ -143,7 +144,7 @@ struct usb_pcwd_private {
static struct usb_pcwd_private *usb_pcwd_device;
/* prevent races between open() and disconnect() */
-static DECLARE_MUTEX (disconnect_sem);
+static DEFINE_MUTEX(disconnect_mutex);
/* local function prototypes */
static int usb_pcwd_probe (struct usb_interface *interface, const struct usb_device_id *id);
@@ -723,7 +724,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
struct usb_pcwd_private *usb_pcwd;
/* prevent races with open() */
- down (&disconnect_sem);
+ mutex_lock(&disconnect_mutex);
usb_pcwd = usb_get_intfdata (interface);
usb_set_intfdata (interface, NULL);
@@ -749,7 +750,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
cards_found--;
- up (&disconnect_sem);
+ mutex_unlock(&disconnect_mutex);
printk(KERN_INFO PFX "USB PC Watchdog disconnected\n");
}
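The pcwd_usb hunk shows why disconnect_mutex exists: with USB hot-unplug, disconnect() can tear the device down while open() is handing out a pointer to it. Holding one mutex across both closes the race. A simplified sketch of the pairing (the open() side is condensed for illustration, not quoted from the diff):

    static struct usb_pcwd_private *usb_pcwd_device;
    static DEFINE_MUTEX(disconnect_mutex);

    static int usb_pcwd_open_sketch(struct inode *inode, struct file *file)
    {
            int ret = 0;

            mutex_lock(&disconnect_mutex);
            if (!usb_pcwd_device)            /* already unplugged */
                    ret = -ENODEV;
            else
                    file->private_data = usb_pcwd_device;
            mutex_unlock(&disconnect_mutex);
            return ret;
    }

    static void usb_pcwd_disconnect_sketch(struct usb_interface *intf)
    {
            mutex_lock(&disconnect_mutex);   /* open() cannot interleave */
            usb_pcwd_device = NULL;          /* tear down under the lock */
            mutex_unlock(&disconnect_mutex);
    }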
commit 4f7a07b88726781e37c4c5f63db3a6ee3f8500d3
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 03:00:19 2006 -0800
[PATCH] convert fs/9p/ to mutexes, fix locking bugs
Convert fs/9p/mux.c from semaphore to mutex.
NOTE: fixed locking bugs in the process - the code was using semaphores
the other way around.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Eric Van Hensbergen <ericvh@ericvh.myip.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index ea1134eb47c8..8e8356c1c229 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -31,6 +31,7 @@
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
+#include <linux/mutex.h>
#include "debug.h"
#include "v9fs.h"
@@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);
-static DECLARE_MUTEX(v9fs_mux_task_lock);
+static DEFINE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;
static int v9fs_mux_num;
@@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
v9fs_mux_poll_task_num);
- up(&v9fs_mux_task_lock);
+ mutex_lock(&v9fs_mux_task_lock);
n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
if (n > v9fs_mux_poll_task_num) {
@@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
}
v9fs_mux_num++;
- down(&v9fs_mux_task_lock);
+ mutex_unlock(&v9fs_mux_task_lock);
return 0;
}
@@ -235,7 +236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
int i;
struct v9fs_mux_poll_task *vpt;
- up(&v9fs_mux_task_lock);
+ mutex_lock(&v9fs_mux_task_lock);
vpt = m->poll_task;
list_del(&m->mux_list);
for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
@@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
v9fs_mux_poll_task_num--;
}
v9fs_mux_num--;
- down(&v9fs_mux_task_lock);
+ mutex_unlock(&v9fs_mux_task_lock);
}
/**
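The locking bugs the changelog mentions are visible in the hunks above: v9fs_mux_poll_start() and v9fs_mux_poll_stop() called up() on entry and down() on exit, so the bookkeeping they were meant to protect ran with the lock released. A semaphore accepts this silently (up() on an un-held semaphore just bumps the count); a mutex's strict owner semantics make the inversion explicit, and the conversion fixes the direction. The broken and fixed shapes side by side:

    /* Before (broken): the lock is used backwards, so the v9fs_mux_num
     * and poll-task bookkeeping runs completely unprotected. */
    static void poll_start_broken(void)
    {
            up(&v9fs_mux_task_lock);         /* releases the lock on entry */
            /* ... bookkeeping runs unprotected ... */
            down(&v9fs_mux_task_lock);       /* re-acquires it on exit */
    }

    /* After: the conversion also fixes the inversion. */
    static void poll_start_fixed(void)
    {
            mutex_lock(&v9fs_mux_task_lock);
            /* ... mux/poll-task bookkeeping, actually protected ... */
            mutex_unlock(&v9fs_mux_task_lock);
    }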
commit 9331b3157c835353dd28efcd80d23563ad226aee
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 03:00:19 2006 -0800
[PATCH] convert kernel/rcupdate.c:rcu_barrier_sema to mutex
Convert kernel/rcupdate's rcu_barrier_sema to mutex.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index fedf5e369755..af8a2a57e17d 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -47,6 +47,7 @@
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
+#include <linux/mutex.h>
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk = {
@@ -75,7 +76,7 @@ static int rsinterval = 1000;
#endif
static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
+static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
#ifdef CONFIG_SMP
@@ -207,13 +208,13 @@ static void rcu_barrier_func(void *notused)
void rcu_barrier(void)
{
BUG_ON(in_interrupt());
- /* Take cpucontrol semaphore to protect against CPU hotplug */
- down(&rcu_barrier_sema);
+ /* Take cpucontrol mutex to protect against CPU hotplug */
+ mutex_lock(&rcu_barrier_mutex);
init_completion(&rcu_barrier_completion);
atomic_set(&rcu_barrier_cpu_count, 0);
on_each_cpu(rcu_barrier_func, NULL, 0, 1);
wait_for_completion(&rcu_barrier_completion);
- up(&rcu_barrier_sema);
+ mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
@@ -549,7 +550,6 @@ static struct notifier_block __devinitdata rcu_nb = {
*/
void __init rcu_init(void)
{
- sema_init(&rcu_barrier_sema, 1);
rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
/* Register notifier for non-boot CPUs */
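A side benefit shows up in the last hunk: DEFINE_MUTEX() yields a compile-time-initialized object, so the sema_init() call in rcu_init() is deleted rather than translated. The lock is valid from the first instruction, removing any ordering dependency between initialization and the first rcu_barrier() caller. A condensed restatement of the resulting shape:

    static DEFINE_MUTEX(rcu_barrier_mutex);  /* no runtime init needed */

    void rcu_barrier_sketch(void)
    {
            mutex_lock(&rcu_barrier_mutex);  /* one barrier at a time */
            /* init the completion, IPI rcu_barrier_func() to every CPU,
             * then wait for all queued callbacks to complete */
            mutex_unlock(&rcu_barrier_mutex);
    }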
commit 3d3f26a7baaa921a0e790b4c72d20f0de91a5d65
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 03:00:18 2006 -0800
[PATCH] kernel/cpuset.c, mutex conversion
convert cpuset.c's callback_sem and manage_sem to mutexes.
Build and boot tested by Ingo.
Build, boot, unit and stress tested by pj.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
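The big comment rewritten below documents the nesting discipline that survives the conversion: manage_mutex is the outer lock, held across a whole modification and safe to allocate memory under; callback_mutex is the inner lock, held only for short reads and writes of the masks and never across kmalloc() (the allocator can call back into cpuset code). A sketch of the two roles, using the patch's names:

    /* Writer side: single-threads all cpuset modifications. */
    static void update_cpus_sketch(struct cpuset *cs, cpumask_t new_cpus)
    {
            mutex_lock(&manage_mutex);       /* outer: validate, allocate */
            mutex_lock(&callback_mutex);     /* inner: publish briefly */
            cs->cpus_allowed = new_cpus;
            mutex_unlock(&callback_mutex);
            mutex_unlock(&manage_mutex);
    }

    /* Reader side: callbacks take only the inner lock, briefly. */
    static cpumask_t read_cpus_sketch(struct cpuset *cs)
    {
            cpumask_t mask;

            mutex_lock(&callback_mutex);
            mask = cs->cpus_allowed;
            mutex_unlock(&callback_mutex);
            return mask;
    }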
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 12815d3f1a05..c86ee051b734 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -53,7 +53,7 @@
#include <asm/uaccess.h>
#include <asm/atomic.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#define CPUSET_SUPER_MAGIC 0x27e0eb
@@ -168,63 +168,57 @@ static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb;
/*
- * We have two global cpuset semaphores below. They can nest.
- * It is ok to first take manage_sem, then nest callback_sem. We also
+ * We have two global cpuset mutexes below. They can nest.
+ * It is ok to first take manage_mutex, then nest callback_mutex. We also
* require taking task_lock() when dereferencing a tasks cpuset pointer.
* See "The task_lock() exception", at the end of this comment.
*
- * A task must hold both semaphores to modify cpusets. If a task
- * holds manage_sem, then it blocks others wanting that semaphore,
- * ensuring that it is the only task able to also acquire callback_sem
+ * A task must hold both mutexes to modify cpusets. If a task
+ * holds manage_mutex, then it blocks others wanting that mutex,
+ * ensuring that it is the only task able to also acquire callback_mutex
* and be able to modify cpusets. It can perform various checks on
* the cpuset structure first, knowing nothing will change. It can
- * also allocate memory while just holding manage_sem. While it is
+ * also allocate memory while just holding manage_mutex. While it is
* performing these checks, various callback routines can briefly
- * acquire callback_sem to query cpusets. Once it is ready to make
- * the changes, it takes callback_sem, blocking everyone else.
+ * acquire callback_mutex to query cpusets. Once it is ready to make
+ * the changes, it takes callback_mutex, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
- * callback_sem, as that would risk double tripping on callback_sem
+ * callback_mutex, as that would risk double tripping on callback_mutex
* from one of the callbacks into the cpuset code from within
* __alloc_pages().
*
- * If a task is only holding callback_sem, then it has read-only
+ * If a task is only holding callback_mutex, then it has read-only
* access to cpusets.
*
* The task_struct fields mems_allowed and mems_generation may only
* be accessed in the context of that task, so require no locks.
*
* Any task can increment and decrement the count field without lock.
- * So in general, code holding manage_sem or callback_sem can't rely
+ * So in general, code holding manage_mutex or callback_mutex can't rely
* on the count field not changing. However, if the count goes to
- * zero, then only attach_task(), which holds both semaphores, can
+ * zero, then only attach_task(), which holds both mutexes, can
* increment it again. Because a count of zero means that no tasks
* are currently attached, therefore there is no way a task attached
* to that cpuset can fork (the other way to increment the count).
- * So code holding manage_sem or callback_sem can safely assume that
+ * So code holding manage_mutex or callback_mutex can safely assume that
* if the count is zero, it will stay zero. Similarly, if a task
- * holds manage_sem or callback_sem on a cpuset with zero count, it
+ * holds manage_mutex or callback_mutex on a cpuset with zero count, it
* knows that the cpuset won't be removed, as cpuset_rmdir() needs
- * both of those semaphores.
- *
- * A possible optimization to improve parallelism would be to make
- * callback_sem a R/W semaphore (rwsem), allowing the callback routines
- * to proceed in parallel, with read access, until the holder of
- * manage_sem needed to take this rwsem for exclusive write access
- * and modify some cpusets.
+ * both of those mutexes.
*
* The cpuset_common_file_write handler for operations that modify
- * the cpuset hierarchy holds manage_sem across the entire operation,
+ * the cpuset hierarchy holds manage_mutex across the entire operation,
* single threading all such cpuset modifications across the system.
*
- * The cpuset_common_file_read() handlers only hold callback_sem across
+ * The cpuset_common_file_read() handlers only hold callback_mutex across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
* The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
- * (usually) take either semaphore. These are the two most performance
+ * (usually) take either mutex. These are the two most performance
* critical pieces of code here. The exception occurs on cpuset_exit(),
- * when a task in a notify_on_release cpuset exits. Then manage_sem
+ * when a task in a notify_on_release cpuset exits. Then manage_mutex
* is taken, and if the cpuset count is zero, a usermode call made
* to /sbin/cpuset_release_agent with the name of the cpuset (path
* relative to the root of cpuset file system) as the argument.
@@ -242,9 +236,9 @@ static struct super_block *cpuset_sb;
*
* The need for this exception arises from the action of attach_task(),
* which overwrites one tasks cpuset pointer with another. It does
- * so using both semaphores, however there are several performance
+ * so using both mutexes, however there are several performance
* critical places that need to reference task->cpuset without the
- * expense of grabbing a system global semaphore. Therefore except as
+ * expense of grabbing a system global mutex. Therefore except as
* noted below, when dereferencing or, as in attach_task(), modifying
* a tasks cpuset pointer we use task_lock(), which acts on a spinlock
* (task->alloc_lock) already in the task_struct routinely used for
@@ -256,8 +250,8 @@ static struct super_block *cpuset_sb;
* the routine cpuset_update_task_memory_state().
*/
-static DECLARE_MUTEX(manage_sem);
-static DECLARE_MUTEX(callback_sem);
+static DEFINE_MUTEX(manage_mutex);
+static DEFINE_MUTEX(callback_mutex);
/*
* A couple of forward declarations required, due to cyclic reference loop:
@@ -432,7 +426,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
}
/*
- * Call with manage_sem held. Writes path of cpuset into buf.
+ * Call with manage_mutex held. Writes path of cpuset into buf.
* Returns 0 on success, -errno on error.
*/
@@ -484,11 +478,11 @@ static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
* status of the /sbin/cpuset_release_agent task, so no sense holding
* our caller up for that.
*
- * When we had only one cpuset semaphore, we had to call this
+ * When we had only one cpuset mutex, we had to call this
* without holding it, to avoid deadlock when call_usermodehelper()
* allocated memory. With two locks, we could now call this while
- * holding manage_sem, but we still don't, so as to minimize
- * the time manage_sem is held.
+ * holding manage_mutex, but we still don't, so as to minimize
+ * the time manage_mutex is held.
*/
static void cpuset_release_agent(const char *pathbuf)
@@ -520,15 +514,15 @@ static void cpuset_release_agent(const char *pathbuf)
* cs is notify_on_release() and now both the user count is zero and
* the list of children is empty, prepare cpuset path in a kmalloc'd
* buffer, to be returned via ppathbuf, so that the caller can invoke
- * cpuset_release_agent() with it later on, once manage_sem is dropped.
- * Call here with manage_sem held.
+ * cpuset_release_agent() with it later on, once manage_mutex is dropped.
+ * Call here with manage_mutex held.
*
* This check_for_release() routine is responsible for kmalloc'ing
* pathbuf. The above cpuset_release_agent() is responsible for
* kfree'ing pathbuf. The caller of these routines is responsible
* for providing a pathbuf pointer, initialized to NULL, then
- * calling check_for_release() with manage_sem held and the address
- * of the pathbuf pointer, then dropping manage_sem, then calling
+ * calling check_for_release() with manage_mutex held and the address
+ * of the pathbuf pointer, then dropping manage_mutex, then calling
* cpuset_release_agent() with pathbuf, as set by check_for_release().
*/
@@ -559,7 +553,7 @@ static void check_for_release(struct cpuset *cs, char **ppathbuf)
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_map.
*
- * Call with callback_sem held.
+ * Call with callback_mutex held.
*/
static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
@@ -583,7 +577,7 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
* One way or another, we guarantee to return some non-empty subset
* of node_online_map.
*
- * Call with callback_sem held.
+ * Call with callback_mutex held.
*/
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
@@ -608,12 +602,12 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
* current->cpuset if a task has its memory placement changed.
* Do not call this routine if in_interrupt().
*
- * Call without callback_sem or task_lock() held. May be called
- * with or without manage_sem held. Doesn't need task_lock to guard
+ * Call without callback_mutex or task_lock() held. May be called
+ * with or without manage_mutex held. Doesn't need task_lock to guard
* against another task changing a non-NULL cpuset pointer to NULL,
* as that is only done by a task on itself, and if the current task
* is here, it is not simultaneously in the exit code NULL'ing its
- * cpuset pointer. This routine also might acquire callback_sem and
+ * cpuset pointer. This routine also might acquire callback_mutex and
* current->mm->mmap_sem during call.
*
* Reading current->cpuset->mems_generation doesn't need task_lock
@@ -658,13 +652,13 @@ void cpuset_update_task_memory_state(void)
}
if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
task_lock(tsk);
cs = tsk->cpuset; /* Maybe changed when task not locked */
guarantee_online_mems(cs, &tsk->mems_allowed);
tsk->cpuset_mems_generation = cs->mems_generation;
task_unlock(tsk);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
mpol_rebind_task(tsk, &tsk->mems_allowed);
}
}
@@ -674,7 +668,7 @@ void cpuset_update_task_memory_state(void)
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding manage_sem.
+ * are only set if the other's are set. Call holding manage_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -692,7 +686,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
- * manage_sem held.
+ * manage_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -746,7 +740,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
* exclusive child cpusets
* Build these two partitions by calling partition_sched_domains
*
- * Call with manage_sem held. May nest a call to the
+ * Call with manage_mutex held. May nest a call to the
* lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
*/
@@ -792,7 +786,7 @@ static void update_cpu_domains(struct cpuset *cur)
}
/*
- * Call with manage_sem held. May take callback_sem during call.
+ * Call with manage_mutex held. May take callback_mutex during call.
*/
static int update_cpumask(struct cpuset *cs, char *buf)
@@ -811,9 +805,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
if (retval < 0)
return retval;
cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
cs->cpus_allowed = trialcs.cpus_allowed;
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
if (is_cpu_exclusive(cs) && !cpus_unchanged)
update_cpu_domains(cs);
return 0;
@@ -827,7 +821,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
* the cpuset is marked 'memory_migrate', migrate the tasks
* pages to the new memory.
*
- * Call with manage_sem held. May take callback_sem during call.
+ * Call with manage_mutex held. May take callback_mutex during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_sem, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -862,11 +856,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
if (retval < 0)
goto done;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
cs->mems_allowed = trialcs.mems_allowed;
atomic_inc(&cpuset_mems_generation);
cs->mems_generation = atomic_read(&cpuset_mems_generation);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
@@ -922,7 +916,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
* tasklist_lock. Forks can happen again now - the mpol_copy()
* cpuset_being_rebound check will catch such forks, and rebind
* their vma mempolicies too. Because we still hold the global
- * cpuset manage_sem, we know that no other rebind effort will
+ * cpuset manage_mutex, we know that no other rebind effort will
* be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -948,7 +942,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
}
/*
- * Call with manage_sem held.
+ * Call with manage_mutex held.
*/
static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
@@ -967,7 +961,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
* cs: the cpuset to update
* buf: the buffer where we read the 0 or 1
*
- * Call with manage_sem held.
+ * Call with manage_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
@@ -989,12 +983,12 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
return err;
cpu_exclusive_changed =
(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
if (turning_on)
set_bit(bit, &cs->flags);
else
clear_bit(bit, &cs->flags);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
if (cpu_exclusive_changed)
update_cpu_domains(cs);
@@ -1104,7 +1098,7 @@ static int fmeter_getrate(struct fmeter *fmp)
* writing the path of the old cpuset in 'ppathbuf' if it needs to be
* notified on release.
*
- * Call holding manage_sem. May take callback_sem and task_lock of
+ * Call holding manage_mutex. May take callback_mutex and task_lock of
* the task 'pid' during call.
*/
@@ -1144,13 +1138,13 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
get_task_struct(tsk);
}
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
task_lock(tsk);
oldcs = tsk->cpuset;
if (!oldcs) {
task_unlock(tsk);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
put_task_struct(tsk);
return -ESRCH;
}
@@ -1164,7 +1158,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
from = oldcs->mems_allowed;
to = cs->mems_allowed;
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
mm = get_task_mm(tsk);
if (mm) {
@@ -1221,7 +1215,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
}
buffer[nbytes] = 0; /* nul-terminate */
- down(&manage_sem);
+ mutex_lock(&manage_mutex);
if (is_removed(cs)) {
retval = -ENODEV;
@@ -1264,7 +1258,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
if (retval == 0)
retval = nbytes;
out2:
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
cpuset_release_agent(pathbuf);
out1:
kfree(buffer);
@@ -1304,9 +1298,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
cpumask_t mask;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
mask = cs->cpus_allowed;
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
return cpulist_scnprintf(page, PAGE_SIZE, mask);
}
@@ -1315,9 +1309,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
nodemask_t mask;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
mask = cs->mems_allowed;
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
return nodelist_scnprintf(page, PAGE_SIZE, mask);
}
@@ -1598,7 +1592,7 @@ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
* Handle an open on 'tasks' file. Prepare a buffer listing the
* process id's of tasks currently attached to the cpuset being opened.
*
- * Does not require any specific cpuset semaphores, and does not take any.
+ * Does not require any specific cpuset mutexes, and does not take any.
*/
static int cpuset_tasks_open(struct inode *unused, struct file *file)
{
@@ -1754,7 +1748,7 @@ static int cpuset_populate_dir(struct dentry *cs_dentry)
* name: name of the new cpuset. Will be strcpy'ed.
* mode: mode to set on new inode
*
- * Must be called with the semaphore on the parent inode held
+ * Must be called with the mutex on the parent inode held
*/
static long cpuset_create(struct cpuset *parent, const char *name, int mode)
@@ -1766,7 +1760,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
if (!cs)
return -ENOMEM;
- down(&manage_sem);
+ mutex_lock(&manage_mutex);
cpuset_update_task_memory_state();
cs->flags = 0;
if (notify_on_release(parent))
@@ -1782,28 +1776,28 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
cs->parent = parent;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
list_add(&cs->sibling, &cs->parent->children);
number_of_cpusets++;
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
err = cpuset_create_dir(cs, name, mode);
if (err < 0)
goto err;
/*
- * Release manage_sem before cpuset_populate_dir() because it
+ * Release manage_mutex before cpuset_populate_dir() because it
* will down() this new directory's i_mutex and if we race with
* another mkdir, we might deadlock.
*/
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
err = cpuset_populate_dir(cs->dentry);
/* If err < 0, we have a half-filled directory - oh well ;) */
return 0;
err:
list_del(&cs->sibling);
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
kfree(cs);
return err;
}
@@ -1825,18 +1819,18 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
/* the vfs holds both inode->i_mutex already */
- down(&manage_sem);
+ mutex_lock(&manage_mutex);
cpuset_update_task_memory_state();
if (atomic_read(&cs->count) > 0) {
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
return -EBUSY;
}
if (!list_empty(&cs->children)) {
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
return -EBUSY;
}
parent = cs->parent;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
set_bit(CS_REMOVED, &cs->flags);
if (is_cpu_exclusive(cs))
update_cpu_domains(cs);
@@ -1848,10 +1842,10 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
cpuset_d_remove_dir(d);
dput(d);
number_of_cpusets--;
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
if (list_empty(&parent->children))
check_for_release(parent, &pathbuf);
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
cpuset_release_agent(pathbuf);
return 0;
}
@@ -1960,19 +1954,19 @@ void cpuset_fork(struct task_struct *child)
* Description: Detach cpuset from @tsk and release it.
*
* Note that cpusets marked notify_on_release force every task in
- * them to take the global manage_sem semaphore when exiting.
+ * them to take the global manage_mutex mutex when exiting.
* This could impact scaling on very large systems. Be reluctant to
* use notify_on_release cpusets where very high task exit scaling
* is required on large systems.
*
* Don't even think about dereferencing 'cs' after the cpuset use count
- * goes to zero, except inside a critical section guarded by manage_sem
- * or callback_sem. Otherwise a zero cpuset use count is a license to
+ * goes to zero, except inside a critical section guarded by manage_mutex
+ * or callback_mutex. Otherwise a zero cpuset use count is a license to
* any other task to nuke the cpuset immediately, via cpuset_rmdir().
*
- * This routine has to take manage_sem, not callback_sem, because
- * it is holding that semaphore while calling check_for_release(),
- * which calls kmalloc(), so can't be called holding callback__sem().
+ * This routine has to take manage_mutex, not callback_mutex, because
+ * it is holding that mutex while calling check_for_release(),
+ * which calls kmalloc(), so can't be called holding callback_mutex().
*
* We don't need to task_lock() this reference to tsk->cpuset,
* because tsk is already marked PF_EXITING, so attach_task() won't
@@ -2022,10 +2016,10 @@ void cpuset_exit(struct task_struct *tsk)
if (notify_on_release(cs)) {
char *pathbuf = NULL;
- down(&manage_sem);
+ mutex_lock(&manage_mutex);
if (atomic_dec_and_test(&cs->count))
check_for_release(cs, &pathbuf);
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
cpuset_release_agent(pathbuf);
} else {
atomic_dec(&cs->count);
@@ -2046,11 +2040,11 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
cpumask_t mask;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
task_lock(tsk);
guarantee_online_cpus(tsk->cpuset, &mask);
task_unlock(tsk);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
return mask;
}
@@ -2074,11 +2068,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
nodemask_t mask;
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
task_lock(tsk);
guarantee_online_mems(tsk->cpuset, &mask);
task_unlock(tsk);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
return mask;
}
@@ -2104,7 +2098,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
/*
* nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
- * ancestor to the specified cpuset. Call holding callback_sem.
+ * ancestor to the specified cpuset. Call holding callback_mutex.
* If no ancestor is mem_exclusive (an unusual configuration), then
* returns the root cpuset.
*/
@@ -2131,12 +2125,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
* GFP_KERNEL allocations are not so marked, so can escape to the
* nearest mem_exclusive ancestor cpuset.
*
- * Scanning up parent cpusets requires callback_sem. The __alloc_pages()
+ * Scanning up parent cpusets requires callback_mutex. The __alloc_pages()
* routine only calls here with __GFP_HARDWALL bit _not_ set if
* it's a GFP_KERNEL allocation, and all nodes in the current tasks
* mems_allowed came up empty on the first pass over the zonelist.
* So only GFP_KERNEL allocations, if all nodes in the cpuset are
- * short of memory, might require taking the callback_sem semaphore.
+ * short of memory, might require taking the callback_mutex mutex.
*
* The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
* calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
@@ -2171,31 +2165,31 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
return 1;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
task_lock(current);
cs = nearest_exclusive_ancestor(current->cpuset);
task_unlock(current);
allowed = node_isset(node, cs->mems_allowed);
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
return allowed;
}
/**
* cpuset_lock - lock out any changes to cpuset structures
*
- * The out of memory (oom) code needs to lock down cpusets
+ * The out of memory (oom) code needs to mutex_lock cpusets
* from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset. Expose callback_sem via this
+ * task in an overlapping cpuset. Expose callback_mutex via this
* cpuset_lock() routine, so the oom code can lock it, before
* locking the task list. The tasklist_lock is a spinlock, so
- * must be taken inside callback_sem.
+ * must be taken inside callback_mutex.
*/
void cpuset_lock(void)
{
- down(&callback_sem);
+ mutex_lock(&callback_mutex);
}
/**
@@ -2206,7 +2200,7 @@ void cpuset_lock(void)
void cpuset_unlock(void)
{
- up(&callback_sem);
+ mutex_unlock(&callback_mutex);
}
/**
@@ -2218,7 +2212,7 @@ void cpuset_unlock(void)
* determine if task @p's memory usage might impact the memory
* available to the current task.
*
- * Call while holding callback_sem.
+ * Call while holding callback_mutex.
**/
int cpuset_excl_nodes_overlap(const struct task_struct *p)
@@ -2289,7 +2283,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
- * and we take manage_sem, keeping attach_task() from changing it
+ * and we take manage_mutex, keeping attach_task() from changing it
* anyway.
*/
@@ -2305,7 +2299,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
return -ENOMEM;
tsk = m->private;
- down(&manage_sem);
+ mutex_lock(&manage_mutex);
cs = tsk->cpuset;
if (!cs) {
retval = -EINVAL;
@@ -2318,7 +2312,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
seq_puts(m, buf);
seq_putc(m, '\n');
out:
- up(&manage_sem);
+ mutex_unlock(&manage_mutex);
kfree(buf);
return retval;
}
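The cpuset comments above encode a two-level locking discipline: manage_mutex is the outer, long-held lock under which it is safe to call kmalloc() (check_for_release() depends on this), while callback_mutex is the inner, short-held lock that readers such as the oom path take through cpuset_lock(). A condensed sketch of that nesting; the function body is illustrative, not kernel code:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(manage_mutex);   /* outer: long held, may kmalloc */
    static DEFINE_MUTEX(callback_mutex); /* inner: short held, no allocations */

    static void modify_cpusets(void)
    {
            mutex_lock(&manage_mutex);
            /* validate and allocate here; kmalloc() is safe */
            mutex_lock(&callback_mutex);
            /* publish changes that query paths might race with */
            mutex_unlock(&callback_mutex);
            mutex_unlock(&manage_mutex);
    }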
commit b824eb605ccba995fd32c6590aed365f93d48002
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Mar 23 02:59:29 2006 -0800
[PATCH] Make CONFIG_REGPARM enabled by default
Make CONFIG_REGPARM enabled by default. It's a noticeable win both for size
and for performance, and gcc[34] handles it correctly.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 69c8a70e854b..5f89c74537ef 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -661,9 +661,17 @@ config BOOT_IOREMAP
config REGPARM
bool "Use register arguments"
+ default y
help
- Compile the kernel with -mregparm=3. This uses a different ABI
- and passes the first three arguments of a function call in registers.
+ Compile the kernel with -mregparm=3. This instructs gcc to use
+ a more efficient function call ABI which passes the first three
+ arguments of a function call via registers, which results in denser
+ and faster code.
+
+ If this option is disabled, then the default ABI of passing
+ arguments via the stack is used.
+
+ If unsure, say Y.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
commit 384c36893f94e0e2145832cf2f20684ae372aee5
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Mar 22 03:54:16 2006 -0300
V4L/DVB (3568g): sem2mutex: zoran
Semaphore to mutexes conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
diff --git a/drivers/media/video/zoran.h b/drivers/media/video/zoran.h
index 9fe6ad3b6352..ad04a129499d 100644
--- a/drivers/media/video/zoran.h
+++ b/drivers/media/video/zoran.h
@@ -395,7 +395,7 @@ struct zoran {
struct videocodec *codec; /* video codec */
struct videocodec *vfe; /* video front end */
- struct semaphore resource_lock; /* prevent evil stuff */
+ struct mutex resource_lock; /* prevent evil stuff */
u8 initialized; /* flag if zoran has been correctly initialized */
int user; /* number of current users */
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index a2e528ccf28a..b22dbb6d18f6 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -47,6 +47,7 @@
#include <linux/interrupt.h>
#include <linux/video_decoder.h>
#include <linux/video_encoder.h>
+#include <linux/mutex.h>
#include <asm/io.h>
@@ -673,7 +674,7 @@ zoran_i2c_client_register (struct i2c_client *client)
KERN_DEBUG "%s: i2c_client_register() - driver id = %d\n",
ZR_DEVNAME(zr), client->driver->id);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (zr->user > 0) {
/* we're already busy, so we keep a reference to
@@ -694,7 +695,7 @@ zoran_i2c_client_register (struct i2c_client *client)
}
clientreg_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -707,7 +708,7 @@ zoran_i2c_client_unregister (struct i2c_client *client)
dprintk(2, KERN_DEBUG "%s: i2c_client_unregister()\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (zr->user > 0) {
res = -EBUSY;
@@ -722,7 +723,7 @@ zoran_i2c_client_unregister (struct i2c_client *client)
snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%d]", zr->id);
}
clientunreg_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -1202,7 +1203,7 @@ find_zr36057 (void)
zr->id = zoran_num;
snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
spin_lock_init(&zr->spinlock);
- init_MUTEX(&zr->resource_lock);
+ mutex_init(&zr->resource_lock);
if (pci_enable_device(dev))
continue;
zr->zr36057_adr = pci_resource_start(zr->pci_dev, 0);
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index 485553be190b..b2c6e01e3923 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -81,6 +81,7 @@
#include <linux/video_decoder.h>
#include <linux/video_encoder.h>
+#include <linux/mutex.h>
#include "zoran.h"
#include "zoran_device.h"
#include "zoran_card.h"
@@ -1292,7 +1293,7 @@ zoran_open (struct inode *inode,
/* see fs/device.c - the kernel already locks during open(),
* so locking ourselves only causes deadlocks */
- /*down(&zr->resource_lock);*/
+ /*mutex_lock(&zr->resource_lock);*/
if (!zr->decoder) {
dprintk(1,
@@ -1371,7 +1372,7 @@ zoran_open (struct inode *inode,
if (zr->user++ == 0)
first_open = 1;
- /*up(&zr->resource_lock);*/
+ /*mutex_unlock(&zr->resource_lock);*/
/* default setup - TODO: look at flags */
if (first_open) { /* First device open */
@@ -1401,7 +1402,7 @@ zoran_open (struct inode *inode,
/* if there's no device found, we didn't obtain the lock either */
if (zr) {
- /*up(&zr->resource_lock);*/
+ /*mutex_unlock(&zr->resource_lock);*/
}
return res;
@@ -1419,7 +1420,7 @@ zoran_close (struct inode *inode,
/* kernel locks (fs/device.c), so don't do that ourselves
* (prevents deadlocks) */
- /*down(&zr->resource_lock);*/
+ /*mutex_lock(&zr->resource_lock);*/
zoran_close_end_session(file);
@@ -1466,7 +1467,7 @@ zoran_close (struct inode *inode,
}
module_put(THIS_MODULE);
- /*up(&zr->resource_lock);*/
+ /*mutex_unlock(&zr->resource_lock);*/
dprintk(4, KERN_INFO "%s: zoran_close() done\n", ZR_DEVNAME(zr));
@@ -2027,14 +2028,14 @@ zoran_do_ioctl (struct inode *inode,
* but moving the free code outside the munmap() handler fixes
* all this... If someone knows why, please explain it to me (Ronald)
*/
- if (!down_trylock(&zr->resource_lock)) {
+ if (!!mutex_trylock(&zr->resource_lock)) {
/* we obtained it! Let's try to free some things */
if (fh->jpg_buffers.ready_to_be_freed)
jpg_fbuffer_free(file);
if (fh->v4l_buffers.ready_to_be_freed)
v4l_fbuffer_free(file);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
}
switch (cmd) {
@@ -2051,12 +2052,12 @@ zoran_do_ioctl (struct inode *inode,
vcap->channels = zr->card.inputs;
vcap->audios = 0;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
vcap->maxwidth = BUZ_MAX_WIDTH;
vcap->maxheight = BUZ_MAX_HEIGHT;
vcap->minwidth = BUZ_MIN_WIDTH;
vcap->minheight = BUZ_MIN_HEIGHT;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -2084,9 +2085,9 @@ zoran_do_ioctl (struct inode *inode,
vchan->tuners = 0;
vchan->flags = 0;
vchan->type = VIDEO_TYPE_CAMERA;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
vchan->norm = zr->norm;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
vchan->channel = channel;
return 0;
@@ -2113,7 +2114,7 @@ zoran_do_ioctl (struct inode *inode,
"%s: VIDIOCSCHAN - channel=%d, norm=%d\n",
ZR_DEVNAME(zr), vchan->channel, vchan->norm);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if ((res = zoran_set_input(zr, vchan->channel)))
goto schan_unlock_and_return;
if ((res = zoran_set_norm(zr, vchan->norm)))
@@ -2122,7 +2123,7 @@ zoran_do_ioctl (struct inode *inode,
/* Make sure the changes come into effect */
res = wait_grab_pending(zr);
schan_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
break;
@@ -2134,7 +2135,7 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOCGPICT\n", ZR_DEVNAME(zr));
memset(vpict, 0, sizeof(struct video_picture));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
vpict->hue = zr->hue;
vpict->brightness = zr->brightness;
vpict->contrast = zr->contrast;
@@ -2145,7 +2146,7 @@ zoran_do_ioctl (struct inode *inode,
} else {
vpict->depth = 0;
}
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -2180,7 +2181,7 @@ zoran_do_ioctl (struct inode *inode,
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
decoder_command(zr, DECODER_SET_PICTURE, vpict);
@@ -2191,7 +2192,7 @@ zoran_do_ioctl (struct inode *inode,
fh->overlay_settings.format = &zoran_formats[i];
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -2204,9 +2205,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOCCAPTURE - on=%d\n",
ZR_DEVNAME(zr), *on);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = setup_overlay(file, *on);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2219,12 +2220,12 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOCGWIN\n", ZR_DEVNAME(zr));
memset(vwin, 0, sizeof(struct video_window));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
vwin->x = fh->overlay_settings.x;
vwin->y = fh->overlay_settings.y;
vwin->width = fh->overlay_settings.width;
vwin->height = fh->overlay_settings.height;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
vwin->clipcount = 0;
return 0;
}
@@ -2241,12 +2242,12 @@ zoran_do_ioctl (struct inode *inode,
ZR_DEVNAME(zr), vwin->x, vwin->y, vwin->width,
vwin->height, vwin->clipcount);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res =
setup_window(file, vwin->x, vwin->y, vwin->width,
vwin->height, vwin->clips,
vwin->clipcount, NULL);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2258,9 +2259,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOCGFBUF\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
*vbuf = zr->buffer;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
break;
@@ -2287,12 +2288,12 @@ zoran_do_ioctl (struct inode *inode,
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res =
setup_fbuffer(file, vbuf->base, &zoran_formats[i],
vbuf->width, vbuf->height,
vbuf->bytesperline);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2305,9 +2306,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOCSYNC - frame=%d\n",
ZR_DEVNAME(zr), *frame);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = v4l_sync(file, *frame);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
if (!res)
zr->v4l_sync_tail++;
return res;
@@ -2325,9 +2326,9 @@ zoran_do_ioctl (struct inode *inode,
ZR_DEVNAME(zr), vmap->frame, vmap->width, vmap->height,
vmap->format);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = v4l_grab(file, vmap);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
break;
@@ -2348,7 +2349,7 @@ zoran_do_ioctl (struct inode *inode,
i * fh->v4l_buffers.buffer_size;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->jpg_buffers.allocated || fh->v4l_buffers.allocated) {
dprintk(1,
@@ -2367,7 +2368,7 @@ zoran_do_ioctl (struct inode *inode,
/* The next mmap will map the V4L buffers */
fh->map_mode = ZORAN_MAP_MODE_RAW;
v4l1reqbuf_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2421,7 +2422,7 @@ zoran_do_ioctl (struct inode *inode,
bparams->major_version = MAJOR_VERSION;
bparams->minor_version = MINOR_VERSION;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
bparams->norm = zr->norm;
bparams->input = zr->input;
@@ -2450,7 +2451,7 @@ zoran_do_ioctl (struct inode *inode,
bparams->jpeg_markers =
fh->jpg_settings.jpg_comp.jpeg_markers;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
bparams->VFIFO_FB = 0;
@@ -2486,7 +2487,7 @@ zoran_do_ioctl (struct inode *inode,
sizeof(bparams->COM_data));
settings.jpg_comp.jpeg_markers = bparams->jpeg_markers;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (zr->codec_mode != BUZ_MODE_IDLE) {
dprintk(1,
@@ -2506,7 +2507,7 @@ zoran_do_ioctl (struct inode *inode,
fh->jpg_settings = settings;
sparams_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2538,7 +2539,7 @@ zoran_do_ioctl (struct inode *inode,
breq->size > MAX_KMALLOC_MEM)
breq->size = MAX_KMALLOC_MEM;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->jpg_buffers.allocated || fh->v4l_buffers.allocated) {
dprintk(1,
@@ -2561,7 +2562,7 @@ zoran_do_ioctl (struct inode *inode,
* also be *_PLAY, but it doesn't matter here */
fh->map_mode = ZORAN_MAP_MODE_JPG_REC;
jpgreqbuf_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2574,9 +2575,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: BUZIOC_QBUF_CAPT - frame=%d\n",
ZR_DEVNAME(zr), *frame);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = jpg_qbuf(file, *frame, BUZ_MODE_MOTION_COMPRESS);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2589,9 +2590,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: BUZIOC_QBUF_PLAY - frame=%d\n",
ZR_DEVNAME(zr), *frame);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = jpg_qbuf(file, *frame, BUZ_MODE_MOTION_DECOMPRESS);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2604,9 +2605,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: BUZIOC_SYNC\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = jpg_sync(file, bsync);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -2630,7 +2631,7 @@ zoran_do_ioctl (struct inode *inode,
input = zr->card.input[bstat->input].muxsel;
norm = VIDEO_MODE_AUTO;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (zr->codec_mode != BUZ_MODE_IDLE) {
dprintk(1,
@@ -2655,7 +2656,7 @@ zoran_do_ioctl (struct inode *inode,
decoder_command(zr, DECODER_SET_INPUT, &input);
decoder_command(zr, DECODER_SET_NORM, &zr->norm);
gstat_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
if (!res) {
bstat->signal =
@@ -2763,7 +2764,7 @@ zoran_do_ioctl (struct inode *inode,
switch (fmt->type) {
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
fmt->fmt.win.w.left = fh->overlay_settings.x;
fmt->fmt.win.w.top = fh->overlay_settings.y;
@@ -2776,14 +2777,14 @@ zoran_do_ioctl (struct inode *inode,
else
fmt->fmt.win.field = V4L2_FIELD_TOP;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
fh->map_mode == ZORAN_MAP_MODE_RAW) {
@@ -2837,7 +2838,7 @@ zoran_do_ioctl (struct inode *inode,
V4L2_COLORSPACE_SMPTE170M;
}
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
break;
@@ -2870,7 +2871,7 @@ zoran_do_ioctl (struct inode *inode,
fmt->fmt.win.w.height,
fmt->fmt.win.clipcount,
fmt->fmt.win.bitmap);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res =
setup_window(file, fmt->fmt.win.w.left,
fmt->fmt.win.w.top,
@@ -2880,7 +2881,7 @@ zoran_do_ioctl (struct inode *inode,
fmt->fmt.win.clips,
fmt->fmt.win.clipcount,
fmt->fmt.win.bitmap);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
break;
@@ -2917,7 +2918,7 @@ zoran_do_ioctl (struct inode *inode,
}
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
settings = fh->jpg_settings;
@@ -2995,7 +2996,7 @@ zoran_do_ioctl (struct inode *inode,
ZORAN_MAP_MODE_JPG_REC :
ZORAN_MAP_MODE_JPG_PLAY;
sfmtjpg_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
} else {
for (i = 0; i < zoran_num_formats; i++)
if (fmt->fmt.pix.pixelformat ==
@@ -3010,7 +3011,7 @@ zoran_do_ioctl (struct inode *inode,
(char *) &printformat);
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->jpg_buffers.allocated ||
(fh->v4l_buffers.allocated &&
fh->v4l_buffers.active !=
@@ -3052,7 +3053,7 @@ zoran_do_ioctl (struct inode *inode,
fh->map_mode = ZORAN_MAP_MODE_RAW;
sfmtv4l_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
}
break;
@@ -3077,7 +3078,7 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_G_FBUF\n", ZR_DEVNAME(zr));
memset(fb, 0, sizeof(*fb));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
fb->base = zr->buffer.base;
fb->fmt.width = zr->buffer.width;
fb->fmt.height = zr->buffer.height;
@@ -3086,7 +3087,7 @@ zoran_do_ioctl (struct inode *inode,
fh->overlay_settings.format->fourcc;
}
fb->fmt.bytesperline = zr->buffer.bytesperline;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
fb->fmt.field = V4L2_FIELD_INTERLACED;
fb->flags = V4L2_FBUF_FLAG_OVERLAY;
@@ -3121,12 +3122,12 @@ zoran_do_ioctl (struct inode *inode,
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res =
setup_fbuffer(file, fb->base, &zoran_formats[i],
fb->fmt.width, fb->fmt.height,
fb->fmt.bytesperline);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3139,9 +3140,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_PREVIEW - on=%d\n",
ZR_DEVNAME(zr), *on);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = setup_overlay(file, *on);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3163,7 +3164,7 @@ zoran_do_ioctl (struct inode *inode,
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->v4l_buffers.allocated || fh->jpg_buffers.allocated) {
dprintk(1,
@@ -3224,7 +3225,7 @@ zoran_do_ioctl (struct inode *inode,
goto v4l2reqbuf_unlock_and_return;
}
v4l2reqbuf_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -3245,9 +3246,9 @@ zoran_do_ioctl (struct inode *inode,
buf->type = type;
buf->index = index;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
res = zoran_v4l2_buffer_status(file, buf, buf->index);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3262,7 +3263,7 @@ zoran_do_ioctl (struct inode *inode,
KERN_DEBUG "%s: VIDIOC_QBUF - type=%d, index=%d\n",
ZR_DEVNAME(zr), buf->type, buf->index);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
@@ -3322,7 +3323,7 @@ zoran_do_ioctl (struct inode *inode,
goto qbuf_unlock_and_return;
}
qbuf_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3336,7 +3337,7 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_DQBUF - type=%d\n",
ZR_DEVNAME(zr), buf->type);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
@@ -3410,7 +3411,7 @@ zoran_do_ioctl (struct inode *inode,
goto dqbuf_unlock_and_return;
}
dqbuf_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3422,7 +3423,7 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_STREAMON\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW: /* raw capture */
@@ -3470,7 +3471,7 @@ zoran_do_ioctl (struct inode *inode,
goto strmon_unlock_and_return;
}
strmon_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3482,7 +3483,7 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_STREAMOFF\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW: /* raw capture */
@@ -3540,7 +3541,7 @@ zoran_do_ioctl (struct inode *inode,
goto strmoff_unlock_and_return;
}
strmoff_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3600,7 +3601,7 @@ zoran_do_ioctl (struct inode *inode,
ctrl->id > V4L2_CID_HUE)
return -EINVAL;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ctrl->value = zr->brightness;
@@ -3615,7 +3616,7 @@ zoran_do_ioctl (struct inode *inode,
ctrl->value = zr->hue;
break;
}
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -3642,7 +3643,7 @@ zoran_do_ioctl (struct inode *inode,
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
zr->brightness = ctrl->value;
@@ -3664,7 +3665,7 @@ zoran_do_ioctl (struct inode *inode,
decoder_command(zr, DECODER_SET_PICTURE, &pict);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -3732,9 +3733,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_G_STD\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
norm = zr->norm;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
switch (norm) {
case VIDEO_MODE_PAL:
@@ -3776,13 +3777,13 @@ zoran_do_ioctl (struct inode *inode,
return -EINVAL;
}
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if ((res = zoran_set_norm(zr, norm)))
goto sstd_unlock_and_return;
res = wait_grab_pending(zr);
sstd_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
break;
@@ -3809,9 +3810,9 @@ zoran_do_ioctl (struct inode *inode,
inp->std = V4L2_STD_ALL;
/* Get status of video decoder */
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
decoder_command(zr, DECODER_GET_STATUS, &status);
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
if (!(status & DECODER_STATUS_GOOD)) {
inp->status |= V4L2_IN_ST_NO_POWER;
@@ -3830,9 +3831,9 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_G_INPUT\n", ZR_DEVNAME(zr));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
*input = zr->input;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -3845,14 +3846,14 @@ zoran_do_ioctl (struct inode *inode,
dprintk(3, KERN_DEBUG "%s: VIDIOC_S_INPUT - input=%d\n",
ZR_DEVNAME(zr), *input);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if ((res = zoran_set_input(zr, *input)))
goto sinput_unlock_and_return;
/* Make sure the changes come into effect */
res = wait_grab_pending(zr);
sinput_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
break;
@@ -3914,7 +3915,7 @@ zoran_do_ioctl (struct inode *inode,
memset(cropcap, 0, sizeof(*cropcap));
cropcap->type = type;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
@@ -3934,7 +3935,7 @@ zoran_do_ioctl (struct inode *inode,
cropcap->defrect.width = BUZ_MIN_WIDTH;
cropcap->defrect.height = BUZ_MIN_HEIGHT;
cropcap_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
break;
@@ -3950,7 +3951,7 @@ zoran_do_ioctl (struct inode *inode,
memset(crop, 0, sizeof(*crop));
crop->type = type;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
(crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
@@ -3969,7 +3970,7 @@ zoran_do_ioctl (struct inode *inode,
crop->c.height = fh->jpg_settings.img_height;
gcrop_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -3988,7 +3989,7 @@ zoran_do_ioctl (struct inode *inode,
ZR_DEVNAME(zr), crop->type, crop->c.left, crop->c.top,
crop->c.width, crop->c.height);
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->jpg_buffers.allocated || fh->v4l_buffers.allocated) {
dprintk(1,
@@ -4024,7 +4025,7 @@ zoran_do_ioctl (struct inode *inode,
fh->jpg_settings = settings;
scrop_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
break;
@@ -4038,7 +4039,7 @@ zoran_do_ioctl (struct inode *inode,
memset(params, 0, sizeof(*params));
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
params->quality = fh->jpg_settings.jpg_comp.quality;
params->APPn = fh->jpg_settings.jpg_comp.APPn;
@@ -4053,7 +4054,7 @@ zoran_do_ioctl (struct inode *inode,
params->jpeg_markers =
fh->jpg_settings.jpg_comp.jpeg_markers;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -4074,7 +4075,7 @@ zoran_do_ioctl (struct inode *inode,
settings.jpg_comp = *params;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->v4l_buffers.active != ZORAN_FREE ||
fh->jpg_buffers.active != ZORAN_FREE) {
@@ -4093,7 +4094,7 @@ zoran_do_ioctl (struct inode *inode,
zoran_v4l2_calc_bufsize(&fh->jpg_settings);
fh->jpg_settings.jpg_comp = *params = settings.jpg_comp;
sjpegc_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return 0;
}
@@ -4127,7 +4128,7 @@ zoran_do_ioctl (struct inode *inode,
switch (fmt->type) {
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fmt->fmt.win.w.width > BUZ_MAX_WIDTH)
fmt->fmt.win.w.width = BUZ_MAX_WIDTH;
@@ -4138,7 +4139,7 @@ zoran_do_ioctl (struct inode *inode,
if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT)
fmt->fmt.win.w.height = BUZ_MIN_HEIGHT;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
@@ -4146,7 +4147,7 @@ zoran_do_ioctl (struct inode *inode,
if (fmt->fmt.pix.bytesperline > 0)
return -EINVAL;
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) {
settings = fh->jpg_settings;
@@ -4229,7 +4230,7 @@ zoran_do_ioctl (struct inode *inode,
goto tryfmt_unlock_and_return;
}
tryfmt_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
break;
@@ -4280,7 +4281,7 @@ zoran_poll (struct file *file,
* if no buffers queued or so, return POLLNVAL
*/
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
switch (fh->map_mode) {
case ZORAN_MAP_MODE_RAW:
@@ -4329,7 +4330,7 @@ zoran_poll (struct file *file,
}
poll_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
return res;
}
@@ -4385,7 +4386,7 @@ zoran_vm_close (struct vm_area_struct *vma)
if (fh->jpg_buffers.buffer[i].map)
break;
if (i == fh->jpg_buffers.num_buffers) {
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->jpg_buffers.active != ZORAN_FREE) {
jpg_qbuf(file, -1, zr->codec_mode);
@@ -4398,7 +4399,7 @@ zoran_vm_close (struct vm_area_struct *vma)
fh->jpg_buffers.allocated = 0;
fh->jpg_buffers.ready_to_be_freed = 1;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
}
break;
@@ -4421,7 +4422,7 @@ zoran_vm_close (struct vm_area_struct *vma)
if (fh->v4l_buffers.buffer[i].map)
break;
if (i == fh->v4l_buffers.num_buffers) {
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
if (fh->v4l_buffers.active != ZORAN_FREE) {
zr36057_set_memgrab(zr, 0);
@@ -4434,7 +4435,7 @@ zoran_vm_close (struct vm_area_struct *vma)
fh->v4l_buffers.allocated = 0;
fh->v4l_buffers.ready_to_be_freed = 1;
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
}
break;
@@ -4489,7 +4490,7 @@ zoran_mmap (struct file *file,
case ZORAN_MAP_MODE_JPG_PLAY:
/* lock */
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
/* Map the MJPEG buffers */
if (!fh->jpg_buffers.allocated) {
@@ -4579,13 +4580,13 @@ zoran_mmap (struct file *file,
}
jpg_mmap_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
break;
case ZORAN_MAP_MODE_RAW:
- down(&zr->resource_lock);
+ mutex_lock(&zr->resource_lock);
/* Map the V4L buffers */
if (!fh->v4l_buffers.allocated) {
@@ -4657,7 +4658,7 @@ zoran_mmap (struct file *file,
break;
}
v4l_mmap_unlock_and_return:
- up(&zr->resource_lock);
+ mutex_unlock(&zr->resource_lock);
break;
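One hunk in this driver is not a pure rename: down_trylock() returns 0 when it acquires the semaphore, while mutex_trylock() returns nonzero when it acquires the mutex, so the conversion script substitutes !mutex_trylock() for down_trylock(). That is why the ioctl entry path above ends up testing !!mutex_trylock(), a redundant but harmless double negation. A sketch of the equivalence, with hypothetical names:

    #include <linux/mutex.h>

    static DECLARE_MUTEX(demo_sem);   /* old-style: a semaphore, count 1 */
    static DEFINE_MUTEX(demo_mutex);

    static void demo_try(void)
    {
            /* semaphore flavor: zero return means "acquired" */
            if (!down_trylock(&demo_sem)) {
                    /* got the lock without sleeping */
                    up(&demo_sem);
            }

            /* mutex flavor: nonzero return means "acquired" */
            if (mutex_trylock(&demo_mutex)) {
                    /* got the lock without sleeping */
                    mutex_unlock(&demo_mutex);
            }
    }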
commit bb0030797f55c9996ea1cebd16b65750ceb7e4fd
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Mar 22 00:09:14 2006 -0800
[PATCH] sem2mutex: security/
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Stephen Smalley <sds@epoch.ncsc.mil>
Cc: James Morris <jmorris@namei.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 74cb79eb917e..f6940618e345 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -16,11 +16,12 @@
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/err.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include "internal.h"
/* session keyring create vs join semaphore */
-static DECLARE_MUTEX(key_session_sem);
+static DEFINE_MUTEX(key_session_mutex);
/* the root user's tracking struct */
struct key_user root_key_user = {
@@ -711,7 +712,7 @@ long join_session_keyring(const char *name)
}
/* allow the user to join or create a named keyring */
- down(&key_session_sem);
+ mutex_lock(&key_session_mutex);
/* look for an existing keyring of this name */
keyring = find_keyring_by_name(name, 0);
@@ -737,7 +738,7 @@ long join_session_keyring(const char *name)
key_put(keyring);
error2:
- up(&key_session_sem);
+ mutex_unlock(&key_session_mutex);
error:
return ret;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index b5fa02d17b1e..65efa8f76331 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/security.h>
@@ -44,7 +45,7 @@ static int __init checkreqprot_setup(char *str)
__setup("checkreqprot=", checkreqprot_setup);
-static DECLARE_MUTEX(sel_sem);
+static DEFINE_MUTEX(sel_mutex);
/* global data for booleans */
static struct dentry *bool_dir = NULL;
@@ -230,7 +231,7 @@ static ssize_t sel_write_load(struct file * file, const char __user * buf,
ssize_t length;
void *data = NULL;
- down(&sel_sem);
+ mutex_lock(&sel_mutex);
length = task_has_security(current, SECURITY__LOAD_POLICY);
if (length)
@@ -262,7 +263,7 @@ static ssize_t sel_write_load(struct file * file, const char __user * buf,
else
length = count;
out:
- up(&sel_sem);
+ mutex_unlock(&sel_mutex);
vfree(data);
return length;
}
@@ -714,7 +715,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
int cur_enforcing;
struct inode *inode;
- down(&sel_sem);
+ mutex_lock(&sel_mutex);
ret = -EFAULT;
@@ -759,7 +760,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
*ppos = end;
ret = count;
out:
- up(&sel_sem);
+ mutex_unlock(&sel_mutex);
if (page)
free_page((unsigned long)page);
return ret;
@@ -773,7 +774,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
int new_value;
struct inode *inode;
- down(&sel_sem);
+ mutex_lock(&sel_mutex);
length = task_has_security(current, SECURITY__SETBOOL);
if (length)
@@ -812,7 +813,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
length = count;
out:
- up(&sel_sem);
+ mutex_unlock(&sel_mutex);
if (page)
free_page((unsigned long) page);
return length;
@@ -831,7 +832,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
ssize_t length = -EFAULT;
int new_value;
- down(&sel_sem);
+ mutex_lock(&sel_mutex);
length = task_has_security(current, SECURITY__SETBOOL);
if (length)
@@ -869,7 +870,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
length = count;
out:
- up(&sel_sem);
+ mutex_unlock(&sel_mutex);
if (page)
free_page((unsigned long) page);
return length;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 8a764928ff4b..63e0b7f29cb5 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -27,7 +27,8 @@
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/audit.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
+
#include "flask.h"
#include "avc.h"
#include "avc_ss.h"
@@ -48,9 +49,9 @@ static DEFINE_RWLOCK(policy_rwlock);
#define POLICY_RDUNLOCK read_unlock(&policy_rwlock)
#define POLICY_WRUNLOCK write_unlock_irq(&policy_rwlock)
-static DECLARE_MUTEX(load_sem);
-#define LOAD_LOCK down(&load_sem)
-#define LOAD_UNLOCK up(&load_sem)
+static DEFINE_MUTEX(load_mutex);
+#define LOAD_LOCK mutex_lock(&load_mutex)
+#define LOAD_UNLOCK mutex_unlock(&load_mutex)
static struct sidtab sidtab;
struct policydb policydb;
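Besides the call sites, this commit shows the declaration half of the conversion: the confusingly named DECLARE_MUTEX() of this era declared a struct semaphore initialized to 1, whereas DEFINE_MUTEX() declares a real struct mutex, and <asm/semaphore.h> gives way to <linux/mutex.h>. A minimal sketch mirroring the LOAD_LOCK wrappers in services.c, with hypothetical names:

    #include <linux/mutex.h>      /* was: #include <asm/semaphore.h> */

    static DEFINE_MUTEX(demo_mutex); /* was: static DECLARE_MUTEX(demo_sem); */

    #define DEMO_LOCK   mutex_lock(&demo_mutex)   /* was: down(&demo_sem) */
    #define DEMO_UNLOCK mutex_unlock(&demo_mutex) /* was: up(&demo_sem)   */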
commit 12aa757905d09b1dc2c1c3d0de3fa8f4c9726f2b
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jan 16 16:36:05 2006 +0100
[ALSA] semaphore -> mutex (Archs, misc buses)
Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 149feb410654..5f22d70fefc0 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -73,7 +73,7 @@ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned
if (ac97->num >= 4)
return;
- down(&aaci->ac97_sem);
+ mutex_lock(&aaci->ac97_sem);
aaci_ac97_select_codec(aaci, ac97);
@@ -91,7 +91,7 @@ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned
v = readl(aaci->base + AACI_SLFR);
} while (v & (SLFR_1TXB|SLFR_2TXB));
- up(&aaci->ac97_sem);
+ mutex_unlock(&aaci->ac97_sem);
}
/*
@@ -105,7 +105,7 @@ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
if (ac97->num >= 4)
return ~0;
- down(&aaci->ac97_sem);
+ mutex_lock(&aaci->ac97_sem);
aaci_ac97_select_codec(aaci, ac97);
@@ -145,7 +145,7 @@ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
v = ~0;
}
- up(&aaci->ac97_sem);
+ mutex_unlock(&aaci->ac97_sem);
return v;
}
@@ -783,7 +783,7 @@ static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
card->shortname, dev->res.start, dev->irq[0]);
aaci = card->private_data;
- init_MUTEX(&aaci->ac97_sem);
+ mutex_init(&aaci->ac97_sem);
spin_lock_init(&aaci->lock);
aaci->card = card;
aaci->dev = dev;
diff --git a/sound/arm/aaci.h b/sound/arm/aaci.h
index 83f73c2505c6..06295190606c 100644
--- a/sound/arm/aaci.h
+++ b/sound/arm/aaci.h
@@ -227,7 +227,7 @@ struct aaci {
unsigned int fifosize;
/* AC'97 */
- struct semaphore ac97_sem;
+ struct mutex ac97_sem;
ac97_bus_t *ac97_bus;
u32 maincr;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 3acbc6023d19..599aff8290e8 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -25,7 +25,7 @@
#include <sound/initval.h>
#include <asm/irq.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/hardware.h>
#include <asm/arch/pxa-regs.h>
#include <asm/arch/audio.h>
@@ -33,7 +33,7 @@
#include "pxa2xx-pcm.h"
-static DECLARE_MUTEX(car_mutex);
+static DEFINE_MUTEX(car_mutex);
static DECLARE_WAIT_QUEUE_HEAD(gsr_wq);
static volatile long gsr_bits;
@@ -52,7 +52,7 @@ static unsigned short pxa2xx_ac97_read(struct snd_ac97 *ac97, unsigned short reg
unsigned short val = -1;
volatile u32 *reg_addr;
- down(&car_mutex);
+ mutex_lock(&car_mutex);
/* set up primary or secondary codec space */
reg_addr = (ac97->num & 1) ? &SAC_REG_BASE : &PAC_REG_BASE;
@@ -79,7 +79,7 @@ static unsigned short pxa2xx_ac97_read(struct snd_ac97 *ac97, unsigned short reg
/* but we've just started another cycle... */
wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1);
-out: up(&car_mutex);
+out: mutex_unlock(&car_mutex);
return val;
}
@@ -87,7 +87,7 @@ static void pxa2xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigne
{
volatile u32 *reg_addr;
- down(&car_mutex);
+ mutex_lock(&car_mutex);
/* set up primary or secondary codec space */
reg_addr = (ac97->num & 1) ? &SAC_REG_BASE : &PAC_REG_BASE;
@@ -101,7 +101,7 @@ static void pxa2xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigne
printk(KERN_ERR "%s: write error (ac97_reg=%d GSR=%#lx)\n",
__FUNCTION__, reg, GSR | gsr_bits);
- up(&car_mutex);
+ mutex_unlock(&car_mutex);
}
static void pxa2xx_ac97_reset(struct snd_ac97 *ac97)
diff --git a/sound/pcmcia/vx/vxp_mixer.c b/sound/pcmcia/vx/vxp_mixer.c
index 9450149b931c..e237f6c2018f 100644
--- a/sound/pcmcia/vx/vxp_mixer.c
+++ b/sound/pcmcia/vx/vxp_mixer.c
@@ -52,14 +52,14 @@ static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
- down(&_chip->mixer_mutex);
+ mutex_lock(&_chip->mixer_mutex);
if (chip->mic_level != ucontrol->value.integer.value[0]) {
vx_set_mic_level(_chip, ucontrol->value.integer.value[0]);
chip->mic_level = ucontrol->value.integer.value[0];
- up(&_chip->mixer_mutex);
+ mutex_unlock(&_chip->mixer_mutex);
return 1;
}
- up(&_chip->mixer_mutex);
+ mutex_unlock(&_chip->mixer_mutex);
return 0;
}
@@ -95,14 +95,14 @@ static int vx_mic_boost_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
- down(&_chip->mixer_mutex);
+ mutex_lock(&_chip->mixer_mutex);
if (chip->mic_level != ucontrol->value.integer.value[0]) {
vx_set_mic_boost(_chip, ucontrol->value.integer.value[0]);
chip->mic_level = ucontrol->value.integer.value[0];
- up(&_chip->mixer_mutex);
+ mutex_unlock(&_chip->mixer_mutex);
return 1;
}
- up(&_chip->mixer_mutex);
+ mutex_unlock(&_chip->mixer_mutex);
return 0;
}
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index fd6543998788..53a148b01b6b 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -115,8 +115,8 @@ struct snd_cs4231 {
unsigned char image[32]; /* registers image */
int mce_bit;
int calibrate_mute;
- struct semaphore mce_mutex;
- struct semaphore open_mutex;
+ struct mutex mce_mutex;
+ struct mutex open_mutex;
union {
#ifdef SBUS_SUPPORT
@@ -775,7 +775,7 @@ static void snd_cs4231_playback_format(struct snd_cs4231 *chip, struct snd_pcm_h
{
unsigned long flags;
- down(&chip->mce_mutex);
+ mutex_lock(&chip->mce_mutex);
snd_cs4231_calibrate_mute(chip, 1);
snd_cs4231_mce_up(chip);
@@ -790,7 +790,7 @@ static void snd_cs4231_playback_format(struct snd_cs4231 *chip, struct snd_pcm_h
snd_cs4231_mce_down(chip);
snd_cs4231_calibrate_mute(chip, 0);
- up(&chip->mce_mutex);
+ mutex_unlock(&chip->mce_mutex);
}
static void snd_cs4231_capture_format(struct snd_cs4231 *chip, struct snd_pcm_hw_params *params,
@@ -798,7 +798,7 @@ static void snd_cs4231_capture_format(struct snd_cs4231 *chip, struct snd_pcm_hw
{
unsigned long flags;
- down(&chip->mce_mutex);
+ mutex_lock(&chip->mce_mutex);
snd_cs4231_calibrate_mute(chip, 1);
snd_cs4231_mce_up(chip);
@@ -819,7 +819,7 @@ static void snd_cs4231_capture_format(struct snd_cs4231 *chip, struct snd_pcm_hw
snd_cs4231_mce_down(chip);
snd_cs4231_calibrate_mute(chip, 0);
- up(&chip->mce_mutex);
+ mutex_unlock(&chip->mce_mutex);
}
/*
@@ -933,14 +933,14 @@ static int snd_cs4231_open(struct snd_cs4231 *chip, unsigned int mode)
{
unsigned long flags;
- down(&chip->open_mutex);
+ mutex_lock(&chip->open_mutex);
if ((chip->mode & mode)) {
- up(&chip->open_mutex);
+ mutex_unlock(&chip->open_mutex);
return -EAGAIN;
}
if (chip->mode & CS4231_MODE_OPEN) {
chip->mode |= mode;
- up(&chip->open_mutex);
+ mutex_unlock(&chip->open_mutex);
return 0;
}
/* ok. now enable and ack CODEC IRQ */
@@ -960,7 +960,7 @@ static int snd_cs4231_open(struct snd_cs4231 *chip, unsigned int mode)
spin_unlock_irqrestore(&chip->lock, flags);
chip->mode = mode;
- up(&chip->open_mutex);
+ mutex_unlock(&chip->open_mutex);
return 0;
}
@@ -968,10 +968,10 @@ static void snd_cs4231_close(struct snd_cs4231 *chip, unsigned int mode)
{
unsigned long flags;
- down(&chip->open_mutex);
+ mutex_lock(&chip->open_mutex);
chip->mode &= ~mode;
if (chip->mode & CS4231_MODE_OPEN) {
- up(&chip->open_mutex);
+ mutex_unlock(&chip->open_mutex);
return;
}
snd_cs4231_calibrate_mute(chip, 1);
@@ -1008,7 +1008,7 @@ static void snd_cs4231_close(struct snd_cs4231 *chip, unsigned int mode)
snd_cs4231_calibrate_mute(chip, 0);
chip->mode = 0;
- up(&chip->open_mutex);
+ mutex_unlock(&chip->open_mutex);
}
/*
@@ -1969,8 +1969,8 @@ static int __init snd_cs4231_sbus_create(struct snd_card *card,
spin_lock_init(&chip->lock);
spin_lock_init(&chip->c_dma.sbus_info.lock);
spin_lock_init(&chip->p_dma.sbus_info.lock);
- init_MUTEX(&chip->mce_mutex);
- init_MUTEX(&chip->open_mutex);
+ mutex_init(&chip->mce_mutex);
+ mutex_init(&chip->open_mutex);
chip->card = card;
chip->dev_u.sdev = sdev;
chip->regs_size = sdev->reg_addrs[0].reg_size;
@@ -2157,8 +2157,8 @@ static int __init snd_cs4231_ebus_create(struct snd_card *card,
spin_lock_init(&chip->lock);
spin_lock_init(&chip->c_dma.ebus_info.lock);
spin_lock_init(&chip->p_dma.ebus_info.lock);
- init_MUTEX(&chip->mce_mutex);
- init_MUTEX(&chip->open_mutex);
+ mutex_init(&chip->mce_mutex);
+ mutex_init(&chip->open_mutex);
chip->flags |= CS4231_FLAG_EBUS;
chip->card = card;
chip->dev_u.pdev = edev->bus->self;
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index c9476c237c42..53009bb642f6 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -47,6 +47,7 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/pcm.h>
@@ -202,7 +203,7 @@ struct snd_usb_stream {
* the all interfaces on the same card as one sound device.
*/
-static DECLARE_MUTEX(register_mutex);
+static DEFINE_MUTEX(register_mutex);
static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
@@ -3285,7 +3286,7 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
/* check whether it's already registered */
chip = NULL;
- down(&register_mutex);
+ mutex_lock(&register_mutex);
for (i = 0; i < SNDRV_CARDS; i++) {
if (usb_chip[i] && usb_chip[i]->dev == dev) {
if (usb_chip[i]->shutdown) {
@@ -3338,13 +3339,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
usb_chip[chip->index] = chip;
chip->num_interfaces++;
- up(&register_mutex);
+ mutex_unlock(&register_mutex);
return chip;
__error:
if (chip && !chip->num_interfaces)
snd_card_free(chip->card);
- up(&register_mutex);
+ mutex_unlock(&register_mutex);
__err_val:
return NULL;
}
@@ -3364,7 +3365,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
chip = ptr;
card = chip->card;
- down(&register_mutex);
+ mutex_lock(&register_mutex);
chip->shutdown = 1;
chip->num_interfaces--;
if (chip->num_interfaces <= 0) {
@@ -3382,10 +3383,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
snd_usb_mixer_disconnect(p);
}
usb_chip[chip->index] = NULL;
- up(&register_mutex);
+ mutex_unlock(&register_mutex);
snd_card_free(card);
} else {
- up(&register_mutex);
+ mutex_unlock(&register_mutex);
}
}
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index e0abb56bbe49..cfec38d7839b 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -351,7 +351,7 @@ static struct snd_card *usX2Y_create_card(struct usb_device *device)
usX2Y(card)->chip.dev = device;
usX2Y(card)->chip.card = card;
init_waitqueue_head(&usX2Y(card)->prepare_wait_queue);
- init_MUTEX (&usX2Y(card)->prepare_mutex);
+ mutex_init(&usX2Y(card)->prepare_mutex);
INIT_LIST_HEAD(&usX2Y(card)->chip.midi_list);
strcpy(card->driver, "USB "NAME_ALLCAPS"");
sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
index 435c1feda9df..456b5fdbc339 100644
--- a/sound/usb/usx2y/usbusx2y.h
+++ b/sound/usb/usx2y/usbusx2y.h
@@ -34,7 +34,7 @@ struct usX2Ydev {
unsigned int rate,
format;
int chip_status;
- struct semaphore prepare_mutex;
+ struct mutex prepare_mutex;
struct us428ctls_sharedmem *us428ctls_sharedmem;
int wait_iso_frame;
wait_queue_head_t us428ctls_wait_queue_head;
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index a6bbc7a6348f..f6bd0dee563c 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -811,7 +811,7 @@ static int snd_usX2Y_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_usX2Y_substream *subs = runtime->private_data;
- down(&subs->usX2Y->prepare_mutex);
+ mutex_lock(&subs->usX2Y->prepare_mutex);
snd_printdd("snd_usX2Y_hw_free(%p)\n", substream);
if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
@@ -832,7 +832,7 @@ static int snd_usX2Y_pcm_hw_free(struct snd_pcm_substream *substream)
usX2Y_urbs_release(subs);
}
}
- up(&subs->usX2Y->prepare_mutex);
+ mutex_unlock(&subs->usX2Y->prepare_mutex);
return snd_pcm_lib_free_pages(substream);
}
/*
@@ -849,7 +849,7 @@ static int snd_usX2Y_pcm_prepare(struct snd_pcm_substream *substream)
int err = 0;
snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream);
- down(&usX2Y->prepare_mutex);
+ mutex_lock(&usX2Y->prepare_mutex);
usX2Y_subs_prepare(subs);
// Start hardware streams
// SyncStream first....
@@ -869,7 +869,7 @@ static int snd_usX2Y_pcm_prepare(struct snd_pcm_substream *substream)
err = usX2Y_urbs_start(subs);
up_prepare_mutex:
- up(&usX2Y->prepare_mutex);
+ mutex_unlock(&usX2Y->prepare_mutex);
return err;
}
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 796a7dcef09d..315855082fe1 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -366,7 +366,7 @@ static int snd_usX2Y_usbpcm_hw_free(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_usX2Y_substream *subs = runtime->private_data,
*cap_subs2 = subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
- down(&subs->usX2Y->prepare_mutex);
+ mutex_lock(&subs->usX2Y->prepare_mutex);
snd_printdd("snd_usX2Y_usbpcm_hw_free(%p)\n", substream);
if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
@@ -395,7 +395,7 @@ static int snd_usX2Y_usbpcm_hw_free(struct snd_pcm_substream *substream)
usX2Y_usbpcm_urbs_release(cap_subs2);
}
}
- up(&subs->usX2Y->prepare_mutex);
+ mutex_unlock(&subs->usX2Y->prepare_mutex);
return snd_pcm_lib_free_pages(substream);
}
@@ -503,7 +503,7 @@ static int snd_usX2Y_usbpcm_prepare(struct snd_pcm_substream *substream)
memset(usX2Y->hwdep_pcm_shm, 0, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
}
- down(&usX2Y->prepare_mutex);
+ mutex_lock(&usX2Y->prepare_mutex);
usX2Y_subs_prepare(subs);
// Start hardware streams
// SyncStream first....
@@ -544,7 +544,7 @@ static int snd_usX2Y_usbpcm_prepare(struct snd_pcm_substream *substream)
usX2Y->hwdep_pcm_shm->capture_iso_start = -1;
up_prepare_mutex:
- up(&usX2Y->prepare_mutex);
+ mutex_unlock(&usX2Y->prepare_mutex);
return err;
}
@@ -621,7 +621,7 @@ static int usX2Y_pcms_lock_check(struct snd_card *card)
if (dev->type != SNDRV_DEV_PCM)
continue;
pcm = dev->device_data;
- down(&pcm->open_mutex);
+ mutex_lock(&pcm->open_mutex);
}
list_for_each(list, &card->devices) {
int s;
@@ -650,7 +650,7 @@ static void usX2Y_pcms_unlock(struct snd_card *card)
if (dev->type != SNDRV_DEV_PCM)
continue;
pcm = dev->device_data;
- up(&pcm->open_mutex);
+ mutex_unlock(&pcm->open_mutex);
}
}
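For locks embedded in a driver structure, as with aaci->ac97_sem and the cs4231 and usx2y mutexes above, the runtime initializer changes too: init_MUTEX(&sem), which set a semaphore's count to 1, becomes mutex_init(&mutex). A minimal sketch along the lines of these drivers; struct and function names are hypothetical:

    #include <linux/mutex.h>

    struct demo_chip {
            struct mutex ac97_lock;         /* was: struct semaphore */
    };

    static void demo_chip_init(struct demo_chip *chip)
    {
            mutex_init(&chip->ac97_lock);   /* was: init_MUTEX(&chip->ac97_lock); */
    }

    static void demo_codec_write(struct demo_chip *chip)
    {
            mutex_lock(&chip->ac97_lock);
            /* program codec registers under the lock */
            mutex_unlock(&chip->ac97_lock);
    }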