Skip to content

Commit fbe9cc4

Browse files
davem330 (David S. Miller)
authored and
David S. Miller
committed
[AF_UNIX]: Use spinlock for unix_table_lock
This lock is actually taken mostly as a writer, so using a rwlock actually just makes performance worse, especially on chips like the Intel P4.

Signed-off-by: David S. Miller <[email protected]>
1 parent d83d846 commit fbe9cc4

File tree

3 files changed

+20
-20
lines changed

3 files changed

+20
-20
lines changed

include/net/af_unix.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ extern void unix_gc(void);
1313
#define UNIX_HASH_SIZE 256
1414

1515
extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
16-
extern rwlock_t unix_table_lock;
16+
extern spinlock_t unix_table_lock;
1717

1818
extern atomic_t unix_tot_inflight;
1919

net/unix/af_unix.c

+17-17
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@
121121
int sysctl_unix_max_dgram_qlen = 10;
122122

123123
struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
124-
DEFINE_RWLOCK(unix_table_lock);
124+
DEFINE_SPINLOCK(unix_table_lock);
125125
static atomic_t unix_nr_socks = ATOMIC_INIT(0);
126126

127127
#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
@@ -130,7 +130,7 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0);
130130

131131
/*
132132
* SMP locking strategy:
133-
* hash table is protected with rwlock unix_table_lock
133+
* hash table is protected with spinlock unix_table_lock
134134
* each socket state is protected by separate rwlock.
135135
*/
136136

@@ -214,16 +214,16 @@ static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
214214

215215
static inline void unix_remove_socket(struct sock *sk)
216216
{
217-
write_lock(&unix_table_lock);
217+
spin_lock(&unix_table_lock);
218218
__unix_remove_socket(sk);
219-
write_unlock(&unix_table_lock);
219+
spin_unlock(&unix_table_lock);
220220
}
221221

222222
static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
223223
{
224-
write_lock(&unix_table_lock);
224+
spin_lock(&unix_table_lock);
225225
__unix_insert_socket(list, sk);
226-
write_unlock(&unix_table_lock);
226+
spin_unlock(&unix_table_lock);
227227
}
228228

229229
static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
@@ -250,11 +250,11 @@ static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
250250
{
251251
struct sock *s;
252252

253-
read_lock(&unix_table_lock);
253+
spin_lock(&unix_table_lock);
254254
s = __unix_find_socket_byname(sunname, len, type, hash);
255255
if (s)
256256
sock_hold(s);
257-
read_unlock(&unix_table_lock);
257+
spin_unlock(&unix_table_lock);
258258
return s;
259259
}
260260

@@ -263,7 +263,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
263263
struct sock *s;
264264
struct hlist_node *node;
265265

266-
read_lock(&unix_table_lock);
266+
spin_lock(&unix_table_lock);
267267
sk_for_each(s, node,
268268
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
269269
struct dentry *dentry = unix_sk(s)->dentry;
@@ -276,7 +276,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
276276
}
277277
s = NULL;
278278
found:
279-
read_unlock(&unix_table_lock);
279+
spin_unlock(&unix_table_lock);
280280
return s;
281281
}
282282

@@ -642,12 +642,12 @@ static int unix_autobind(struct socket *sock)
642642
addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
643643
addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
644644

645-
write_lock(&unix_table_lock);
645+
spin_lock(&unix_table_lock);
646646
ordernum = (ordernum+1)&0xFFFFF;
647647

648648
if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
649649
addr->hash)) {
650-
write_unlock(&unix_table_lock);
650+
spin_unlock(&unix_table_lock);
651651
/* Sanity yield. It is unusual case, but yet... */
652652
if (!(ordernum&0xFF))
653653
yield();
@@ -658,7 +658,7 @@ static int unix_autobind(struct socket *sock)
658658
__unix_remove_socket(sk);
659659
u->addr = addr;
660660
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
661-
write_unlock(&unix_table_lock);
661+
spin_unlock(&unix_table_lock);
662662
err = 0;
663663

664664
out: up(&u->readsem);
@@ -791,7 +791,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
791791
addr->hash = UNIX_HASH_SIZE;
792792
}
793793

794-
write_lock(&unix_table_lock);
794+
spin_lock(&unix_table_lock);
795795

796796
if (!sunaddr->sun_path[0]) {
797797
err = -EADDRINUSE;
@@ -814,7 +814,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
814814
__unix_insert_socket(list, sk);
815815

816816
out_unlock:
817-
write_unlock(&unix_table_lock);
817+
spin_unlock(&unix_table_lock);
818818
out_up:
819819
up(&u->readsem);
820820
out:
@@ -1916,7 +1916,7 @@ static struct sock *unix_seq_idx(int *iter, loff_t pos)
19161916

19171917
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
19181918
{
1919-
read_lock(&unix_table_lock);
1919+
spin_lock(&unix_table_lock);
19201920
return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
19211921
}
19221922

@@ -1931,7 +1931,7 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
19311931

19321932
static void unix_seq_stop(struct seq_file *seq, void *v)
19331933
{
1934-
read_unlock(&unix_table_lock);
1934+
spin_unlock(&unix_table_lock);
19351935
}
19361936

19371937
static int unix_seq_show(struct seq_file *seq, void *v)

net/unix/garbage.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ void unix_gc(void)
182182
if (down_trylock(&unix_gc_sem))
183183
return;
184184

185-
read_lock(&unix_table_lock);
185+
spin_lock(&unix_table_lock);
186186

187187
forall_unix_sockets(i, s)
188188
{
@@ -301,7 +301,7 @@ void unix_gc(void)
301301
}
302302
u->gc_tree = GC_ORPHAN;
303303
}
304-
read_unlock(&unix_table_lock);
304+
spin_unlock(&unix_table_lock);
305305

306306
/*
307307
* Here we are. Hitlist is filled. Die.

0 commit comments

Comments (0)