121
121
int sysctl_unix_max_dgram_qlen = 10 ;
122
122
123
123
struct hlist_head unix_socket_table [UNIX_HASH_SIZE + 1 ];
124
- DEFINE_RWLOCK (unix_table_lock );
124
+ DEFINE_SPINLOCK (unix_table_lock );
125
125
static atomic_t unix_nr_socks = ATOMIC_INIT (0 );
126
126
127
127
#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate rwlock.
 */
@@ -214,16 +214,16 @@ static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
214
214
215
215
static inline void unix_remove_socket (struct sock * sk )
216
216
{
217
- write_lock (& unix_table_lock );
217
+ spin_lock (& unix_table_lock );
218
218
__unix_remove_socket (sk );
219
- write_unlock (& unix_table_lock );
219
+ spin_unlock (& unix_table_lock );
220
220
}
221
221
222
222
static inline void unix_insert_socket (struct hlist_head * list , struct sock * sk )
223
223
{
224
- write_lock (& unix_table_lock );
224
+ spin_lock (& unix_table_lock );
225
225
__unix_insert_socket (list , sk );
226
- write_unlock (& unix_table_lock );
226
+ spin_unlock (& unix_table_lock );
227
227
}
228
228
229
229
static struct sock * __unix_find_socket_byname (struct sockaddr_un * sunname ,
@@ -250,11 +250,11 @@ static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
250
250
{
251
251
struct sock * s ;
252
252
253
- read_lock (& unix_table_lock );
253
+ spin_lock (& unix_table_lock );
254
254
s = __unix_find_socket_byname (sunname , len , type , hash );
255
255
if (s )
256
256
sock_hold (s );
257
- read_unlock (& unix_table_lock );
257
+ spin_unlock (& unix_table_lock );
258
258
return s ;
259
259
}
260
260
@@ -263,7 +263,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
263
263
struct sock * s ;
264
264
struct hlist_node * node ;
265
265
266
- read_lock (& unix_table_lock );
266
+ spin_lock (& unix_table_lock );
267
267
sk_for_each (s , node ,
268
268
& unix_socket_table [i -> i_ino & (UNIX_HASH_SIZE - 1 )]) {
269
269
struct dentry * dentry = unix_sk (s )-> dentry ;
@@ -276,7 +276,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
276
276
}
277
277
s = NULL ;
278
278
found :
279
- read_unlock (& unix_table_lock );
279
+ spin_unlock (& unix_table_lock );
280
280
return s ;
281
281
}
282
282
@@ -642,12 +642,12 @@ static int unix_autobind(struct socket *sock)
642
642
addr -> len = sprintf (addr -> name -> sun_path + 1 , "%05x" , ordernum ) + 1 + sizeof (short );
643
643
addr -> hash = unix_hash_fold (csum_partial ((void * )addr -> name , addr -> len , 0 ));
644
644
645
- write_lock (& unix_table_lock );
645
+ spin_lock (& unix_table_lock );
646
646
ordernum = (ordernum + 1 )& 0xFFFFF ;
647
647
648
648
if (__unix_find_socket_byname (addr -> name , addr -> len , sock -> type ,
649
649
addr -> hash )) {
650
- write_unlock (& unix_table_lock );
650
+ spin_unlock (& unix_table_lock );
651
651
/* Sanity yield. It is unusual case, but yet... */
652
652
if (!(ordernum & 0xFF ))
653
653
yield ();
@@ -658,7 +658,7 @@ static int unix_autobind(struct socket *sock)
658
658
__unix_remove_socket (sk );
659
659
u -> addr = addr ;
660
660
__unix_insert_socket (& unix_socket_table [addr -> hash ], sk );
661
- write_unlock (& unix_table_lock );
661
+ spin_unlock (& unix_table_lock );
662
662
err = 0 ;
663
663
664
664
out : up (& u -> readsem );
@@ -791,7 +791,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
791
791
addr -> hash = UNIX_HASH_SIZE ;
792
792
}
793
793
794
- write_lock (& unix_table_lock );
794
+ spin_lock (& unix_table_lock );
795
795
796
796
if (!sunaddr -> sun_path [0 ]) {
797
797
err = - EADDRINUSE ;
@@ -814,7 +814,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
814
814
__unix_insert_socket (list , sk );
815
815
816
816
out_unlock :
817
- write_unlock (& unix_table_lock );
817
+ spin_unlock (& unix_table_lock );
818
818
out_up :
819
819
up (& u -> readsem );
820
820
out :
@@ -1916,7 +1916,7 @@ static struct sock *unix_seq_idx(int *iter, loff_t pos)
1916
1916
1917
1917
static void * unix_seq_start (struct seq_file * seq , loff_t * pos )
1918
1918
{
1919
- read_lock (& unix_table_lock );
1919
+ spin_lock (& unix_table_lock );
1920
1920
return * pos ? unix_seq_idx (seq -> private , * pos - 1 ) : ((void * ) 1 );
1921
1921
}
1922
1922
@@ -1931,7 +1931,7 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1931
1931
1932
1932
static void unix_seq_stop (struct seq_file * seq , void * v )
1933
1933
{
1934
- read_unlock (& unix_table_lock );
1934
+ spin_unlock (& unix_table_lock );
1935
1935
}
1936
1936
1937
1937
static int unix_seq_show (struct seq_file * seq , void * v )
0 commit comments