if (ret > 0) {
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
fc->max_background = val;
fc->blocked = fc->num_background >= fc->max_background;
if (!fc->blocked)
wake_up(&fc->blocked_waitq);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
fuse_conn_put(fc);
}
}
if (!fc)
goto out;
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
fc->congestion_threshold = val;
if (fc->sb) {
if (fc->num_background < fc->congestion_threshold) {
	clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
	clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
}
}
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
fuse_conn_put(fc);
out:
return ret;
* We get here in the unlikely case that a background
* request was allocated but not sent
*/
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
if (!fc->blocked)
wake_up(&fc->blocked_waitq);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
}
if (test_bit(FR_WAITING, &req->flags)) {
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
if (test_bit(FR_BACKGROUND, &req->flags)) {
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
clear_bit(FR_BACKGROUND, &req->flags);
if (fc->num_background == fc->max_background)
	fc->blocked = 0;
fc->num_background--;
fc->active_background--;
flush_bg_queue(fc);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
}
wake_up(&req->waitq);
if (req->end)
*
* fc->connected must have been checked previously
*/
-void fuse_request_send_background_locked(struct fuse_conn *fc,
- struct fuse_req *req)
+void fuse_request_send_background_nocheck(struct fuse_conn *fc,
+ struct fuse_req *req)
{
BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
if (!test_bit(FR_WAITING, &req->flags)) {
	__set_bit(FR_WAITING, &req->flags);
	atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
+ spin_lock(&fc->bg_lock);
fc->num_background++;
if (fc->num_background == fc->max_background)
	fc->blocked = 1;
if (fc->num_background == fc->congestion_threshold && fc->sb) {
	set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
	set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
}
list_add_tail(&req->list, &fc->bg_queue);
flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
BUG_ON(!req->end);
spin_lock(&fc->lock);
if (fc->connected) {
- fuse_request_send_background_locked(fc, req);
+ fuse_request_send_background_nocheck(fc, req);
spin_unlock(&fc->lock);
} else {
spin_unlock(&fc->lock);
LIST_HEAD(to_end);
fc->connected = 0;
- fc->blocked = 0;
fc->aborted = is_abort;
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
list_splice_tail_init(&fpq->processing, &to_end);
spin_unlock(&fpq->lock);
}
+ spin_lock(&fc->bg_lock);
+ fc->blocked = 0;
fc->max_background = UINT_MAX;
flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
req->in.args[1].size = inarg->size;
fi->writectr++;
- fuse_request_send_background_locked(fc, req);
+ fuse_request_send_background_nocheck(fc, req);
return;
out_free:
/** The list of background requests set aside for later queuing */
struct list_head bg_queue;
+ /** Protects: max_background, congestion_threshold, num_background,
+ * active_background, bg_queue, blocked */
+ spinlock_t bg_lock;
+
/** Flag indicating that INIT reply has been received. Allocating
* any fuse request will be suspended until the flag is set */
int initialized;
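/*
 * Illustrative sketch, not part of the patch: the rule documented for
 * bg_lock above can also be stated as a runtime check.  Every field in
 * that list is only touched with fc->bg_lock held, so a hypothetical
 * helper like the one below could assert the lock before refreshing the
 * background-accounting state.
 */
static void fuse_refresh_blocked(struct fuse_conn *fc)
{
	lockdep_assert_held(&fc->bg_lock);

	/* keep fc->blocked consistent with the counters it summarizes */
	fc->blocked = fc->num_background >= fc->max_background;
	if (!fc->blocked)
		wake_up(&fc->blocked_waitq);
}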
*/
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
-void fuse_request_send_background_locked(struct fuse_conn *fc,
- struct fuse_req *req);
+void fuse_request_send_background_nocheck(struct fuse_conn *fc,
+ struct fuse_req *req);
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
{
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
+ spin_lock_init(&fc->bg_lock);
init_rwsem(&fc->killsb);
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
sanitize_global_limit(&max_user_bgreq);
sanitize_global_limit(&max_user_congthresh);
+ spin_lock(&fc->bg_lock);
if (arg->max_background) {
fc->max_background = arg->max_background;
fc->congestion_threshold > max_user_congthresh)
fc->congestion_threshold = max_user_congthresh;
}
+ spin_unlock(&fc->bg_lock);
}
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)