// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "lvb_table.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

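/*
 * Deliver a single callback to the lock owner: a bast invokes bastfn()
 * with the blocking mode, a cast fills in the lksb status/flags and
 * invokes astfn(). Both paths emit a tracepoint first.
 */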
static void dlm_run_callback(uint32_t ls_id, uint32_t lkb_id, int8_t mode,
			     uint32_t flags, uint8_t sb_flags, int sb_status,
			     struct dlm_lksb *lksb,
			     void (*astfn)(void *astparam),
			     void (*bastfn)(void *astparam, int mode),
			     void *astparam, const char *res_name,
			     size_t res_length)
{
	if (flags & DLM_CB_BAST) {
		trace_dlm_bast(ls_id, lkb_id, mode, res_name, res_length);
		bastfn(astparam, mode);
	} else if (flags & DLM_CB_CAST) {
		trace_dlm_ast(ls_id, lkb_id, sb_flags, sb_status, res_name,
			      res_length);
		lksb->sb_status = sb_status;
		lksb->sb_flags = sb_flags;
		astfn(astparam);
	}
}

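/* Run a queued callback and free its dlm_callback container afterwards. */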
static void dlm_do_callback(struct dlm_callback *cb)
{
	dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,
			 cb->sb_flags, cb->sb_status, cb->lkb_lksb,
			 cb->astfn, cb->bastfn, cb->astparam,
			 cb->res_name, cb->res_length);
	dlm_free_cb(cb);
}

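/* Workqueue handler: resolve the containing dlm_callback and run it. */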
static void dlm_callback_work(struct work_struct *work)
{
	struct dlm_callback *cb = container_of(work, struct dlm_callback, work);

	dlm_do_callback(cb);
}

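/*
 * Decide whether a callback is redundant and can be dropped before it is
 * ever queued. A bast is skipped when its blocking mode is compatible with
 * the most recently granted mode (e.g. a PR bast is pointless right after
 * a PR grant, since shared-read modes do not conflict), or when it repeats
 * the previous bast. For user locks, a cast may additionally request that
 * the LVB be copied out, reported through *copy_lvb.
 */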
bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			   int status, uint32_t sbflags, int *copy_lvb)
{
	struct dlm_rsb *rsb = lkb->lkb_resource;
	struct dlm_ls *ls = rsb->res_ls;
	int prev_mode;

	if (copy_lvb)
		*copy_lvb = 0;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast_cb_mode != -1) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast_cb_mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast_cb_mode);
				return true;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb_mode != -1 &&
		    lkb->lkb_last_cb_flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb_mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				return true;
			}
		}

		lkb->lkb_last_bast_time = ktime_get();
		lkb->lkb_last_bast_cb_mode = mode;
	} else if (flags & DLM_CB_CAST) {
		if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
			prev_mode = lkb->lkb_last_cast_cb_mode;

			if (!status && lkb->lkb_lksb->sb_lvbptr &&
			    dlm_lvb_operations[prev_mode + 1][mode + 1]) {
				if (copy_lvb)
					*copy_lvb = 1;
			}
		}

		lkb->lkb_last_cast_cb_mode = mode;
		lkb->lkb_last_cast_time = ktime_get();
	}

	lkb->lkb_last_cb_mode = mode;
	lkb->lkb_last_cb_flags = flags;

	return false;
}

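/*
 * Allocate a dlm_callback and fill in everything needed to run and trace
 * it later, independent of the lkb. Only the low byte of sbflags is kept,
 * matching the uint8_t sb_flags that dlm_run_callback() takes.
 */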
int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
	       int status, uint32_t sbflags,
	       struct dlm_callback **cb)
{
	struct dlm_rsb *rsb = lkb->lkb_resource;
	struct dlm_ls *ls = rsb->res_ls;

	*cb = dlm_allocate_cb();
	if (WARN_ON_ONCE(!*cb))
		return -ENOMEM;

	/* for tracing */
	(*cb)->lkb_id = lkb->lkb_id;
	(*cb)->ls_id = ls->ls_global_id;
	memcpy((*cb)->res_name, rsb->res_name, rsb->res_length);
	(*cb)->res_length = rsb->res_length;

	(*cb)->flags = flags;
	(*cb)->mode = mode;
	(*cb)->sb_status = status;
	(*cb)->sb_flags = (sbflags & 0x000000FF);
	(*cb)->lkb_lksb = lkb->lkb_lksb;

	return 0;
}

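/*
 * Like dlm_get_cb(), but also snapshot the lkb's ast/bast functions and
 * argument and prepare the work item, so the callback can be run from a
 * workqueue without touching the lkb again.
 */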
static int dlm_get_queue_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
			    int status, uint32_t sbflags,
			    struct dlm_callback **cb)
{
	int rv;

	rv = dlm_get_cb(lkb, flags, mode, status, sbflags, cb);
	if (rv)
		return rv;

	(*cb)->astfn = lkb->lkb_astfn;
	(*cb)->bastfn = lkb->lkb_bastfn;
	(*cb)->astparam = lkb->lkb_astparam;
	INIT_WORK(&(*cb)->work, dlm_callback_work);

	return 0;
}

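/*
 * Entry point for delivering an ast/bast to a lock. User-space locks are
 * handed to dlm_user_add_ast(); redundant callbacks are filtered out.
 * Otherwise the callback is parked on the delay list while callbacks are
 * suspended (LSFL_CB_DELAY), run inline when softirq delivery is enabled
 * (LSFL_SOFTIRQ), or queued to the callback workqueue.
 */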
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_rsb *rsb = lkb->lkb_resource;
	struct dlm_ls *ls = rsb->res_ls;
	struct dlm_callback *cb;
	int rv;

	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	if (dlm_may_skip_callback(lkb, flags, mode, status, sbflags, NULL))
		return;

	spin_lock_bh(&ls->ls_cb_lock);
	if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
		rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
		if (!rv)
			list_add(&cb->list, &ls->ls_cb_delay);
	} else {
		if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags)) {
			dlm_run_callback(ls->ls_global_id, lkb->lkb_id, mode, flags,
					 sbflags, status, lkb->lkb_lksb,
					 lkb->lkb_astfn, lkb->lkb_bastfn,
					 lkb->lkb_astparam, rsb->res_name,
					 rsb->res_length);
		} else {
			rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
			if (!rv)
				queue_work(ls->ls_callback_wq, &cb->work);
		}
	}
	spin_unlock_bh(&ls->ls_cb_lock);
}

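/*
 * Create the per-lockspace callback workqueue. Nothing to do for
 * lockspaces without a kernel (filesystem) user, or when callbacks are
 * delivered from softirq context instead of a workqueue.
 */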
int dlm_callback_start(struct dlm_ls *ls)
{
	if (!test_bit(LSFL_FS, &ls->ls_flags) ||
	    test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
		return 0;

	ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
						     WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

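/* Tear down the callback workqueue, draining any pending callbacks. */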
void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}

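/*
 * Stop delivering callbacks: set LSFL_CB_DELAY so new callbacks are parked
 * on ls_cb_delay, then flush the workqueue so none are still in flight.
 */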
void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (!test_bit(LSFL_FS, &ls->ls_flags))
		return;

	spin_lock_bh(&ls->ls_cb_lock);
	set_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock_bh(&ls->ls_cb_lock);

	if (ls->ls_callback_wq)
		flush_workqueue(ls->ls_callback_wq);
}

#define MAX_CB_QUEUE 25

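/*
 * Re-enable callback delivery after dlm_callback_suspend(). Delayed
 * callbacks are drained in batches of MAX_CB_QUEUE per lock hold, with
 * cond_resched() between batches, so a long backlog cannot monopolize
 * the CPU or the spinlock.
 */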
void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_callback *cb, *safe;
	int count = 0, sum = 0;
	bool empty;

	if (!test_bit(LSFL_FS, &ls->ls_flags))
		return;

more:
	spin_lock_bh(&ls->ls_cb_lock);
	list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
		list_del(&cb->list);
		if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
			dlm_do_callback(cb);
		else
			queue_work(ls->ls_callback_wq, &cb->work);

		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock_bh(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

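/* kref release function: frees the callback once the last reference drops. */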
void dlm_release_callback(struct kref *ref)
{
	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);

	dlm_free_cb(cb);
}

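/*
 * Repoint a "last callback" pointer: drop the reference held through
 * *from and take a new reference on to (which may be NULL to simply
 * invalidate the pointer).
 */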
void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}

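/*
 * Throw away all callbacks still queued on an lkb and reset the
 * bookkeeping used for redundant-bast suppression.
 */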
void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
{
	struct dlm_callback *cb, *safe;

	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
		list_del(&cb->list);
		kref_put(&cb->ref, dlm_release_callback);
	}

	lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;

	/* invalidate */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
	lkb->lkb_last_bast_mode = -1;
}

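/*
 * Queue a callback on the lkb, skipping basts that are redundant against
 * the last cast or the previous bast. Returns
 * DLM_ENQUEUE_CALLBACK_NEED_SCHED when this is the first pending callback
 * and the caller must schedule delivery, _SUCCESS when it was queued (or
 * skipped as redundant), or _FAILURE on allocation error.
 */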
int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	if (!(lkb->lkb_flags & DLM_IFL_CB_PENDING)) {
		lkb->lkb_flags |= DLM_IFL_CB_PENDING;
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
	}
	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

out:
	return rv;
}

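/*
 * Pop the oldest queued callback from the lkb. Returns _EMPTY if there
 * was none, _LAST if this removed the final queued entry, and _SUCCESS
 * otherwise.
 */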
int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* oldest undelivered cb is the first entry on the list */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	/* remove it from callbacks so the others shift down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}

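/*
 * Entry point for delivering an ast/bast to a lock. User-space locks go
 * through dlm_user_add_ast(). For kernel locks the callback is queued on
 * the lkb; if it is the first pending one, a reference on the lkb is
 * taken and delivery is scheduled, via the workqueue or, while callbacks
 * are suspended (LSFL_CB_DELAY), via the lockspace delay list.
 */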
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}

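/*
 * Workqueue handler for one lkb: drain its callback queue, delivering
 * each ast/bast in order, then drop the reference taken when delivery
 * was scheduled.
 */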
void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	spin_unlock(&lkb->lkb_cb_lock);

	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY))
		goto out;

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

out:
	/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}

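/* Create the per-lockspace workqueue used to deliver callbacks. */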
int dlm_callback_start(struct dlm_ls *ls)
{
	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}

void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq) {
		spin_lock(&ls->ls_cb_lock);
		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
		spin_unlock(&ls->ls_cb_lock);

		flush_workqueue(ls->ls_callback_wq);
	}
}

#define MAX_CB_QUEUE 25

void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	int count = 0, sum = 0;
	bool empty;

	if (!ls->ls_callback_wq)
		return;

more:
	spin_lock(&ls->ls_cb_lock);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}