--- glock.c (commit 6144464937fe1e6135b13a30502a339d549bf093)
+++ glock.c (commit dc732906c2450939c319fec6e258aa89ecb5a632)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

--- 44 unchanged lines hidden ---

 	struct rhashtable_iter hti;	/* rhashtable iterator */
 	struct gfs2_glock *gl;		/* current glock struct */
 	loff_t last_pos;		/* last position */
 };

 typedef void (*glock_examiner) (struct gfs2_glock * gl);

 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
+static void __gfs2_glock_dq(struct gfs2_holder *gh);

 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
 struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);

--- 123 unchanged lines hidden ---

  */

 static int demote_ok(const struct gfs2_glock *gl)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;

 	if (gl->gl_state == LM_ST_UNLOCKED)
 		return 0;
+	/*
+	 * Note that demote_ok is used for the lru process of disposing of
+	 * glocks. For this purpose, we don't care if the glock's holders
+	 * have the HIF_MAY_DEMOTE flag set or not. If someone is using
+	 * them, don't demote.
+	 */
 	if (!list_empty(&gl->gl_holders))
 		return 0;
 	if (glops->go_demote_ok)
 		return glops->go_demote_ok(gl);
 	return 1;
 }


--- 166 unchanged lines hidden ---

  * @ret: The status from the DLM
  */

 static void do_error(struct gfs2_glock *gl, const int ret)
 {
 	struct gfs2_holder *gh, *tmp;

 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
-		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+		if (!test_bit(HIF_WAIT, &gh->gh_iflags))
 			continue;
 		if (ret & LM_OUT_ERROR)
 			gh->gh_error = -EIO;
 		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
 			gh->gh_error = GLR_TRYFAILED;
 		else
 			continue;
 		list_del_init(&gh->gh_list);
 		trace_gfs2_glock_queue(gh, 0);
 		gfs2_holder_wake(gh);
 	}
 }

 /**
+ * demote_incompat_holders - demote incompatible demoteable holders
+ * @gl: the glock we want to promote
+ * @new_gh: the new holder to be promoted
+ */
+static void demote_incompat_holders(struct gfs2_glock *gl,
+				    struct gfs2_holder *new_gh)
+{
+	struct gfs2_holder *gh;
+
+	/*
+	 * Demote incompatible holders before we make ourselves eligible.
+	 * (This holder may or may not allow auto-demoting, but we don't want
+	 * to demote the new holder before it's even granted.)
+	 */
+	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		/*
+		 * Since holders are at the front of the list, we stop when we
+		 * find the first non-holder.
+		 */
+		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return;
+		if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) &&
+		    !may_grant(gl, new_gh, gh)) {
+			/*
+			 * We should not recurse into do_promote because
+			 * __gfs2_glock_dq only calls handle_callback,
+			 * gfs2_glock_add_to_lru and __gfs2_glock_queue_work.
+			 */
+			__gfs2_glock_dq(gh);
+		}
+	}
+}
+
+/**
  * find_first_holder - find the first "holder" gh
  * @gl: the glock
  */

 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh;

 	if (!list_empty(&gl->gl_holders)) {
 		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
 				      gh_list);
 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 			return gh;
 	}
 	return NULL;
 }

 /**
+ * find_first_strong_holder - find the first non-demoteable holder
+ * @gl: the glock
+ *
+ * Find the first holder that doesn't have the HIF_MAY_DEMOTE flag set.
+ */
+static inline struct gfs2_holder *
+find_first_strong_holder(struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return NULL;
+		if (!test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
+/**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
  * Returns: 1 if there is a blocked holder at the head of the list, or 2
  * if a type specific operation is underway.
  */

 static int do_promote(struct gfs2_glock *gl)
 __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh, *tmp, *first_gh;
+	bool incompat_holders_demoted = false;
 	int ret;

 restart:
-	first_gh = find_first_holder(gl);
+	first_gh = find_first_strong_holder(gl);
 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
-		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+		if (!test_bit(HIF_WAIT, &gh->gh_iflags))
 			continue;
 		if (may_grant(gl, first_gh, gh)) {
+			if (!incompat_holders_demoted) {
+				demote_incompat_holders(gl, first_gh);
+				incompat_holders_demoted = true;
+				first_gh = gh;
+			}
 			if (gh->gh_list.prev == &gl->gl_holders &&
 			    glops->go_lock) {
 				spin_unlock(&gl->gl_lockref.lock);
 				/* FIXME: eliminate this eventually */
 				ret = glops->go_lock(gh);
 				spin_lock(&gl->gl_lockref.lock);
 				if (ret) {
 					if (ret == 1)

--- 9 unchanged lines hidden ---

 				gfs2_holder_wake(gh);
 				goto restart;
 			}
 			set_bit(HIF_HOLDER, &gh->gh_iflags);
 			trace_gfs2_promote(gh, 0);
 			gfs2_holder_wake(gh);
 			continue;
 		}
+		/*
+		 * If we get here, it means we may not grant this holder for
+		 * some reason. If this holder is the head of the list, it
+		 * means we have a blocked holder at the head, so return 1.
+		 */
 		if (gh->gh_list.prev == &gl->gl_holders)
 			return 1;
 		do_error(gl, 0);
 		break;
 	}
 	return 0;
 }

--- 898 unchanged lines hidden ---

 	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 		GLOCK_BUG_ON(gl, true);

 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
 			struct gfs2_holder *first_gh;

-			first_gh = find_first_holder(gl);
+			first_gh = find_first_strong_holder(gl);
 			try_futile = !may_grant(gl, first_gh, gh);
 		}
 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 			goto fail;
 	}

 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
-		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
+		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK) &&
+		    !test_bit(HIF_MAY_DEMOTE, &gh2->gh_iflags)))
 			goto trap_recursive;
 		if (try_futile &&
 		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 fail:
 			gh->gh_error = GLR_TRYFAILED;
 			gfs2_holder_wake(gh);
 			return;
 		}

--- 79 unchanged lines hidden ---

  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
  */

 int gfs2_glock_poll(struct gfs2_holder *gh)
 {
 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
 }

-/**
- * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
- * @gh: the glock holder
- *
- */
+static inline bool needs_demote(struct gfs2_glock *gl)
+{
+	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
+		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
+}

-void gfs2_glock_dq(struct gfs2_holder *gh)
+static void __gfs2_glock_dq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned delay = 0;
 	int fast_path = 0;

-	spin_lock(&gl->gl_lockref.lock);
 	/*
-	 * If we're in the process of file system withdraw, we cannot just
-	 * dequeue any glocks until our journal is recovered, lest we
-	 * introduce file system corruption. We need two exceptions to this
-	 * rule: We need to allow unlocking of nondisk glocks and the glock
-	 * for our own journal that needs recovery.
+	 * This while loop is similar to function demote_incompat_holders:
+	 * If the glock is due to be demoted (which may be from another node
+	 * or even if this holder is GL_NOCACHE), the weak holders are
+	 * demoted as well, allowing the glock to be demoted.
 	 */
-	if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
-	    glock_blocked_by_withdraw(gl) &&
-	    gh->gh_gl != sdp->sd_jinode_gl) {
-		sdp->sd_glock_dqs_held++;
-		spin_unlock(&gl->gl_lockref.lock);
-		might_sleep();
-		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
-			    TASK_UNINTERRUPTIBLE);
-		spin_lock(&gl->gl_lockref.lock);
-	}
-	if (gh->gh_flags & GL_NOCACHE)
-		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
+	while (gh) {
+		/*
+		 * If we're in the process of file system withdraw, we cannot
+		 * just dequeue any glocks until our journal is recovered, lest
+		 * we introduce file system corruption. We need two exceptions
+		 * to this rule: We need to allow unlocking of nondisk glocks
+		 * and the glock for our own journal that needs recovery.
+		 */
+		if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
+		    glock_blocked_by_withdraw(gl) &&
+		    gh->gh_gl != sdp->sd_jinode_gl) {
+			sdp->sd_glock_dqs_held++;
+			spin_unlock(&gl->gl_lockref.lock);
+			might_sleep();
+			wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
+				    TASK_UNINTERRUPTIBLE);
+			spin_lock(&gl->gl_lockref.lock);
+		}

-	list_del_init(&gh->gh_list);
-	clear_bit(HIF_HOLDER, &gh->gh_iflags);
-	if (list_empty(&gl->gl_holders) &&
-	    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
-	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
-		fast_path = 1;
+		/*
+		 * This holder should not be cached, so mark it for demote.
+		 * Note: this should be done before the check for needs_demote
+		 * below.
+		 */
+		if (gh->gh_flags & GL_NOCACHE)
+			handle_callback(gl, LM_ST_UNLOCKED, 0, false);

+		list_del_init(&gh->gh_list);
+		clear_bit(HIF_HOLDER, &gh->gh_iflags);
+		trace_gfs2_glock_queue(gh, 0);
+
+		/*
+		 * If there hasn't been a demote request we are done.
+		 * (Let the remaining holders, if any, keep holding it.)
+		 */
+		if (!needs_demote(gl)) {
+			if (list_empty(&gl->gl_holders))
+				fast_path = 1;
+			break;
+		}
+		/*
+		 * If we have another strong holder (we cannot auto-demote)
+		 * we are done. It keeps holding it until it is done.
+		 */
+		if (find_first_strong_holder(gl))
+			break;
+
+		/*
+		 * If we have a weak holder at the head of the list, it
+		 * (and all others like it) must be auto-demoted. If there
+		 * are no more weak holders, we exit the while loop.
+		 */
+		gh = find_first_holder(gl);
+	}
+
 	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
 		gfs2_glock_add_to_lru(gl);

-	trace_gfs2_glock_queue(gh, 0);
 	if (unlikely(!fast_path)) {
 		gl->gl_lockref.count++;
 		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 		    gl->gl_name.ln_type == LM_TYPE_INODE)
 			delay = gl->gl_hold_time;
 		__gfs2_glock_queue_work(gl, delay);
 	}
+}
+
+/**
+ * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
+ * @gh: the glock holder
+ *
+ */
+void gfs2_glock_dq(struct gfs2_holder *gh)
+{
+	struct gfs2_glock *gl = gh->gh_gl;
+
+	spin_lock(&gl->gl_lockref.lock);
+	__gfs2_glock_dq(gh);
 	spin_unlock(&gl->gl_lockref.lock);
 }

 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
 	gfs2_glock_dq(gh);
 	might_sleep();

--- 146 unchanged lines hidden ---

 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 {
 	while (num_gh--)
 		gfs2_glock_dq(&ghs[num_gh]);
 }

 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 {
+	struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
 	unsigned long delay = 0;
 	unsigned long holdtime;
 	unsigned long now = jiffies;

 	gfs2_glock_hold(gl);
 	spin_lock(&gl->gl_lockref.lock);
 	holdtime = gl->gl_tchange + gl->gl_hold_time;
 	if (!list_empty(&gl->gl_holders) &&
 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
 		if (time_before(now, holdtime))
 			delay = holdtime - now;
 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
 			delay = gl->gl_hold_time;
 	}
+	/*
+	 * Note 1: We cannot call demote_incompat_holders from handle_callback
+	 * or gfs2_set_demote due to recursion problems like: gfs2_glock_dq ->
+	 * handle_callback -> demote_incompat_holders -> gfs2_glock_dq
+	 * Plus, we only want to demote the holders if the request comes from
+	 * a remote cluster node because local holder conflicts are resolved
+	 * elsewhere.
+	 *
+	 * Note 2: if a remote node wants this glock in EX mode, lock_dlm will
+	 * request that we set our state to UNLOCKED. Here we mock up a holder
+	 * to make it look like someone wants the lock EX locally. Any SH
+	 * and DF requests should be able to share the lock without demoting.
+	 *
+	 * Note 3: We only want to demote the demoteable holders when there
+	 * are no more strong holders. The demoteable holders might as well
+	 * keep the glock until the last strong holder is done with it.
+	 */
+	if (!find_first_strong_holder(gl)) {
+		if (state == LM_ST_UNLOCKED)
+			mock_gh.gh_state = LM_ST_EXCLUSIVE;
+		demote_incompat_holders(gl, &mock_gh);
+	}
 	handle_callback(gl, state, delay, true);
 	__gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
 }

 /**
  * gfs2_should_freeze - Figure out if glock should be frozen
  * @gl: The glock in question

--- 373 unchanged lines hidden ---

 	if (flags & GL_EXACT)
 		*p++ = 'E';
 	if (flags & GL_NOCACHE)
 		*p++ = 'c';
 	if (test_bit(HIF_HOLDER, &iflags))
 		*p++ = 'H';
 	if (test_bit(HIF_WAIT, &iflags))
 		*p++ = 'W';
+	if (test_bit(HIF_MAY_DEMOTE, &iflags))
+		*p++ = 'D';
 	*p = 0;
 	return buf;
 }

 /**
  * dump_holder - print information about a glock holder
  * @seq: the seq_file struct
  * @gh: the glock holder

--- 462 unchanged lines hidden ---