Previously, variable i_all_lf was allocated and released in several
functions: lf_setlock(), lf_clearlock(), and lf_getlock(), and was
used only temporarily as noted in flock.cc. This pattern easily leads
to bugs like those that occurred in flock.cc, such as:
  lf_setlock()                        lf_clearlock()
       |                                    .
  i_all_lf = tp.w_get()                     .
       |                                    .
       +----------------------------------->+
       .                                    |
       .                          i_all_lf = tp.w_get()
       .                                    |
       .                               do something
       .                                    |
       .                      (release i_all_lf implicitly)
       .                                    |
       +<-----------------------------------+
       |                                    .
  accessing i_all_lf (may destroy tmp_pathbuf area)
       |                                    .
A similar problem also happens in the multi-threaded case.
With this patch, to fix and prevent the bugs, make i_all_lf non-
temporary and allocate/free in constructor/destructor of inode_t.
To prevent inter-thread-access-conflicts in i_all_lf list,
inode_t::get_all_locks_list() is guarded by inode_t::LOCK().
In addition, move get_all_locks_list() call in lf_clearlock() to
fhandler_base::lock() to avoid calling the function twice.
Addresses: https://cygwin.com/pipermail/cygwin/2025-October/258914.html
Fixes: ae181b0ff122 ("Cygwin: lockf: Make lockf() return ENOLCK when too many locks")
Reported-by: Nahor <[email protected]>
Co-authored-by: Corinna Vinschen <[email protected]>
Signed-off-by: Takashi Yano <[email protected]>
---
winsup/cygwin/flock.cc | 41 +++++++++++++++++++++--------------------
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/winsup/cygwin/flock.cc b/winsup/cygwin/flock.cc
index e03caba27..9e0101965 100644
--- a/winsup/cygwin/flock.cc
+++ b/winsup/cygwin/flock.cc
@@ -264,7 +264,6 @@ class lockf_t
/* Used to store own lock list in the cygheap. */
void *operator new (size_t size)
{ return cmalloc (HEAP_FHANDLER, sizeof (lockf_t)); }
- /* Never call on node->i_all_lf! */
void operator delete (void *p)
{ cfree (p); }
@@ -285,7 +284,7 @@ class inode_t
public:
LIST_ENTRY (inode_t) i_next;
lockf_t *i_lockf; /* List of locks of this process. */
- lockf_t *i_all_lf; /* Temp list of all locks for this file. */
+ lockf_t *i_all_lf; /* List of all locks for this file. */
dev_t i_dev; /* Device ID */
ino_t i_ino; /* inode number */
@@ -327,6 +326,7 @@ class inode_t
inode_t::~inode_t ()
{
+ free (i_all_lf);
lockf_t *lock, *n_lock;
for (lock = i_lockf; lock && (n_lock = lock->lf_next, 1); lock = n_lock)
delete lock;
@@ -502,6 +502,14 @@ inode_t::get (dev_t dev, ino_t ino, bool
create_if_missing, bool lock)
return node;
}
+/* Enumerate all lock event objects for this file and create a lockf_t
+ list in the i_all_lf. This list is searched in lf_getblock for locks
+ which potentially block our lock request. */
+
+/* Number of lockf_t structs which fit in the i_all_lf buffer. */
+#define MAX_LOCKF_CNT ((intptr_t)((NT_MAX_PATH * sizeof (WCHAR)) \
+ / sizeof (lockf_t)))
+
inode_t::inode_t (dev_t dev, ino_t ino)
: i_lockf (NULL), i_all_lf (NULL), i_dev (dev), i_ino (ino), i_cnt (0L),
i_lock_cnt (0)
@@ -529,16 +537,11 @@ inode_t::inode_t (dev_t dev, ino_t ino)
status = NtCreateMutant (&i_mtx, CYG_MUTANT_ACCESS, &attr, FALSE);
if (!NT_SUCCESS (status))
api_fatal ("NtCreateMutant(inode): %y", status);
+ i_all_lf = (lockf_t *) malloc (MAX_LOCKF_CNT * sizeof (lockf_t));
+ if (!i_all_lf)
+ api_fatal ("Allocating lock list memory failed.");
}
-/* Enumerate all lock event objects for this file and create a lockf_t
- list in the i_all_lf member. This list is searched in lf_getblock
- for locks which potentially block our lock request. */
-
-/* Number of lockf_t structs which fit in the temporary buffer. */
-#define MAX_LOCKF_CNT ((intptr_t)((NT_MAX_PATH * sizeof (WCHAR)) \
- / sizeof (lockf_t)))
-
bool
lockf_t::from_obj_name (inode_t *node, lockf_t **head, const wchar_t *name)
{
@@ -1157,6 +1160,11 @@ restart: /* Entry point after a restartable signal came
in. */
break;
case F_UNLCK:
+ /* lf_createlock() is called from here as well as from lf_setlock().
lf_setlock() already calls get_all_locks_list(), so we don't call it
from lf_clearlock() but rather here, to avoid calling the (potentially
time-consuming) function twice when called through lf_setlock(). */
+ node->get_all_locks_list ();
error = lf_clearlock (lock, &clean, get_handle ());
lock->lf_next = clean;
clean = lock;
@@ -1218,7 +1226,6 @@ lf_setlock (lockf_t *lock, inode_t *node, lockf_t
**clean, HANDLE fhdl)
lockf_t **head = lock->lf_head;
lockf_t **prev, *overlap;
int ovcase, priority, old_prio, needtolink;
- tmp_pathbuf tp;
/*
* Set the priority
@@ -1229,8 +1236,6 @@ lf_setlock (lockf_t *lock, inode_t *node, lockf_t
**clean, HANDLE fhdl)
/*
* Scan lock list for this file looking for locks that would block us.
*/
- /* Create temporary space for the all locks list. */
- node->i_all_lf = (lockf_t *) (void *) tp.w_get ();
while ((block = lf_getblock(lock, node)))
{
HANDLE obj = block->lf_obj;
@@ -1282,6 +1287,8 @@ lf_setlock (lockf_t *lock, inode_t *node, lockf_t
**clean, HANDLE fhdl)
lock->lf_type = F_WRLCK;
}
+ DWORD lf_wid = block->lf_wid;
+
/*
* Add our lock to the blocked list and sleep until we're free.
* Remember who blocked us (for deadlock detection).
@@ -1298,7 +1305,7 @@ lf_setlock (lockf_t *lock, inode_t *node, lockf_t
**clean, HANDLE fhdl)
HANDLE proc = NULL;
if (lock->lf_flags & F_POSIX)
{
- proc = OpenProcess (SYNCHRONIZE, FALSE, block->lf_wid);
+ proc = OpenProcess (SYNCHRONIZE, FALSE, lf_wid);
if (!proc)
timeout = 0L;
else
@@ -1543,9 +1550,6 @@ lf_clearlock (lockf_t *unlock, lockf_t **clean, HANDLE
fhdl)
return 0;
inode_t *node = lf->lf_inode;
- tmp_pathbuf tp;
- node->i_all_lf = (lockf_t *) tp.w_get ();
- node->get_all_locks_list (); /* Update lock count */
uint32_t lock_cnt = node->get_lock_count ();
bool first_loop = true;
@@ -1631,10 +1635,7 @@ static int
lf_getlock (lockf_t *lock, inode_t *node, struct flock *fl)
{
lockf_t *block;
- tmp_pathbuf tp;
- /* Create temporary space for the all locks list. */
- node->i_all_lf = (lockf_t *) (void * ) tp.w_get ();
if ((block = lf_getblock (lock, node)))
{
if (block->lf_obj)
--
2.51.0