Skip to content

Commit c4d6fe7

Browse files
committed
Merge tag 'xarray-5.9' of git://git.infradead.org/users/willy/xarray

Pull XArray updates from Matthew Wilcox:

 - Fix the test suite after introduction of the local_lock

 - Fix a bug in the IDA spotted by Coverity

 - Change the API that allows the workingset code to delete a node

 - Fix xas_reload() when dealing with entries that occupy multiple
   indices

 - Add a few more tests to the test suite

 - Fix an unsigned int being shifted into an unsigned long

* tag 'xarray-5.9' of git://git.infradead.org/users/willy/xarray:
  XArray: Fix xas_create_range for ranges above 4 billion
  radix-tree: fix the comment of radix_tree_next_slot()
  XArray: Fix xas_reload for multi-index entries
  XArray: Add private interface for workingset node deletion
  XArray: Fix xas_for_each_conflict documentation
  XArray: Test marked multiorder iterations
  XArray: Test two more things about xa_cmpxchg
  ida: Free allocated bitmap in error path
  radix tree test suite: Fix compilation
2 parents 59f0e7e + 84c34df commit c4d6fe7

File tree

11 files changed

+116
-35
lines changed

11 files changed

+116
-35
lines changed

include/linux/radix-tree.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <linux/bitops.h>
1212
#include <linux/kernel.h>
1313
#include <linux/list.h>
14+
#include <linux/percpu.h>
1415
#include <linux/preempt.h>
1516
#include <linux/rcupdate.h>
1617
#include <linux/spinlock.h>
@@ -376,7 +377,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
376377
* radix_tree_next_slot - find next slot in chunk
377378
*
378379
* @slot: pointer to current slot
379-
* @iter: pointer to interator state
380+
* @iter: pointer to iterator state
380381
* @flags: RADIX_TREE_ITER_*, should be constant
381382
* Returns: pointer to next slot, or NULL if there no more left
382383
*

include/linux/xarray.h

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1286,6 +1286,8 @@ static inline bool xa_is_advanced(const void *entry)
12861286
*/
12871287
typedef void (*xa_update_node_t)(struct xa_node *node);
12881288

1289+
void xa_delete_node(struct xa_node *, xa_update_node_t);
1290+
12891291
/*
12901292
* The xa_state is opaque to its users. It contains various different pieces
12911293
* of state involved in the current operation on the XArray. It should be
@@ -1544,10 +1546,21 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry,
15441546
static inline void *xas_reload(struct xa_state *xas)
15451547
{
15461548
struct xa_node *node = xas->xa_node;
1547-
1548-
if (node)
1549-
return xa_entry(xas->xa, node, xas->xa_offset);
1550-
return xa_head(xas->xa);
1549+
void *entry;
1550+
char offset;
1551+
1552+
if (!node)
1553+
return xa_head(xas->xa);
1554+
if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
1555+
offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
1556+
entry = xa_entry(xas->xa, node, offset);
1557+
if (!xa_is_sibling(entry))
1558+
return entry;
1559+
offset = xa_to_sibling(entry);
1560+
} else {
1561+
offset = xas->xa_offset;
1562+
}
1563+
return xa_entry(xas->xa, node, offset);
15511564
}
15521565

15531566
/**
@@ -1736,13 +1749,12 @@ enum {
17361749
* @xas: XArray operation state.
17371750
* @entry: Entry retrieved from the array.
17381751
*
1739-
* The loop body will be executed for each entry in the XArray that lies
1740-
* within the range specified by @xas. If the loop completes successfully,
1741-
* any entries that lie in this range will be replaced by @entry. The caller
1742-
* may break out of the loop; if they do so, the contents of the XArray will
1743-
* be unchanged. The operation may fail due to an out of memory condition.
1744-
* The caller may also call xa_set_err() to exit the loop while setting an
1745-
* error to record the reason.
1752+
* The loop body will be executed for each entry in the XArray that
1753+
* lies within the range specified by @xas. If the loop terminates
1754+
* normally, @entry will be %NULL. The user may break out of the loop,
1755+
* which will leave @entry set to the conflicting entry. The caller
1756+
* may also call xa_set_err() to exit the loop while setting an error
1757+
* to record the reason.
17461758
*/
17471759
#define xas_for_each_conflict(xas, entry) \
17481760
while ((entry = xas_find_conflict(xas)))

lib/idr.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -471,6 +471,7 @@ int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
471471
goto retry;
472472
nospc:
473473
xas_unlock_irqrestore(&xas, flags);
474+
kfree(alloc);
474475
return -ENOSPC;
475476
}
476477
EXPORT_SYMBOL(ida_alloc_range);

lib/radix-tree.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
#include <linux/kernel.h>
2121
#include <linux/kmemleak.h>
2222
#include <linux/percpu.h>
23-
#include <linux/local_lock.h>
2423
#include <linux/preempt.h> /* in_interrupt() */
2524
#include <linux/radix-tree.h>
2625
#include <linux/rcupdate.h>

lib/test_xarray.c

Lines changed: 26 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
289289
xa_destroy(xa);
290290
}
291291

292+
static noinline void check_xa_mark_3(struct xarray *xa)
293+
{
294+
#ifdef CONFIG_XARRAY_MULTI
295+
XA_STATE(xas, xa, 0x41);
296+
void *entry;
297+
int count = 0;
298+
299+
xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
300+
xa_set_mark(xa, 0x41, XA_MARK_0);
301+
302+
rcu_read_lock();
303+
xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
304+
count++;
305+
XA_BUG_ON(xa, entry != xa_mk_index(0x40));
306+
}
307+
XA_BUG_ON(xa, count != 1);
308+
rcu_read_unlock();
309+
xa_destroy(xa);
310+
#endif
311+
}
312+
292313
static noinline void check_xa_mark(struct xarray *xa)
293314
{
294315
unsigned long index;
@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
297318
check_xa_mark_1(xa, index);
298319

299320
check_xa_mark_2(xa);
321+
check_xa_mark_3(xa);
300322
}
301323

302324
static noinline void check_xa_shrink(struct xarray *xa)
@@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
393415
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
394416
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
395417
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
418+
XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
419+
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
420+
XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
396421
xa_erase_index(xa, 12345678);
397422
xa_erase_index(xa, 5);
398423
XA_BUG_ON(xa, !xa_empty(xa));
@@ -1618,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa)
16181643
xa_lock(xa);
16191644
while ((node = list_first_entry_or_null(&shadow_nodes,
16201645
struct xa_node, private_list))) {
1621-
XA_STATE(xas, node->array, 0);
16221646
XA_BUG_ON(xa, node->array != xa);
16231647
list_del_init(&node->private_list);
1624-
xas.xa_node = xa_parent_locked(node->array, node);
1625-
xas.xa_offset = node->offset;
1626-
xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
1627-
xas_set_update(&xas, test_update_node);
1628-
xas_store(&xas, NULL);
1648+
xa_delete_node(node, test_update_node);
16291649
}
16301650
xa_unlock(xa);
16311651
}

lib/xarray.c

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -706,7 +706,7 @@ void xas_create_range(struct xa_state *xas)
706706
unsigned char shift = xas->xa_shift;
707707
unsigned char sibs = xas->xa_sibs;
708708

709-
xas->xa_index |= ((sibs + 1) << shift) - 1;
709+
xas->xa_index |= ((sibs + 1UL) << shift) - 1;
710710
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
711711
xas->xa_offset |= sibs;
712712
xas->xa_shift = 0;
@@ -2163,6 +2163,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
21632163
}
21642164
EXPORT_SYMBOL(xa_extract);
21652165

2166+
/**
2167+
* xa_delete_node() - Private interface for workingset code.
2168+
* @node: Node to be removed from the tree.
2169+
* @update: Function to call to update ancestor nodes.
2170+
*
2171+
* Context: xa_lock must be held on entry and will not be released.
2172+
*/
2173+
void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2174+
{
2175+
struct xa_state xas = {
2176+
.xa = node->array,
2177+
.xa_index = (unsigned long)node->offset <<
2178+
(node->shift + XA_CHUNK_SHIFT),
2179+
.xa_shift = node->shift + XA_CHUNK_SHIFT,
2180+
.xa_offset = node->offset,
2181+
.xa_node = xa_parent_locked(node->array, node),
2182+
.xa_update = update,
2183+
};
2184+
2185+
xas_store(&xas, NULL);
2186+
}
2187+
EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
2188+
21662189
/**
21672190
* xa_destroy() - Free all internal data structures.
21682191
* @xa: XArray.

mm/workingset.c

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -519,12 +519,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
519519
void *arg) __must_hold(lru_lock)
520520
{
521521
struct xa_node *node = container_of(item, struct xa_node, private_list);
522-
XA_STATE(xas, node->array, 0);
523522
struct address_space *mapping;
524523
int ret;
525524

526525
/*
527-
* Page cache insertions and deletions synchroneously maintain
526+
* Page cache insertions and deletions synchronously maintain
528527
* the shadow node LRU under the i_pages lock and the
529528
* lru_lock. Because the page cache tree is emptied before
530529
* the inode can be destroyed, holding the lru_lock pins any
@@ -559,15 +558,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
559558
if (WARN_ON_ONCE(node->count != node->nr_values))
560559
goto out_invalid;
561560
mapping->nrexceptional -= node->nr_values;
562-
xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
563-
xas.xa_offset = node->offset;
564-
xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
565-
xas_set_update(&xas, workingset_update_node);
566-
/*
567-
* We could store a shadow entry here which was the minimum of the
568-
* shadow entries we were tracking ...
569-
*/
570-
xas_store(&xas, NULL);
561+
xa_delete_node(node, workingset_update_node);
571562
__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
572563

573564
out_invalid:

tools/testing/radix-tree/idr-test.c

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
523523
return NULL;
524524
}
525525

526+
static void *ida_leak_fn(void *arg)
527+
{
528+
struct ida *ida = arg;
529+
time_t s = time(NULL);
530+
int i, ret;
531+
532+
rcu_register_thread();
533+
534+
do for (i = 0; i < 1000; i++) {
535+
ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
536+
if (ret >= 0)
537+
ida_free(ida, 128);
538+
} while (time(NULL) < s + 2);
539+
540+
rcu_unregister_thread();
541+
return NULL;
542+
}
543+
526544
void ida_thread_tests(void)
527545
{
546+
DEFINE_IDA(ida);
528547
pthread_t threads[20];
529548
int i;
530549

@@ -536,6 +555,16 @@ void ida_thread_tests(void)
536555

537556
while (i--)
538557
pthread_join(threads[i], NULL);
558+
559+
for (i = 0; i < ARRAY_SIZE(threads); i++)
560+
if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
561+
perror("creating ida thread");
562+
exit(1);
563+
}
564+
565+
while (i--)
566+
pthread_join(threads[i], NULL);
567+
assert(ida_is_empty(&ida));
539568
}
540569

541570
void ida_tests(void)

tools/testing/radix-tree/linux/kernel.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,4 +22,5 @@
2222
#define __releases(x)
2323
#define __must_hold(x)
2424

25+
#define EXPORT_PER_CPU_SYMBOL_GPL(x)
2526
#endif /* _KERNEL_H */
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
#ifndef _LINUX_LOCAL_LOCK
2+
#define _LINUX_LOCAL_LOCK
3+
typedef struct { } local_lock_t;
4+
5+
static inline void local_lock(local_lock_t *lock) { }
6+
static inline void local_unlock(local_lock_t *lock) { }
7+
#define INIT_LOCAL_LOCK(x) { }
8+
#endif

Comments (0)