[PULL 07/15] accel/tcg: Pass last not end to page_collection_lock
From: Richard Henderson
Subject: [PULL 07/15] accel/tcg: Pass last not end to page_collection_lock
Date: Tue, 28 Mar 2023 15:57:58 -0700
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.
Fixes a bug in the loop comparison where "<= end" would lock
one more page than required.
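A minimal standalone sketch (not part of the patch) of the overflow in
question: uint32_t stands in for tb_page_addr_t, and the 4 KiB
PAGE_SIZE/PAGE_MASK below stand in for TARGET_PAGE_SIZE/TARGET_PAGE_MASK.
It shows how the exclusive "end" wraps to zero on the last page of the
address space while the inclusive "last" stays representable.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uint32_t addr  = 0xfffff123;        /* a byte in the last page */
    uint32_t start = addr & PAGE_MASK;  /* 0xfffff000 */

    /* Old convention: first address past the range wraps to 0 ... */
    uint32_t end = start + PAGE_SIZE;
    printf("end  = 0x%08x, start <= end:  %d\n", end, start <= end);

    /* ... which breaks g_assert(start <= end) and any "index <= end"
     * loop.  New convention: the last byte is always representable. */
    uint32_t last = addr | ~PAGE_MASK;  /* 0xffffffff */
    printf("last = 0x%08x, start <= last: %d\n", last, start <= last);
    return 0;
}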
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/tb-maint.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 04d2751bb6..57da2feb2f 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -511,20 +511,20 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
}
/*
- * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
* intersecting TBs.
* Locking order: acquire locks in ascending order of page index.
*/
static struct page_collection *page_collection_lock(tb_page_addr_t start,
- tb_page_addr_t end)
+ tb_page_addr_t last)
{
struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index;
PageDesc *pd;
start >>= TARGET_PAGE_BITS;
- end >>= TARGET_PAGE_BITS;
- g_assert(start <= end);
+ last >>= TARGET_PAGE_BITS;
+ g_assert(start <= last);
set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy);
@@ -534,7 +534,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
retry:
q_tree_foreach(set->tree, page_entry_lock, NULL);
- for (index = start; index <= end; index++) {
+ for (index = start; index <= last; index++) {
TranslationBlock *tb;
PageForEachNext n;
@@ -1154,7 +1154,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
- tb_page_addr_t start, end;
+ tb_page_addr_t start, last;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1163,9 +1163,9 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
}
start = addr & TARGET_PAGE_MASK;
- end = start + TARGET_PAGE_SIZE;
- pages = page_collection_lock(start, end);
- tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+ last = addr | ~TARGET_PAGE_MASK;
+ pages = page_collection_lock(start, last);
+ tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
page_collection_unlock(pages);
}
@@ -1181,7 +1181,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
struct page_collection *pages;
tb_page_addr_t next;
- pages = page_collection_lock(start, end);
+ pages = page_collection_lock(start, end - 1);
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
start < end;
start = next, next += TARGET_PAGE_SIZE) {
@@ -1226,7 +1226,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
{
struct page_collection *pages;
- pages = page_collection_lock(ram_addr, ram_addr + size);
+ pages = page_collection_lock(ram_addr, ram_addr + size - 1);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages);
}
--
2.34.1