[Midnightbsd-cvs] src [8091] trunk/sys: page flag PGA_WRITEABLE is set and cleared exclusively by the pmap layer, but it is read directly by the MI VM layer.
laffer1 at midnightbsd.org
Thu Sep 15 18:47:28 EDT 2016
Revision: 8091
http://svnweb.midnightbsd.org/src/?rev=8091
Author: laffer1
Date: 2016-09-15 18:47:28 -0400 (Thu, 15 Sep 2016)
Log Message:
-----------
The page flag PGA_WRITEABLE is set and cleared exclusively by the pmap layer, but it is read directly by the MI VM layer. Add a pmap interface, pmap_page_is_write_mapped(), and make the MI VM layer use it instead of testing the flag itself.
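In outline, the change adds a pmap-provided predicate and switches the MI call sites over to it. A minimal sketch assembled from the hunks below; the helper function and its name are illustrative only, everything else is taken from the diff:

/* MD pmap header (amd64/i386): PGA_WRITEABLE stays a pmap implementation detail. */
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)

/* MI VM layer: query the pmap instead of reading m->aflags directly. */
static void
example_note_insert(vm_object_t object, vm_page_t m)	/* hypothetical helper */
{
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}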
Modified Paths:
--------------
trunk/sys/amd64/include/pmap.h
trunk/sys/i386/include/pmap.h
trunk/sys/vm/pmap.h
trunk/sys/vm/swap_pager.c
trunk/sys/vm/vm_page.c
trunk/sys/vm/vm_page.h
trunk/sys/vm/vm_pageout.c
trunk/sys/vm/vnode_pager.c
Modified: trunk/sys/amd64/include/pmap.h
===================================================================
--- trunk/sys/amd64/include/pmap.h 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/amd64/include/pmap.h 2016-09-15 22:47:28 UTC (rev 8091)
@@ -309,6 +309,7 @@
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
+#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t *);
Modified: trunk/sys/i386/include/pmap.h
===================================================================
--- trunk/sys/i386/include/pmap.h 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/i386/include/pmap.h 2016-09-15 22:47:28 UTC (rev 8091)
@@ -498,6 +498,7 @@
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
+#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
/*
Modified: trunk/sys/vm/pmap.h
===================================================================
--- trunk/sys/vm/pmap.h 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/vm/pmap.h 2016-09-15 22:47:28 UTC (rev 8091)
@@ -80,10 +80,11 @@
typedef struct pmap_statistics *pmap_statistics_t;
/*
- * Each machine dependent implementation is expected to provide:
+ * Each machine-dependent implementation is required to provide:
*
* vm_memattr_t pmap_page_get_memattr(vm_page_t);
* boolean_t pmap_page_is_mapped(vm_page_t);
+ * boolean_t pmap_page_is_write_mapped(vm_page_t);
* void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
*/
#include <machine/pmap.h>
Modified: trunk/sys/vm/swap_pager.c
===================================================================
--- trunk/sys/vm/swap_pager.c 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/vm/swap_pager.c 2016-09-15 22:47:28 UTC (rev 8091)
@@ -1593,7 +1593,7 @@
* status, then finish the I/O ( which decrements the
* busy count and possibly wakes waiters up ).
*/
- KASSERT((m->aflags & PGA_WRITEABLE) == 0,
+ KASSERT(!pmap_page_is_write_mapped(m),
("swp_pager_async_iodone: page %p is not write"
" protected", m));
vm_page_undirty(m);
Modified: trunk/sys/vm/vm_page.c
===================================================================
--- trunk/sys/vm/vm_page.c 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/vm/vm_page.c 2016-09-15 22:47:28 UTC (rev 8091)
@@ -965,7 +965,7 @@
* Since we are inserting a new and possibly dirty page,
* update the object's OBJ_MIGHTBEDIRTY flag.
*/
- if (m->aflags & PGA_WRITEABLE)
+ if (pmap_page_is_write_mapped(m))
vm_object_set_writeable_dirty(object);
}
@@ -2504,11 +2504,11 @@
/*
* If the object is locked and the page is neither VPO_BUSY nor
- * PGA_WRITEABLE, then the page's dirty field cannot possibly be
+ * write mapped, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
+ if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
m->dirty &= ~pagebits;
else {
/*
Modified: trunk/sys/vm/vm_page.h
===================================================================
--- trunk/sys/vm/vm_page.h 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/vm/vm_page.h 2016-09-15 22:47:28 UTC (rev 8091)
@@ -237,20 +237,22 @@
#endif
#define vm_page_queue_free_mtx vm_page_queue_free_lock.data
+
/*
* These are the flags defined for vm_page.
*
- * aflags are updated by atomic accesses. Use the vm_page_aflag_set()
+ * aflags are updated by atomic accesses. Use the vm_page_aflag_set()
* and vm_page_aflag_clear() functions to set and clear the flags.
*
* PGA_REFERENCED may be cleared only if the object containing the page is
- * locked.
+ * locked. It is set by both the MI and MD VM layers.
*
* PGA_WRITEABLE is set exclusively on managed pages by pmap_enter(). When it
- * does so, the page must be VPO_BUSY.
+ * does so, the page must be VPO_BUSY. The MI VM layer must never access this
+ * flag directly. Instead, it should call pmap_page_is_write_mapped().
*
* PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
- * at least one executable mapping. It is not consumed by the VM layer.
+ * at least one executable mapping. It is not consumed by the MI VM layer.
*/
#define PGA_WRITEABLE 0x01 /* page may be mapped writeable */
#define PGA_REFERENCED 0x02 /* page has been referenced */
@@ -262,12 +264,12 @@
*/
#define PG_CACHED 0x01 /* page is cached */
#define PG_FREE 0x02 /* page is free */
-#define PG_FICTITIOUS 0x04 /* physical page doesn't exist (O) */
+#define PG_FICTITIOUS 0x04 /* physical page doesn't exist */
#define PG_ZERO 0x08 /* page is zeroed */
#define PG_MARKER 0x10 /* special queue marker page */
#define PG_SLAB 0x20 /* object pointer is actually a slab */
#define PG_WINATCFLS 0x40 /* flush dirty page on inactive q */
-#define PG_NODUMP 0x80 /* don't include this page in the dump */
+#define PG_NODUMP 0x80 /* don't include this page in a dump */
/*
* Misc constants.
Modified: trunk/sys/vm/vm_pageout.c
===================================================================
--- trunk/sys/vm/vm_pageout.c 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/vm/vm_pageout.c 2016-09-15 22:47:28 UTC (rev 8091)
@@ -503,7 +503,7 @@
vm_page_t mt = mc[i];
KASSERT(pageout_status[i] == VM_PAGER_PEND ||
- (mt->aflags & PGA_WRITEABLE) == 0,
+ !pmap_page_is_write_mapped(mt),
("vm_pageout_flush: page %p is not write protected", mt));
switch (pageout_status[i]) {
case VM_PAGER_OK:
@@ -922,7 +922,7 @@
* be updated.
*/
if (m->dirty != VM_PAGE_BITS_ALL &&
- (m->aflags & PGA_WRITEABLE) != 0) {
+ pmap_page_is_write_mapped(m)) {
/*
* Avoid a race condition: Unless write access is
* removed from the page, another processor could
Modified: trunk/sys/vm/vnode_pager.c
===================================================================
--- trunk/sys/vm/vnode_pager.c 2016-09-15 22:45:54 UTC (rev 8090)
+++ trunk/sys/vm/vnode_pager.c 2016-09-15 22:47:28 UTC (rev 8091)
@@ -1116,7 +1116,7 @@
m = ma[ncount - 1];
KASSERT(m->busy > 0,
("vnode_pager_generic_putpages: page %p is not busy", m));
- KASSERT((m->aflags & PGA_WRITEABLE) == 0,
+ KASSERT(!pmap_page_is_write_mapped(m),
("vnode_pager_generic_putpages: page %p is not read-only", m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);
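Taken together, the pager and pageout call sites converge on one pattern: before a page's dirty bits are cleared, assert through the new interface that the pmap layer holds no writeable mappings for it. A condensed sketch of that pattern, with a hypothetical helper name; the assertion and vm_page_undirty() call mirror the hunks above:

/* Hypothetical MI helper condensing the pattern used by the pagers above. */
static void
example_mark_clean(vm_page_t m)
{
	KASSERT(!pmap_page_is_write_mapped(m),
	    ("example_mark_clean: page %p still has writeable mappings", m));
	vm_page_undirty(m);
}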