Browse Source

Kernel: Harden sys$purge Vector usage against OOM.

sys$purge() is a bit unique, in that it is probably to the system's
advantage to attempt to limp along if we hit OOM while processing
the vmobjects to purge. This change modifies the algorithm to observe
OOM and continue trying to purge any previously visited VMObjects.
Brian Gianforcaro 4 years ago
parent
commit
569c5a8922
1 changed file with 24 additions and 4 deletions
  1. 24 4
      Kernel/Syscalls/purge.cpp

+ 24 - 4
Kernel/Syscalls/purge.cpp

@@ -21,12 +21,22 @@ KResultOr<int> Process::sys$purge(int mode)
     if (mode & PURGE_ALL_VOLATILE) {
         NonnullRefPtrVector<AnonymousVMObject> vmobjects;
         {
+            KResult result(KSuccess);
             InterruptDisabler disabler;
             MM.for_each_vmobject([&](auto& vmobject) {
-                if (vmobject.is_anonymous())
-                    vmobjects.append(vmobject);
+                if (vmobject.is_anonymous()) {
+                    // In the event that the append fails, only attempt to continue
+                    // the purge if we have already appended something successfully.
+                    if (!vmobjects.try_append(vmobject) && vmobjects.is_empty()) {
+                        result = ENOMEM;
+                        return IterationDecision::Break;
+                    }
+                }
                 return IterationDecision::Continue;
             });
+
+            if (result.is_error())
+                return result.error();
         }
         for (auto& vmobject : vmobjects) {
             purged_page_count += vmobject.purge();
@@ -35,12 +45,22 @@ KResultOr<int> Process::sys$purge(int mode)
     if (mode & PURGE_ALL_CLEAN_INODE) {
         NonnullRefPtrVector<InodeVMObject> vmobjects;
         {
+            KResult result(KSuccess);
             InterruptDisabler disabler;
             MM.for_each_vmobject([&](auto& vmobject) {
-                if (vmobject.is_inode())
-                    vmobjects.append(static_cast<InodeVMObject&>(vmobject));
+                if (vmobject.is_inode()) {
+                    // In the event that the append fails, only attempt to continue
+                    // the purge if we have already appended something successfully.
+                    if (!vmobjects.try_append(static_cast<InodeVMObject&>(vmobject)) && vmobjects.is_empty()) {
+                        result = ENOMEM;
+                        return IterationDecision::Break;
+                    }
+                }
                 return IterationDecision::Continue;
             });
+
+            if (result.is_error())
+                return result.error();
         }
         for (auto& vmobject : vmobjects) {
             purged_page_count += vmobject.release_all_clean_pages();