@@ -723,9 +723,41 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
 		if (PageWriteback(page)) {
-			nr_writeback++;
-			unlock_page(page);
-			goto keep;
+			/*
+			 * memcg doesn't have any dirty pages throttling so we
+			 * could easily OOM just because too many pages are in
+			 * writeback and there is nothing else to reclaim.
+			 *
+			 * Check __GFP_IO, certainly because a loop driver
+			 * thread might enter reclaim, and deadlock if it waits
+			 * on a page for which it is needed to do the write
+			 * (loop masks off __GFP_IO|__GFP_FS for this reason);
+			 * but more thought would probably show more reasons.
+			 *
+			 * Don't require __GFP_FS, since we're not going into
+			 * the FS, just waiting on its writeback completion.
+			 * Worryingly, ext4 gfs2 and xfs allocate pages with
+			 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
+			 * testing may_enter_fs here is liable to OOM on them.
+			 */
+			if (global_reclaim(sc) ||
+			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+				/*
+				 * This is slightly racy - end_page_writeback()
+				 * might have just cleared PageReclaim, then
+				 * setting PageReclaim here end up interpreted
+				 * as PageReadahead - but that does not matter
+				 * enough to care. What we do want is for this
+				 * page to have PageReclaim set next time memcg
+				 * reclaim reaches the tests above, so it will
+				 * then wait_on_page_writeback() to avoid OOM;
+				 * and it's also appropriate in global reclaim.
+				 */
+				SetPageReclaim(page);
+				nr_writeback++;
+				goto keep_locked;
+			}
+			wait_on_page_writeback(page);
 		}
 
 		if (!force_reclaim)
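
For reference, a minimal userspace sketch of the decision the new PageWriteback branch makes. The struct, enum, and classify() helper below are illustrative stand-ins for this note only, not kernel APIs; only the three-way condition mirrors the patch.

/* Standalone model of the writeback decision in the hunk above. */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool writeback;		/* stands in for PageWriteback() */
	bool reclaim;		/* stands in for PageReclaim()   */
};

enum action {
	NOT_UNDER_WRITEBACK,	/* branch not taken at all                  */
	SKIP_AND_MARK,		/* SetPageReclaim(), nr_writeback++, keep_locked */
	STALL_ON_WRITEBACK,	/* wait_on_page_writeback()                 */
};

/*
 * global: stands in for global_reclaim(sc).
 * gfp_io: whether the caller's gfp_mask includes __GFP_IO.
 */
static enum action classify(const struct fake_page *page, bool global, bool gfp_io)
{
	if (!page->writeback)
		return NOT_UNDER_WRITEBACK;
	/*
	 * Global reclaim, a page not yet tagged PageReclaim, or a caller
	 * that may not do IO: do not wait.  Tag the page so a later memcg
	 * reclaim pass will wait on it, and move on.
	 */
	if (global || !page->reclaim || !gfp_io)
		return SKIP_AND_MARK;
	/* memcg reclaim, PageReclaim already set, __GFP_IO allowed: wait. */
	return STALL_ON_WRITEBACK;
}

int main(void)
{
	struct fake_page p = { .writeback = true, .reclaim = true };

	/* memcg reclaim with __GFP_IO stalls; global reclaim never does. */
	printf("memcg:  %d\n", classify(&p, false, true));	/* STALL_ON_WRITEBACK */
	printf("global: %d\n", classify(&p, true, true));	/* SKIP_AND_MARK */
	return 0;
}

The point of the asymmetry is that only memcg reclaim, which has no dirty-page throttling of its own, ever sleeps in wait_on_page_writeback() here, and only for pages it has already seen once and tagged with PageReclaim.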