Lines 1340-1345
Link Here
|
1340 |
SetPageReferenced(page); |
1340 |
SetPageReferenced(page); |
1341 |
} |
1341 |
} |
1342 |
|
1342 |
|
|
|
1343 |
/**
 * shrink_list - non-blockingly drop pages from the given cache list
 * @mapping: the mapping from which we want to drop pages
 * @list: which list (e.g. locked, dirty, clean)?
 * @max_index: greatest index from which we will drop pages
 *
 * Walks @list from the tail and releases clean, unlocked, otherwise
 * unreferenced pagecache pages whose index does not exceed @max_index.
 * Never blocks: as soon as a page cannot be trylocked, or its buffers
 * cannot be released without I/O, the scan stops.  Returns the number
 * of pages freed.
 *
 * NOTE(review): @mapping is not referenced in this body — presumably
 * kept for symmetry with callers; confirm before removing.
 */
static unsigned long shrink_list(struct address_space *mapping,
				 struct list_head *list,
				 unsigned long max_index)
{
	struct list_head *curr = list->prev;	/* start at the list tail */
	unsigned long nr_shrunk = 0;

	/* lock order: pagemap_lru_lock first, then pagecache_lock */
	spin_lock(&pagemap_lru_lock);
	spin_lock(&pagecache_lock);

	while ((curr != list)) {
		struct page *page = list_entry(curr, struct page, list);

		/* advance before touching the page: it may be unlinked below */
		curr = curr->prev;

		/* leave pages beyond max_index alone (protects readahead) */
		if (page->index > max_index)
			continue;

		/* dirty pages would need writeback; skip them */
		if (PageDirty(page))
			continue;

		/* non-blocking: bail out rather than wait for the page lock */
		if (TryLockPage(page))
			break;

		if (page->buffers && !try_to_release_page(page, 0)) {
			/* probably dirty buffers */
			unlock_page(page);
			break;
		}

		/* count != 1 means someone else holds a reference; skip */
		if (page_count(page) != 1) {
			unlock_page(page);
			continue;
		}

		/* drop the page from the LRU and the pagecache, then
		 * release the pagecache's reference, freeing the page */
		__lru_cache_del(page);
		__remove_inode_page(page);
		unlock_page(page);
		page_cache_release(page);
		nr_shrunk++;
	}

	spin_unlock(&pagecache_lock);
	spin_unlock(&pagemap_lru_lock);

	return nr_shrunk;
}
1396 |
|
1397 |
/** |
1398 |
* shrink_pagecache - nonblockingly drop pages from the mapping. |
1399 |
* @file: the file we are doing I/O on |
1400 |
* @max_index: the maximum index from which we are willing to drop pages |
1401 |
* |
1402 |
* This is for O_STREAMING, which says "I am streaming data, I know I will not |
1403 |
* revisit this; do not cache anything". |
1404 |
* |
1405 |
* max_index allows us to only drop pages which are behind `index', to avoid |
1406 |
* trashing readahead. |
1407 |
*/ |
1408 |
static unsigned long shrink_pagecache(struct file *file, |
1409 |
unsigned long max_index) |
1410 |
{ |
1411 |
struct address_space *mapping = file->f_dentry->d_inode->i_mapping; |
1412 |
unsigned long nr_locked, nr_clean, nr_dirty; |
1413 |
|
1414 |
/* |
1415 |
* ensure we have a decent amount of work todo |
1416 |
*/ |
1417 |
if (mapping->nrpages < 256) |
1418 |
return 0; |
1419 |
|
1420 |
nr_locked = shrink_list(mapping, &mapping->locked_pages, max_index); |
1421 |
nr_clean = shrink_list(mapping, &mapping->clean_pages, max_index); |
1422 |
nr_dirty = shrink_list(mapping, &mapping->dirty_pages, max_index); |
1423 |
|
1424 |
return nr_locked + nr_clean + nr_dirty; |
1425 |
} |
1426 |
|
1343 |
/* |
1427 |
/* |
1344 |
* This is a generic file read routine, and uses the |
1428 |
* This is a generic file read routine, and uses the |
1345 |
* inode->i_op->readpage() function for the actual low-level |
1429 |
* inode->i_op->readpage() function for the actual low-level |
Lines 1556-1561
Link Here
|
1556 |
filp->f_reada = 1; |
1640 |
filp->f_reada = 1; |
1557 |
if (cached_page) |
1641 |
if (cached_page) |
1558 |
page_cache_release(cached_page); |
1642 |
page_cache_release(cached_page); |
|
|
1643 |
if (filp->f_flags & O_STREAMING) |
1644 |
shrink_pagecache(filp, index); |
1559 |
UPDATE_ATIME(inode); |
1645 |
UPDATE_ATIME(inode); |
1560 |
} |
1646 |
} |
1561 |
|
1647 |
|
Lines 3125-3130
Link Here
|
3125 |
inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
3211 |
inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
3126 |
mark_inode_dirty_sync(inode); |
3212 |
mark_inode_dirty_sync(inode); |
3127 |
|
3213 |
|
|
|
3214 |
if (file->f_flags & O_STREAMING) |
3215 |
shrink_pagecache(file, pos >> PAGE_CACHE_SHIFT); |
3216 |
|
3128 |
do { |
3217 |
do { |
3129 |
unsigned long index, offset; |
3218 |
unsigned long index, offset; |
3130 |
long page_fault; |
3219 |
long page_fault; |