@@ -2261,25 +2261,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 
 }
 
-#ifdef CONFIG_CMA
-/*
- * It is waste of effort to scan and reclaim CMA pages if it is not available
- * for current allocation context. Kswapd can not be enrolled as it can not
- * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
- */
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
-	return !current_is_kswapd() &&
-			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
-			folio_migratetype(folio) == MIGRATE_CMA;
-}
-#else
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
-	return false;
-}
-#endif
-
 /*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
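The check deleted above skipped CMA folios only for direct reclaimers whose allocation context could never be satisfied from CMA memory: gfp_migratetype() derives a migratetype from the GFP flags, and GFP_KERNEL lacks __GFP_MOVABLE, so it never maps to MIGRATE_MOVABLE. Kswapd was excluded because it reclaims with sc->gfp_mask = GFP_KERNEL and would otherwise have skipped CMA folios unconditionally. A minimal standalone model of that decision, in plain userspace C with illustrative stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA };

struct scan_ctx {
	bool is_kswapd;          /* current_is_kswapd() in the kernel */
	enum migratetype gfp_mt; /* gfp_migratetype(sc->gfp_mask) */
};

/* Mirrors the removed skip_cma(): skip a folio in a CMA pageblock only
 * when the reclaimer is not kswapd and its allocation context cannot
 * use CMA pages anyway. */
static bool skip_cma_model(enum migratetype folio_mt, const struct scan_ctx *sc)
{
	return !sc->is_kswapd &&
	       sc->gfp_mt != MIGRATE_MOVABLE &&
	       folio_mt == MIGRATE_CMA;
}

int main(void)
{
	struct scan_ctx direct = { .is_kswapd = false, .gfp_mt = MIGRATE_UNMOVABLE };
	struct scan_ctx kswapd = { .is_kswapd = true,  .gfp_mt = MIGRATE_UNMOVABLE };

	/* Direct GFP_KERNEL reclaim skipped CMA folios under the old check... */
	printf("direct reclaim: skip=%d\n", skip_cma_model(MIGRATE_CMA, &direct));
	/* ...while kswapd never did. */
	printf("kswapd:         skip=%d\n", skip_cma_model(MIGRATE_CMA, &kswapd));
	return 0;
}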
@@ -2326,8 +2307,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx ||
-				skip_cma(folio, sc)) {
+		if (folio_zonenum(folio) > sc->reclaim_idx) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
@@ -4971,7 +4951,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
 	}
 
 	/* ineligible */
-	if (zone > sc->reclaim_idx || skip_cma(folio, sc)) {
+	if (zone > sc->reclaim_idx) {
 		gen = folio_inc_gen(lruvec, folio, false);
 		list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
 		return true;
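With skip_cma() gone, both call sites fall back to the single zone-eligibility test visible in the context lines: folio_zonenum(folio) > sc->reclaim_idx. The two sites still handle an ineligible folio differently, as the surrounding diff shows: isolate_lru_folios() routes it to folios_skipped and counts it in nr_skipped[] per zone, while MGLRU's sort_folio() bumps its generation with folio_inc_gen() and moves it to the tail of its generation list. A one-line model of the remaining test, with a hypothetical standalone enum in place of the kernel's zone numbering:

#include <stdbool.h>

enum zone_idx { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE };

/* Mirrors `folio_zonenum(folio) > sc->reclaim_idx`: reclaim driven by a
 * low-zone allocation must not isolate folios from zones that could
 * never satisfy that allocation. */
static bool zone_ineligible(enum zone_idx folio_zone, enum zone_idx reclaim_idx)
{
	return folio_zone > reclaim_idx;
}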