#include "ceph_debug.h"
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h> /* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include "super.h"
#include "osd_client.h"
/*
* Ceph address space ops.
*
* There are a few funny things going on here.
*
* The page->private field is used to reference a struct
* ceph_snap_context for _every_ dirty page. This indicates which
* snapshot the page was logically dirtied in, and thus which snap
* context needs to be associated with the osd write during writeback.
*
* Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode. In the absence of snapshots,
* i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
*
* When a snapshot is taken (that is, when the client receives
* notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages imply there is a cap) gets a new
* ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
* order, new snaps go to the tail). The i_wrbuffer_ref_head count is
* moved to capsnap->dirty. (Unless a sync write is currently in
* progress. In that case, the capsnap is said to be "pending", new
* writes cannot start, and the capsnap isn't "finalized" until the
* write completes (or fails) and a final size/mtime for the inode for
* that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
*
* On writeback, we must submit writes to the osd IN SNAP ORDER. So,
* we look for the first capsnap in i_cap_snaps and write out pages in
* that snap context _only_. Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * have not yet been snapped), at which point we are writing out the
 * most recently dirtied pages.
*
 * Invalidation, truncation, and so forth must take care to preserve
 * this dirty page accounting (see the worked example below).
*/
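
/*
 * A worked example of the accounting above; the numbers are made up
 * for illustration:
 *
 *   1. 10 pages are dirtied in the head context:
 *        i_wrbuffer_ref = 10, i_wrbuffer_ref_head = 10
 *   2. a snapshot is taken; a ceph_cap_snap is appended to i_cap_snaps
 *      and the head count moves to it:
 *        i_wrbuffer_ref = 10, capsnap->dirty = 10, i_wrbuffer_ref_head = 0
 *   3. 3 more pages are dirtied, now under the new head context:
 *        i_wrbuffer_ref = 13, capsnap->dirty = 10, i_wrbuffer_ref_head = 3
 *
 * At every step, i_wrbuffer_ref == sum(capsnap->dirty) +
 * i_wrbuffer_ref_head.  Writeback must drain the capsnap's 10 pages
 * (under the old snap context recorded in each page->private) before
 * touching the 3 head pages.
 */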
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb) \
(CONGESTION_ON_THRESH(congestion_kb) - \
(CONGESTION_ON_THRESH(congestion_kb) >> 2))
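
/*
 * Example, assuming 4 KB pages (PAGE_SHIFT == 12) and a congestion_kb
 * value of 8192 (8 MB):
 *
 *   CONGESTION_ON_THRESH(8192)  = 8192 >> 2   = 2048 pages
 *   CONGESTION_OFF_THRESH(8192) = 2048 - 512  = 1536 pages
 *
 * The off threshold sits at 75% of the on threshold, giving the
 * congestion signal some hysteresis.
 */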
/*
* Dirty a page. Optimistically adjust accounting, on the assumption
* that we won't race with invalidate. If we do, readjust.
*/
static int ceph_set_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
struct ceph_inode_info *ci;
int undo = 0;
struct ceph_snap_context *snapc;
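	/*
	 * A page with no mapping (e.g., one already truncated out from
	 * under us) has no per-snap accounting to do; just test-and-set
	 * the dirty bit as the generic code would.
	 */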
if (unlikely(!mapping))
return !TestSetPageDirty(page);
if (TestSetPageDirty(page)) {
dout("%p set_page_dirty %p idx %lu -- already dirty\n",
mapping->host, page, page->index);
return 0;
}
inode = mapping->host;
ci = ceph_inode(inode);
/*
* Note that we're grabbing a snapc ref here without holding
* any locks!
*/
snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
/* dirty the head */
spin_lock(&inode->i_lock);
if (ci->i_wrbuffer_ref_head == 0)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
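	/*
	 * The first dirty page takes a reference on the inode so that
	 * it cannot be evicted while dirty data remains; the reference
	 * is dropped when the last wrbuffer ref goes away.
	 */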
if (ci->i_wrbuffer_ref == 0)
igrab(inode);
++ci->i_wrbuffer_ref;
dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
mapping->host, page, page->index,
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
spin_unlock(&inode->i_lock);
	/* now adjust the page itself; mirrors __set_page_dirty_nobuffers() */
spin_lock_irq(&mapping->tree_lock);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(!PageUptodate(page));
if (mapping_cap_account_dirty(mapping)) {
__inc_zone_page_state(page, NR_FILE_DIRTY);
__inc_bdi_stat(mapping->backing_dev_info,
BDI_RECLAIMABLE);
task_io_account_write(PAGE_CACHE_SIZE);
}
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
/*
* Reference snap context in page->private. Also set
* PagePrivate so that we get invalidatepage callback.
*/
page->private = (unsigned long)snapc;
SetPagePrivate(page);
} else {
dout("ANON set_page_dirty %p (raced truncate?)\n", page);
undo = 1;
}
spin_unlock_irq(&mapping->tree_lock);
	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	BUG_ON(!PageDirty(page));
	return 1;
}
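
/*
 * For reference, ceph_set_page_dirty is installed as the
 * ->set_page_dirty hook in this file's address_space_operations
 * table, roughly:
 *
 *	const struct address_space_operations ceph_aops = {
 *		...
 *		.set_page_dirty	= ceph_set_page_dirty,
 *		.invalidatepage	= ceph_invalidatepage,
 *		...
 *	};
 *
 * so the VM invokes it whenever a page of a ceph inode is dirtied.
 */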