ore: RAID5 Write
This is finally the RAID5 Write support.

The bigger part of this patch is not the XOR engine itself, but the
read4write logic, which is a complete mini prepare_for_striping
reading engine that can read scattered pages of a stripe into cache
so they can be used for the XOR calculation. That is, if the write
was not stripe aligned.
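
For example (numbers chosen only for illustration): with
stripe_unit = 64K, group_width = 4 and one parity device, a full
stripe holds 3 * 64K = 192K of data. A 100K write starting at offset
50K of such a stripe covers only 50K-150K, so the pages for 0-50K and
150K-192K must first be found in cache (or read) before the parity
unit can be XORed.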

The main algorithm behind the XOR engine is the two-dimensional array:
	struct __stripe_pages_2d.
A drawing might save a thousand words:
---

__stripe_pages_2d
       |
 n = pages_in_stripe_unit;
 w = group_width - parity;
       |                            pages array presented to the XOR lib
       |                                                |
       V                                                |
 __1_page_stripe[0].pages --> [c0][c1]..[cw][c_par] <---|
       |                                                |
 __1_page_stripe[1].pages --> [c0][c1]..[cw][c_par] <---
       |
...    |                         ...
       |
 __1_page_stripe[n].pages --> [c0][c1]..[cw][c_par]
                               ^
                               |
           data added columns first then row

---
The pages are put on this array columns first, i.e.:
	p0-of-c0, p1-of-c0, ... pn-of-c0, p0-of-c1, ...
So we are doing a corner turn of the pages.

Note that pages will zigzag down and left, but are put sequentially
in growing order. So when the time comes to XOR the stripe, only the
beginning and end of the array need be checked. We scan the array
and any NULL spot will be filled by pages-to-be-read.
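
In C the layout above boils down to something like the following (a
simplified sketch; the real definitions in this patch carry more
bookkeeping fields):

	struct __1_page_stripe {
		/* one page per device column: w data pages + the parity page */
		struct page **pages;
	};

	struct __stripe_pages_2d {
		unsigned pages_in_unit;	/* n: rows, pages per stripe_unit */
		unsigned data_devs;	/* w: group_width - parity */
		/* one row per page offset inside the stripe_unit */
		struct __1_page_stripe _1p_stripes[];
	};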

An FS that wants to support RAID5 needs to supply an
operations-vector that searches for a given page in cache and
specifies whether the page is uptodate or needs reading. All the
pages to be read are put on a slave ore_io_state and read
synchronously. All the pages of a stripe are read in one IO, using
the scatter-gather mechanism.
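
Such an operations-vector is roughly of the following shape (a sketch
only; see the ore.h changes of this patch for the actual interface):

	struct _ore_r4w_op {
		/* Return the cached page at page_index and say whether it
		 * is already uptodate or must be read. @priv is a
		 * caller-supplied cookie.
		 */
		struct page *(*get_page)(void *priv, u64 page_index,
					 bool *uptodate);
		/* Release a page obtained with get_page */
		void (*put_page)(void *priv, struct page *page);
	};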

In write we constrain our IO to be incomplete on at most a single
stripe. Meaning either the complete IO is within a single stripe, so
we might have pages to read at both the beginning and the end of the
stripe; or we have some reading to do at the beginning but end on a
stripe boundary. The left-over pages are pushed to the next IO by the
API already established by previous work, where an IO offset/length
combination presented to the ORE might get the length truncated and
the user must re-submit the leftover pages. (Both exofs and NFS
support this.)

But any ORE user should make its best effort to align its IO
beforehand and avoid complications. A cached ore_layout->stripe_size
member can be used for that calculation. (NOTE: the ORE demands that
stripe_size may not be bigger than 32 bits.)
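
A caller could, for example, trim the submitted length so the IO ends
on a stripe boundary along these lines (an illustrative helper, not
part of this patch; it only assumes the cached stripe_size member
mentioned above):

	#include <linux/math64.h>	/* div_u64() */

	static u64 trim_to_stripe(struct ore_layout *layout, u64 offset, u64 length)
	{
		u32 stripe_size = layout->stripe_size;	/* <= 32 bits per the NOTE */
		u64 end = offset + length;
		u64 aligned_end = div_u64(end, stripe_size) * stripe_size;

		if (aligned_end > offset)	/* end the IO on a stripe boundary */
			return aligned_end - offset;
		return length;			/* the IO fits inside one stripe */
	}

The trimmed-off tail is then re-submitted as the next IO, exactly as
the truncated-length API described above already requires.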

What else? Well read it and tell me.

Signed-off-by: Boaz Harrosh <[email protected]>
Boaz Harrosh committed Oct 25, 2011
1 parent a1fec1d commit 769ba8d
Showing 5 changed files with 587 additions and 16 deletions.
9 changes: 8 additions & 1 deletion fs/exofs/Kconfig
@@ -1,10 +1,17 @@
# Note ORE needs to "select ASYNC_XOR". So as not to force multiple selects
# for every ORE user we do it like this. Any user should add itself here
# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
# selected here, and we default to "ON". So in effect it is like being
# selected by any of the users.
config ORE
tristate
depends on EXOFS_FS
select ASYNC_XOR
default SCSI_OSD_ULD

config EXOFS_FS
tristate "exofs: OSD based file system support"
depends on SCSI_OSD_ULD
select ORE
help
EXOFS is a file system that uses an OSD storage device,
as its backing storage.
36 changes: 28 additions & 8 deletions fs/exofs/ore.c
@@ -95,6 +95,14 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
layout->max_io_length =
(BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
layout->group_width;
if (layout->parity) {
unsigned stripe_length =
(layout->group_width - layout->parity) *
layout->stripe_unit;

layout->max_io_length /= stripe_length;
layout->max_io_length *= stripe_length;
}
return 0;
}
EXPORT_SYMBOL(ore_verify_layout);
@@ -118,7 +126,7 @@ static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
return ore_comp_dev(ios->oc, index);
}

static int _ore_get_io_state(struct ore_layout *layout,
int _ore_get_io_state(struct ore_layout *layout,
struct ore_components *oc, unsigned numdevs,
unsigned sgs_per_dev, unsigned num_par_pages,
struct ore_io_state **pios)
@@ -334,7 +342,7 @@ static void _done_io(struct osd_request *or, void *p)
kref_put(&ios->kref, _last_io);
}

static int ore_io_execute(struct ore_io_state *ios)
int ore_io_execute(struct ore_io_state *ios)
{
DECLARE_COMPLETION_ONSTACK(wait);
bool sync = (ios->done == NULL);
@@ -597,6 +605,8 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
ret = -ENOMEM;
goto out;
}
_add_stripe_page(ios->sp2d, &ios->si, pages[pg]);

pgbase = 0;
++pg;
}
@@ -636,6 +646,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)

dev_order = _dev_order(devs_in_group, mirrors_p1, si->par_dev, dev);
si->cur_comp = dev_order;
si->cur_pg = si->unit_off / PAGE_SIZE;

while (length) {
unsigned comp = dev - first_dev;
@@ -677,14 +688,14 @@ static int _prepare_for_striping(struct ore_io_state *ios)
length -= cur_len;

si->cur_comp = (si->cur_comp + 1) % group_width;
if (unlikely((dev == si->par_dev) ||
(!length && ios->parity_pages))) {
if (!length)
if (unlikely((dev == si->par_dev) || (!length && ios->sp2d))) {
if (!length && ios->sp2d) {
/* If we are writing and this is the very last
* stripe. then operate on parity dev.
*/
dev = si->par_dev;
if (ios->reading)
}
if (ios->sp2d)
/* In writes cur_len just means if it's the
* last one. See _ore_add_parity_unit.
*/
@@ -709,6 +720,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
devs_in_group + first_dev;
/* Next stripe, start fresh */
si->cur_comp = 0;
si->cur_pg = 0;
}
}
out:
@@ -873,6 +885,14 @@ int ore_write(struct ore_io_state *ios)
int i;
int ret;

if (unlikely(ios->sp2d && !ios->r4w)) {
/* A library is attempting a RAID-write without providing
* a pages lock interface.
*/
WARN_ON_ONCE(1);
return -ENOTSUPP;
}

ret = _prepare_for_striping(ios);
if (unlikely(ret))
return ret;
@@ -888,7 +908,7 @@ int ore_write(struct ore_io_state *ios)
}
EXPORT_SYMBOL(ore_write);

static int _read_mirror(struct ore_io_state *ios, unsigned cur_comp)
int _ore_read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
struct osd_request *or;
struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
@@ -952,7 +972,7 @@ int ore_read(struct ore_io_state *ios)
return ret;

for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
ret = _read_mirror(ios, i);
ret = _ore_read_mirror(ios, i);
if (unlikely(ret))
return ret;
}