Skip to content

Commit 8ba0005

Browse files
committed
landlock: Fix same-layer rule unions
The original behavior was to check if the full set of requested accesses was allowed by at least a rule of every relevant layer. This didn't take into account requests for multiple accesses and same-layer rules allowing the union of these accesses in a complementary way. As a result, multiple accesses requested on a file hierarchy matching rules that, together, allowed these accesses, but without a unique rule allowing all of them, were illegitimately denied. This case should be rare in practice and it can only be triggered by the path_rename or file_open hook implementations. For instance, if, for the same layer, a rule allows execution beneath /a/b and another rule allows read beneath /a, requesting access to read and execute at the same time for /a/b should be allowed for this layer. This was an inconsistency because the union of same-layer rule accesses was already allowed if requested one at a time anyway. This fix changes the way allowed accesses are gathered over a path walk. To take into account all these rule accesses, we store in a matrix all layers granting the set of requested accesses, according to the handled accesses. To avoid heap allocation, we use an array on the stack which is 2*13 bytes. A following commit bringing the LANDLOCK_ACCESS_FS_REFER access right will increase this size to reach 112 bytes (2*14*4) in case of link or rename actions. Add a new layout1.layer_rule_unions test to check that accesses from different rules pertaining to the same layer are ORed in a file hierarchy. Also test that it is not the case for rules from different layers. Reviewed-by: Paul Moore <[email protected]> Link: https://lore.kernel.org/r/[email protected] Cc: [email protected] Signed-off-by: Mickaël Salaün <[email protected]>
1 parent 2cd7cd6 commit 8ba0005

File tree

3 files changed

+161
-26
lines changed

3 files changed

+161
-26
lines changed

security/landlock/fs.c

+52-26
Original file line numberDiff line numberDiff line change
@@ -207,45 +207,67 @@ find_rule(const struct landlock_ruleset *const domain,
207207
return rule;
208208
}
209209

210-
static inline layer_mask_t unmask_layers(const struct landlock_rule *const rule,
211-
const access_mask_t access_request,
212-
layer_mask_t layer_mask)
210+
/*
211+
* @layer_masks is read and may be updated according to the access request and
212+
* the matching rule.
213+
*
214+
* Returns true if the request is allowed (i.e. relevant layer masks for the
215+
* request are empty).
216+
*/
217+
static inline bool
218+
unmask_layers(const struct landlock_rule *const rule,
219+
const access_mask_t access_request,
220+
layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
213221
{
214222
size_t layer_level;
215223

224+
if (!access_request || !layer_masks)
225+
return true;
216226
if (!rule)
217-
return layer_mask;
227+
return false;
218228

219229
/*
220230
* An access is granted if, for each policy layer, at least one rule
221-
* encountered on the pathwalk grants the requested accesses,
222-
* regardless of their position in the layer stack. We must then check
231+
* encountered on the pathwalk grants the requested access,
232+
* regardless of its position in the layer stack. We must then check
223233
* the remaining layers for each inode, from the first added layer to
224-
* the last one.
234+
* the last one. When there is multiple requested accesses, for each
235+
* policy layer, the full set of requested accesses may not be granted
236+
* by only one rule, but by the union (binary OR) of multiple rules.
237+
* E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
225238
*/
226239
for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
227240
const struct landlock_layer *const layer =
228241
&rule->layers[layer_level];
229242
const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
243+
const unsigned long access_req = access_request;
244+
unsigned long access_bit;
245+
bool is_empty;
230246

231-
/* Checks that the layer grants access to the full request. */
232-
if ((layer->access & access_request) == access_request) {
233-
layer_mask &= ~layer_bit;
234-
235-
if (layer_mask == 0)
236-
return layer_mask;
247+
/*
248+
* Records in @layer_masks which layer grants access to each
249+
* requested access.
250+
*/
251+
is_empty = true;
252+
for_each_set_bit(access_bit, &access_req,
253+
ARRAY_SIZE(*layer_masks)) {
254+
if (layer->access & BIT_ULL(access_bit))
255+
(*layer_masks)[access_bit] &= ~layer_bit;
256+
is_empty = is_empty && !(*layer_masks)[access_bit];
237257
}
258+
if (is_empty)
259+
return true;
238260
}
239-
return layer_mask;
261+
return false;
240262
}
241263

242264
static int check_access_path(const struct landlock_ruleset *const domain,
243265
const struct path *const path,
244266
const access_mask_t access_request)
245267
{
246-
bool allowed = false;
268+
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
269+
bool allowed = false, has_access = false;
247270
struct path walker_path;
248-
layer_mask_t layer_mask;
249271
size_t i;
250272

251273
if (!access_request)
@@ -265,13 +287,20 @@ static int check_access_path(const struct landlock_ruleset *const domain,
265287
return -EACCES;
266288

267289
/* Saves all layers handling a subset of requested accesses. */
268-
layer_mask = 0;
269290
for (i = 0; i < domain->num_layers; i++) {
270-
if (domain->fs_access_masks[i] & access_request)
271-
layer_mask |= BIT_ULL(i);
291+
const unsigned long access_req = access_request;
292+
unsigned long access_bit;
293+
294+
for_each_set_bit(access_bit, &access_req,
295+
ARRAY_SIZE(layer_masks)) {
296+
if (domain->fs_access_masks[i] & BIT_ULL(access_bit)) {
297+
layer_masks[access_bit] |= BIT_ULL(i);
298+
has_access = true;
299+
}
300+
}
272301
}
273302
/* An access request not handled by the domain is allowed. */
274-
if (layer_mask == 0)
303+
if (!has_access)
275304
return 0;
276305

277306
walker_path = *path;
@@ -283,14 +312,11 @@ static int check_access_path(const struct landlock_ruleset *const domain,
283312
while (true) {
284313
struct dentry *parent_dentry;
285314

286-
layer_mask =
287-
unmask_layers(find_rule(domain, walker_path.dentry),
288-
access_request, layer_mask);
289-
if (layer_mask == 0) {
315+
allowed = unmask_layers(find_rule(domain, walker_path.dentry),
316+
access_request, &layer_masks);
317+
if (allowed)
290318
/* Stops when a rule from each layer grants access. */
291-
allowed = true;
292319
break;
293-
}
294320

295321
jump_up:
296322
if (walker_path.dentry == walker_path.mnt->mnt_root) {

security/landlock/ruleset.h

+2
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@
2222
typedef u16 access_mask_t;
2323
/* Makes sure all filesystem access rights can be stored. */
2424
static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
25+
/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
26+
static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
2527

2628
typedef u16 layer_mask_t;
2729
/* Makes sure all layers can be checked. */

tools/testing/selftests/landlock/fs_test.c

+107
Original file line numberDiff line numberDiff line change
@@ -758,6 +758,113 @@ TEST_F_FORK(layout1, ruleset_overlap)
758758
ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
759759
}
760760

761+
TEST_F_FORK(layout1, layer_rule_unions)
762+
{
763+
const struct rule layer1[] = {
764+
{
765+
.path = dir_s1d2,
766+
.access = LANDLOCK_ACCESS_FS_READ_FILE,
767+
},
768+
/* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */
769+
{
770+
.path = dir_s1d3,
771+
.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
772+
},
773+
{},
774+
};
775+
const struct rule layer2[] = {
776+
/* Doesn't change anything from layer1. */
777+
{
778+
.path = dir_s1d2,
779+
.access = LANDLOCK_ACCESS_FS_READ_FILE |
780+
LANDLOCK_ACCESS_FS_WRITE_FILE,
781+
},
782+
{},
783+
};
784+
const struct rule layer3[] = {
785+
/* Only allows write (but not read) to dir_s1d3. */
786+
{
787+
.path = dir_s1d2,
788+
.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
789+
},
790+
{},
791+
};
792+
int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1);
793+
794+
ASSERT_LE(0, ruleset_fd);
795+
enforce_ruleset(_metadata, ruleset_fd);
796+
ASSERT_EQ(0, close(ruleset_fd));
797+
798+
/* Checks s1d1 hierarchy with layer1. */
799+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
800+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
801+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
802+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
803+
804+
/* Checks s1d2 hierarchy with layer1. */
805+
ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
806+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
807+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
808+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
809+
810+
/* Checks s1d3 hierarchy with layer1. */
811+
ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
812+
ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
813+
/* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */
814+
ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
815+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
816+
817+
/* Doesn't change anything from layer1. */
818+
ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2);
819+
ASSERT_LE(0, ruleset_fd);
820+
enforce_ruleset(_metadata, ruleset_fd);
821+
ASSERT_EQ(0, close(ruleset_fd));
822+
823+
/* Checks s1d1 hierarchy with layer2. */
824+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
825+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
826+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
827+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
828+
829+
/* Checks s1d2 hierarchy with layer2. */
830+
ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
831+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
832+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
833+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
834+
835+
/* Checks s1d3 hierarchy with layer2. */
836+
ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
837+
ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
838+
/* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */
839+
ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR));
840+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
841+
842+
/* Only allows write (but not read) to dir_s1d3. */
843+
ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3);
844+
ASSERT_LE(0, ruleset_fd);
845+
enforce_ruleset(_metadata, ruleset_fd);
846+
ASSERT_EQ(0, close(ruleset_fd));
847+
848+
/* Checks s1d1 hierarchy with layer3. */
849+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
850+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
851+
ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR));
852+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
853+
854+
/* Checks s1d2 hierarchy with layer3. */
855+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY));
856+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
857+
ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR));
858+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
859+
860+
/* Checks s1d3 hierarchy with layer3. */
861+
ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY));
862+
ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY));
863+
/* dir_s1d3 should now deny READ_FILE and WRITE_FILE (O_RDWR). */
864+
ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDWR));
865+
ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
866+
}
867+
761868
TEST_F_FORK(layout1, non_overlapping_accesses)
762869
{
763870
const struct rule layer1[] = {

0 commit comments

Comments
 (0)