forked from torvalds/linux
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfslog.c
5213 lines (4272 loc) · 122 KB
/
fslog.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// SPDX-License-Identifier: GPL-2.0
/*
*
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
*/
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/slab.h>
#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
/*
 * LOG FILE structs
 */
// clang-format off
#define MaxLogFileSize 0x100000000ull
#define DefaultLogPageSize 4096
#define MinLogRecordPages 0x30

/* Header of a restart page ('RSTR') at the start of $LogFile. */
struct RESTART_HDR {
	struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
	__le32 sys_page_size; // 0x10: Page size of the system which initialized the log.
	__le32 page_size; // 0x14: Log page size used for this log file.
	__le16 ra_off; // 0x18: Offset of the RESTART_AREA within this page.
	__le16 minor_ver; // 0x1A:
	__le16 major_ver; // 0x1C:
	__le16 fixups[]; // Update sequence array (see rhdr.fix_off).
};

#define LFS_NO_CLIENT 0xffff
#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)

/* Per-client record in the RESTART_AREA client array (doubly linked). */
struct CLIENT_REC {
	__le64 oldest_lsn;
	__le64 restart_lsn; // 0x08:
	__le16 prev_client; // 0x10: Previous client index, or LFS_NO_CLIENT.
	__le16 next_client; // 0x12: Next client index, or LFS_NO_CLIENT.
	__le16 seq_num; // 0x14:
	u8 align[6]; // 0x16:
	__le32 name_bytes; // 0x1C: In bytes.
	__le16 name[32]; // 0x20: Name of client.
};

static_assert(sizeof(struct CLIENT_REC) == 0x60);
/* Two copies of these will exist at the beginning of the log file */
struct RESTART_AREA {
	__le64 current_lsn; // 0x00: Current logical end of log file.
	__le16 log_clients; // 0x08: Maximum number of clients.
	__le16 client_idx[2]; // 0x0A: Free/use index into the client record arrays.
	__le16 flags; // 0x0E: See RESTART_SINGLE_PAGE_IO.
	__le32 seq_num_bits; // 0x10: The number of bits in sequence number.
	__le16 ra_len; // 0x14:
	__le16 client_off; // 0x16: Offset of the client array from this struct.
	__le64 l_size; // 0x18: Usable log file size.
	__le32 last_lsn_data_len; // 0x20:
	__le16 rec_hdr_len; // 0x24: Log record header length.
	__le16 data_off; // 0x26: Log page data offset.
	__le32 open_log_count; // 0x28:
	__le32 align[5]; // 0x2C:
	struct CLIENT_REC clients[]; // 0x40:
};

/* Client-level log record header describing redo/undo data. */
struct LOG_REC_HDR {
	__le16 redo_op; // 0x00: NTFS_LOG_OPERATION
	__le16 undo_op; // 0x02: NTFS_LOG_OPERATION
	__le16 redo_off; // 0x04: Offset to Redo record.
	__le16 redo_len; // 0x06: Redo length.
	__le16 undo_off; // 0x08: Offset to Undo record.
	__le16 undo_len; // 0x0A: Undo length.
	__le16 target_attr; // 0x0C: Offset into the open attribute table.
	__le16 lcns_follow; // 0x0E: Number of entries in 'page_lcns'.
	__le16 record_off; // 0x10:
	__le16 attr_off; // 0x12:
	__le16 cluster_off; // 0x14:
	__le16 reserved; // 0x16:
	__le64 target_vcn; // 0x18:
	__le64 page_lcns[]; // 0x20:
};

static_assert(sizeof(struct LOG_REC_HDR) == 0x20);

#define RESTART_ENTRY_ALLOCATED 0xFFFFFFFF
#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)

/* Header of a restart table (open attrs / dirty pages / transactions). */
struct RESTART_TABLE {
	__le16 size; // 0x00: Entry size, in bytes.
	__le16 used; // 0x02: Entries
	__le16 total; // 0x04: Entries
	__le16 res[3]; // 0x06:
	__le32 free_goal; // 0x0C:
	__le32 first_free; // 0x10: Head of the free-entry list.
	__le32 last_free; // 0x14: Tail of the free-entry list.
};

static_assert(sizeof(struct RESTART_TABLE) == 0x18);

/* Maps an open-attribute table offset to the attribute's name. */
struct ATTR_NAME_ENTRY {
	__le16 off; // Offset in the Open attribute Table.
	__le16 name_bytes;
	__le16 name[];
};
/* Entry of the Open Attribute Table ('ptr' marks this as the in-memory form). */
struct OPEN_ATTR_ENRTY {
	__le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 bytes_per_index; // 0x04:
	enum ATTR_TYPE type; // 0x08:
	u8 is_dirty_pages; // 0x0C:
	u8 is_attr_name; // 0x0D: Faked field to manage 'ptr'
	u8 name_len; // 0x0E: Faked field to manage 'ptr'
	u8 res; // 0x0F:
	struct MFT_REF ref; // 0x10: File Reference of file containing attribute
	__le64 open_record_lsn; // 0x18:
	void *ptr; // 0x20:
};

/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
struct OPEN_ATTR_ENRTY_32 {
	__le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 ptr; // 0x04:
	struct MFT_REF ref; // 0x08:
	__le64 open_record_lsn; // 0x10:
	u8 is_dirty_pages; // 0x18:
	u8 is_attr_name; // 0x19:
	u8 res1[2]; // 0x1A:
	enum ATTR_TYPE type; // 0x1C:
	u8 name_len; // 0x20: In wchar
	u8 res2[3]; // 0x21:
	__le32 AttributeName; // 0x24:
	__le32 bytes_per_index; // 0x28:
};

#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);

/*
 * One entry exists in the Dirty Pages Table for each page which is dirty at
 * the time the Restart Area is written.
 */
struct DIR_PAGE_ENTRY {
	__le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 target_attr; // 0x04: Index into the Open attribute Table
	__le32 transfer_len; // 0x08:
	__le32 lcns_follow; // 0x0C:
	__le64 vcn; // 0x10: Vcn of dirty page
	__le64 oldest_lsn; // 0x18:
	__le64 page_lcns[]; // 0x20:
};

static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);

/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
struct DIR_PAGE_ENTRY_32 {
	__le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 target_attr; // 0x04: Index into the Open attribute Table
	__le32 transfer_len; // 0x08:
	__le32 lcns_follow; // 0x0C:
	__le32 reserved; // 0x10:
	__le32 vcn_low; // 0x14: Vcn of dirty page, low part.
	__le32 vcn_hi; // 0x18: Vcn of dirty page, high part.
	__le32 oldest_lsn_low; // 0x1C:
	__le32 oldest_lsn_hi; // 0x20:
	__le32 page_lcns_low; // 0x24:
	__le32 page_lcns_hi; // 0x28:
};

static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
/* Lifecycle states of a logged transaction. */
enum transact_state {
	TransactionUninitialized = 0,
	TransactionActive,
	TransactionPrepared,
	TransactionCommitted
};

/* Entry of the Transaction Table. */
struct TRANSACTION_ENTRY {
	__le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	u8 transact_state; // 0x04: See enum transact_state.
	u8 reserved[3]; // 0x05:
	__le64 first_lsn; // 0x08:
	__le64 prev_lsn; // 0x10:
	__le64 undo_next_lsn; // 0x18:
	__le32 undo_records; // 0x20: Number of undo log records pending abort
	__le32 undo_len; // 0x24: Total undo size
};

static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
/* NTFS client restart record: lsns/lengths of the checkpoint tables. */
struct NTFS_RESTART {
	__le32 major_ver; // 0x00:
	__le32 minor_ver; // 0x04:
	__le64 check_point_start; // 0x08:
	__le64 open_attr_table_lsn; // 0x10:
	__le64 attr_names_lsn; // 0x18:
	__le64 dirty_pages_table_lsn; // 0x20:
	__le64 transact_table_lsn; // 0x28:
	__le32 open_attr_len; // 0x30: In bytes
	__le32 attr_names_len; // 0x34: In bytes
	__le32 dirty_pages_len; // 0x38: In bytes
	__le32 transact_table_len; // 0x3C: In bytes
};

static_assert(sizeof(struct NTFS_RESTART) == 0x40);

/* New sizes of an attribute: allocated/valid/data/total. */
struct NEW_ATTRIBUTE_SIZES {
	__le64 alloc_size;
	__le64 valid_size;
	__le64 data_size;
	__le64 total_size;
};

/* A run of bits: offset plus count. */
struct BITMAP_RANGE {
	__le32 bitmap_off;
	__le32 bits;
};

/* A run of clusters: starting lcn plus length. */
struct LCN_RANGE {
	__le64 lcn;
	__le64 len;
};
/* The following type defines the different log record types. */
#define LfsClientRecord cpu_to_le32(1)
#define LfsClientRestart cpu_to_le32(2)

/* This is used to uniquely identify a client for a particular log file. */
struct CLIENT_ID {
	__le16 seq_num;
	__le16 client_idx;
};

/* This is the header that begins every Log Record in the log file. */
struct LFS_RECORD_HDR {
	__le64 this_lsn; // 0x00:
	__le64 client_prev_lsn; // 0x08:
	__le64 client_undo_next_lsn; // 0x10:
	__le32 client_data_len; // 0x18:
	struct CLIENT_ID client; // 0x1C: Owner of this log record.
	__le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart.
	__le32 transact_id; // 0x24:
	__le16 flags; // 0x28: LOG_RECORD_MULTI_PAGE
	u8 align[6]; // 0x2A:
};

#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)

static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);

/* Record bookkeeping embedded in each log record page header. */
struct LFS_RECORD {
	__le16 next_record_off; // 0x00: Offset of the free space in the page,
	u8 align[6]; // 0x02:
	__le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page,
};

static_assert(sizeof(struct LFS_RECORD) == 0x10);
/* Header of a 'RCRD' log record page. */
struct RECORD_PAGE_HDR {
	struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
	__le32 rflags; // 0x10: See LOG_PAGE_LOG_RECORD_END
	__le16 page_count; // 0x14:
	__le16 page_pos; // 0x16:
	struct LFS_RECORD record_hdr; // 0x18:
	__le16 fixups[10]; // 0x28:
	__le32 file_off; // 0x3c: Used when major version >= 2
};

// clang-format on

// Page contains the end of a log record.
#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)

/* True when this page contains the end of a log record. */
static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
{
	return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
}

static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
/*
* END of NTFS LOG structures
*/
/* Define some tuning parameters to keep the restart tables a reasonable size. */
#define INITIAL_NUMBER_TRANSACTIONS 5

/* Opcodes stored in LOG_REC_HDR 'redo_op'/'undo_op'. */
enum NTFS_LOG_OPERATION {
	Noop = 0x00,
	CompensationLogRecord = 0x01,
	InitializeFileRecordSegment = 0x02,
	DeallocateFileRecordSegment = 0x03,
	WriteEndOfFileRecordSegment = 0x04,
	CreateAttribute = 0x05,
	DeleteAttribute = 0x06,
	UpdateResidentValue = 0x07,
	UpdateNonresidentValue = 0x08,
	UpdateMappingPairs = 0x09,
	DeleteDirtyClusters = 0x0A,
	SetNewAttributeSizes = 0x0B,
	AddIndexEntryRoot = 0x0C,
	DeleteIndexEntryRoot = 0x0D,
	AddIndexEntryAllocation = 0x0E,
	DeleteIndexEntryAllocation = 0x0F,
	WriteEndOfIndexBuffer = 0x10,
	SetIndexEntryVcnRoot = 0x11,
	SetIndexEntryVcnAllocation = 0x12,
	UpdateFileNameRoot = 0x13,
	UpdateFileNameAllocation = 0x14,
	SetBitsInNonresidentBitMap = 0x15,
	ClearBitsInNonresidentBitMap = 0x16,
	HotFix = 0x17,
	EndTopLevelAction = 0x18,
	PrepareTransaction = 0x19,
	CommitTransaction = 0x1A,
	ForgetTransaction = 0x1B,
	OpenNonresidentAttribute = 0x1C,
	OpenAttributeTableDump = 0x1D,
	AttributeNamesDump = 0x1E,
	DirtyPageTableDump = 0x1F,
	TransactionTableDump = 0x20,
	UpdateRecordDataRoot = 0x21,
	UpdateRecordDataAllocation = 0x22,
	UpdateRelativeDataInIndex =
		0x23, // NtOfsRestartUpdateRelativeDataInIndex
	UpdateRelativeDataInIndex2 = 0x24,
	ZeroEndOfFileRecord = 0x25,
};
/*
 * Bitmap over log operations: bit 'op' is set when that restart
 * operation requires a target attribute.
 */
static const u8 AttributeRequired[] = {
	0xFC, 0xFB, 0xFF, 0x10, 0x06,
};

/* Does log operation @op require a target attribute? */
static inline bool is_target_required(u16 op)
{
	if (op > UpdateRecordDataAllocation)
		return false;
	return (AttributeRequired[op >> 3] >> (op & 7)) & 1;
}
/* Return true when @op is one of the operations log replay can skip. */
static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
{
	/* Cases listed in ascending opcode order. */
	switch (op) {
	case Noop:
	case CompensationLogRecord:
	case DeleteDirtyClusters:
	case HotFix:
	case EndTopLevelAction:
	case PrepareTransaction:
	case CommitTransaction:
	case ForgetTransaction:
	case OpenNonresidentAttribute:
	case OpenAttributeTableDump:
	case AttributeNamesDump:
	case DirtyPageTableDump:
	case TransactionTableDump:
		return true;
	default:
		return false;
	}
}
/* Context modes for walking a client's log records. */
enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };

/* Bytes per restart table: header plus the whole entry array. */
static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
{
	u32 entries = le16_to_cpu(rt->used);
	u32 entry_size = le16_to_cpu(rt->size);

	return sizeof(struct RESTART_TABLE) + entries * entry_size;
}
/* Log record length. */
static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
{
u16 t16 = le16_to_cpu(lr->lcns_follow);
return struct_size(lr, page_lcns, max_t(u16, 1, t16));
}
/* Log context block: per-walk state for one client's log records. */
struct lcb {
	struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn.
	struct LOG_REC_HDR *log_rec;
	u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
	struct CLIENT_ID client;
	bool alloc; // If true then we should deallocate 'log_rec'.
};

/* Release a 'struct lcb' and the buffers it owns. */
static void lcb_put(struct lcb *ctx)
{
	/* 'log_rec' is freed only when it was separately allocated. */
	if (ctx->alloc)
		kfree(ctx->log_rec);
	kfree(ctx->lrh);
	kfree(ctx);
}
/* Find the oldest lsn from active clients. */
static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
				     __le16 next_client, u64 *oldest_lsn)
{
	const struct CLIENT_REC *cr;
	u64 lsn;

	/* Walk the singly linked client list starting at @next_client. */
	for (; next_client != LFS_NO_CLIENT_LE; next_client = cr->next_client) {
		cr = ca + le16_to_cpu(next_client);
		lsn = le64_to_cpu(cr->oldest_lsn);

		/* A client whose oldest lsn is 0 contributes nothing. */
		if (lsn && lsn < *oldest_lsn)
			*oldest_lsn = lsn;
	}
}
/*
 * is_rst_page_hdr_valid - Sanity-check a restart page header read from disk.
 *
 * @file_off: byte offset of the page in $LogFile (0 or sys_page_size).
 * @rhdr:     candidate RESTART_HDR (untrusted on-disk data).
 */
static inline bool is_rst_page_hdr_valid(u32 file_off,
					 const struct RESTART_HDR *rhdr)
{
	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
	u32 page_size = le32_to_cpu(rhdr->page_size);
	u32 end_usa;
	u16 ro;

	/* Both page sizes must be powers of two and at least a sector. */
	if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
	    sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
		return false;
	}

	/* Check that if the file offset isn't 0, it is the system page size. */
	if (file_off && file_off != sys_page)
		return false;

	/* Check support version 1.1+. */
	if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
		return false;

	if (le16_to_cpu(rhdr->major_ver) > 2)
		return false;

	/* The restart area offset must be quad-aligned and inside the page. */
	ro = le16_to_cpu(rhdr->ra_off);
	if (!IS_ALIGNED(ro, 8) || ro > sys_page)
		return false;

	/* The restart area must not overlap the update sequence array. */
	end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
	end_usa += le16_to_cpu(rhdr->rhdr.fix_off);

	if (ro < end_usa)
		return false;

	return true;
}
/*
 * is_rst_area_valid - Validate the RESTART_AREA inside a restart page.
 *
 * Assumes is_rst_page_hdr_valid() already accepted @rhdr, so 'ra_off'
 * is aligned and within the system page.
 */
static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
{
	const struct RESTART_AREA *ra;
	u16 cl, fl, ul;
	u32 off, l_size, file_dat_bits, file_size_round;
	u16 ro = le16_to_cpu(rhdr->ra_off);
	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);

	/* The fixed part of the restart area must fit in the first sector. */
	if (ro + offsetof(struct RESTART_AREA, l_size) >
	    SECTOR_SIZE - sizeof(short))
		return false;

	ra = Add2Ptr(rhdr, ro);
	cl = le16_to_cpu(ra->log_clients);

	if (cl > 1)
		return false;

	/* The client array offset must be aligned and in the first sector. */
	off = le16_to_cpu(ra->client_off);
	if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))
		return false;

	/* The whole client array must fit inside the system page. */
	off += cl * sizeof(struct CLIENT_REC);

	if (off > sys_page)
		return false;

	/*
	 * Check the restart length field and whether the entire
	 * restart area is contained within that length.
	 */
	if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
	    off > le16_to_cpu(ra->ra_len)) {
		return false;
	}

	/*
	 * As a final check make sure that the use list and the free list
	 * are either empty or point to a valid client.
	 */
	fl = le16_to_cpu(ra->client_idx[0]);
	ul = le16_to_cpu(ra->client_idx[1]);
	if ((fl != LFS_NO_CLIENT && fl >= cl) ||
	    (ul != LFS_NO_CLIENT && ul >= cl))
		return false;

	/* Make sure the sequence number bits match the log file size. */
	l_size = le64_to_cpu(ra->l_size);

	file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
	file_size_round = 1u << (file_dat_bits + 3);

	if (file_size_round != l_size &&
	    (file_size_round < l_size || (file_size_round / 2) > l_size)) {
		return false;
	}

	/* The log page data offset and record header length must be quad-aligned. */
	if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
	    !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
		return false;

	return true;
}
/*
 * is_client_area_valid - Validate the client lists of a restart area.
 *
 * Walks the free list (client_idx[0]) and the in-use list (client_idx[1])
 * checking that every index is in range, that neither list cycles, and
 * that each list head has no predecessor.
 */
static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
					bool usa_error)
{
	u16 ro = le16_to_cpu(rhdr->ra_off);
	const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
	u16 ra_len = le16_to_cpu(ra->ra_len);
	const struct CLIENT_REC *ca;
	u32 i;

	/* With fixup (USA) errors, only trust data in the first sector. */
	if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
		return false;

	/* Find the start of the client array. */
	ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));

	/*
	 * Start with the free list.
	 * Check that all the clients are valid and that there isn't a cycle.
	 * Do the in-use list on the second pass.
	 */
	for (i = 0; i < 2; i++) {
		u16 client_idx = le16_to_cpu(ra->client_idx[i]);
		bool first_client = true;
		u16 clients = le16_to_cpu(ra->log_clients);

		while (client_idx != LFS_NO_CLIENT) {
			const struct CLIENT_REC *cr;

			/* 'clients' counts down to catch list cycles. */
			if (!clients ||
			    client_idx >= le16_to_cpu(ra->log_clients))
				return false;

			clients -= 1;
			cr = ca + client_idx;

			client_idx = le16_to_cpu(cr->next_client);

			if (first_client) {
				first_client = false;
				if (cr->prev_client != LFS_NO_CLIENT_LE)
					return false;
			}
		}
	}

	return true;
}
/*
 * remove_client
 *
 * Unlink client record @cr from the doubly linked list headed at @head.
 */
static inline void remove_client(struct CLIENT_REC *ca,
				 const struct CLIENT_REC *cr, __le16 *head)
{
	__le16 prev = cr->prev_client;
	__le16 next = cr->next_client;

	/* Forward link: either the list head or the predecessor's next. */
	if (prev == LFS_NO_CLIENT_LE)
		*head = next;
	else
		ca[le16_to_cpu(prev)].next_client = next;

	/* Back link of the successor, when one exists. */
	if (next != LFS_NO_CLIENT_LE)
		ca[le16_to_cpu(next)].prev_client = prev;
}
/*
 * add_client - Add a client record to the start of a list.
 */
static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
{
	__le16 index_le = cpu_to_le16(index);
	struct CLIENT_REC *cr = ca + index;

	cr->prev_client = LFS_NO_CLIENT_LE;
	cr->next_client = *head;

	/* The old head (if any) now points back at the new first client. */
	if (*head != LFS_NO_CLIENT_LE)
		ca[le16_to_cpu(*head)].prev_client = index_le;

	*head = index_le;
}
/*
 * enum_rstbl - Iterate over the allocated entries of a Restart Table.
 *
 * @t: table to walk (may be NULL).
 * @c: entry returned by the previous call, or NULL to start over.
 *
 * Returns the next entry whose first dword equals RESTART_ENTRY_ALLOCATED,
 * or NULL at the end of the table.
 */
static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
{
	__le32 *e;
	u32 bprt;
	u16 rsize = t ? le16_to_cpu(t->size) : 0;

	if (!c) {
		/* Start at the first entry, right behind the header. */
		if (!t || !t->total)
			return NULL;
		e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
	} else {
		e = Add2Ptr(c, rsize);
	}

	/* Loop until we hit the first one allocated, or the end of the list. */
	for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
	     e = Add2Ptr(e, rsize)) {
		if (*e == RESTART_ENTRY_ALLOCATED_LE)
			return e;
	}
	return NULL;
}
/*
 * find_dp - Search for a @vcn in Dirty Page Table.
 */
static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
					     u32 target_attr, u64 vcn)
{
	__le32 ta = cpu_to_le32(target_attr);
	struct DIR_PAGE_ENTRY *dp = NULL;

	/* Scan every allocated entry for one whose run covers @vcn. */
	for (;;) {
		u64 first_vcn;

		dp = enum_rstbl(dptbl, dp);
		if (!dp)
			return NULL;

		if (dp->target_attr != ta)
			continue;

		first_vcn = le64_to_cpu(dp->vcn);
		if (vcn >= first_vcn &&
		    vcn < first_vcn + le32_to_cpu(dp->lcns_follow))
			return dp;
	}
}
/*
 * norm_file_page - Normalize the usable log size to whole pages.
 *
 * Rounds *l_size down to a @page_size boundary. Returns the page size
 * in use, or 0 when the file is too small to hold the two restart
 * pages plus MinLogRecordPages record pages.
 */
static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
{
	if (use_default)
		page_size = DefaultLogPageSize;

	/* Round the file size down to a system page boundary. */
	*l_size &= ~(page_size - 1);

	return *l_size < (MinLogRecordPages + 2) * page_size ? 0 : page_size;
}
/*
 * check_log_rec - Validate a client log record header (untrusted data).
 *
 * @lr:    candidate LOG_REC_HDR.
 * @bytes: bytes available for the whole record.
 * @tr:    transaction id, an offset into the transaction table.
 * @bytes_per_attr_entry: size of one open-attribute table entry,
 *         used to validate 'target_attr'; callers pass a non-zero value.
 */
static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
			  u32 bytes_per_attr_entry)
{
	u16 t16;

	if (bytes < sizeof(struct LOG_REC_HDR))
		return false;
	if (!tr)
		return false;

	/* 'tr' must land exactly on a TRANSACTION_ENTRY boundary. */
	if ((tr - sizeof(struct RESTART_TABLE)) %
	    sizeof(struct TRANSACTION_ENTRY))
		return false;

	/* Redo/undo data offsets must be quad-aligned. */
	if (le16_to_cpu(lr->redo_off) & 7)
		return false;

	if (le16_to_cpu(lr->undo_off) & 7)
		return false;

	if (lr->target_attr)
		goto check_lcns;

	/* No target attribute: neither opcode may require one. */
	if (is_target_required(le16_to_cpu(lr->redo_op)))
		return false;

	if (is_target_required(le16_to_cpu(lr->undo_op)))
		return false;

check_lcns:
	if (!lr->lcns_follow)
		goto check_length;

	/* 'target_attr' must land on an open-attribute entry boundary. */
	t16 = le16_to_cpu(lr->target_attr);
	if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
		return false;

check_length:
	/* The advertised lcns must actually fit in the record. */
	if (bytes < lrh_length(lr))
		return false;

	return true;
}
/*
 * check_rstbl - Validate an on-disk Restart Table (untrusted input).
 *
 * @rt:    candidate table.
 * @bytes: number of bytes available at @rt.
 *
 * Checks the header fields, verifies every entry is either allocated or
 * a plausible free-list link, and walks the free list making sure it
 * stays inside the table, contains no allocated entries, and terminates.
 */
static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
{
	u32 ts;
	u32 i, off;
	u16 rsize = le16_to_cpu(rt->size);
	u16 ne = le16_to_cpu(rt->used);
	u32 ff = le32_to_cpu(rt->first_free);
	u32 lf = le32_to_cpu(rt->last_free);

	ts = rsize * ne + sizeof(struct RESTART_TABLE);

	if (!rsize || rsize > bytes ||
	    rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
	    le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
	    (ff && ff < sizeof(struct RESTART_TABLE)) ||
	    (lf && lf < sizeof(struct RESTART_TABLE))) {
		return false;
	}

	/*
	 * Verify each entry is either allocated or points
	 * to a valid offset in the table.
	 */
	for (i = 0; i < ne; i++) {
		off = le32_to_cpu(*(__le32 *)Add2Ptr(
			rt, i * rsize + sizeof(struct RESTART_TABLE)));

		if (off != RESTART_ENTRY_ALLOCATED && off &&
		    (off < sizeof(struct RESTART_TABLE) ||
		     ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
			return false;
		}
	}

	/*
	 * Walk through the list headed by the first entry to make
	 * sure none of the entries are currently being used.
	 * Every link comes from disk, so bound it before dereferencing,
	 * and cap the walk at 'ne' links: a longer list must contain a
	 * cycle.
	 */
	for (i = 0, off = ff; off; i++) {
		if (off == RESTART_ENTRY_ALLOCATED)
			return false;

		/* Reject out-of-table links and cyclic lists. */
		if (off > ts - sizeof(__le32) || i >= ne)
			return false;

		off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
	}

	return true;
}
/*
 * free_rsttbl_idx - Free a previously allocated index in a Restart Table.
 *
 * Entries below 'free_goal' go on the head of the free list; others
 * are appended at the tail with a zero terminator.
 */
static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
{
	__le32 *e;
	u32 lf = le32_to_cpu(rt->last_free);
	__le32 off_le = cpu_to_le32(off);

	e = Add2Ptr(rt, off);

	if (off < le32_to_cpu(rt->free_goal)) {
		/* Push onto the head of the free list. */
		*e = rt->first_free;
		rt->first_free = off_le;
		if (!lf)
			rt->last_free = off_le;
	} else {
		/* Append at the tail; this entry becomes the terminator. */
		if (lf)
			*(__le32 *)Add2Ptr(rt, lf) = off_le;
		else
			rt->first_free = off_le;
		rt->last_free = off_le;
		*e = 0;
	}

	le16_sub_cpu(&rt->total, 1);
}
/*
 * init_rsttbl - Allocate a Restart Table of @used entries, @esize bytes each.
 *
 * All entries are chained into the free list. Returns NULL on
 * allocation failure.
 */
static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
{
	u32 off;
	__le32 *e, *last_free;
	u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
	u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
	struct RESTART_TABLE *t = kzalloc(bytes, GFP_NOFS);

	if (!t)
		return NULL;

	t->size = cpu_to_le16(esize);
	t->used = cpu_to_le16(used);
	t->free_goal = cpu_to_le32(~0u);
	t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
	t->last_free = cpu_to_le32(lf);

	/* Chain every entry but the last into the free list. */
	e = (__le32 *)(t + 1);
	last_free = Add2Ptr(t, lf);
	off = sizeof(struct RESTART_TABLE) + esize;
	while (e < last_free) {
		*e = cpu_to_le32(off);
		e = Add2Ptr(e, esize);
		off += esize;
	}
	return t;
}
/*
 * extend_rsttbl - Reallocate @tbl with room for @add more entries.
 *
 * Existing entries and the free list are carried over, and the old
 * table is freed. Returns NULL on allocation failure, in which case
 * @tbl is left untouched (not freed).
 */
static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
						  u32 add, u32 free_goal)
{
	u16 esize = le16_to_cpu(tbl->size);
	__le32 osize = cpu_to_le32(bytes_per_rt(tbl));
	u32 used = le16_to_cpu(tbl->used);
	struct RESTART_TABLE *rt;

	rt = init_rsttbl(esize, used + add);
	if (!rt)
		return NULL;

	/* Copy the old entries; init_rsttbl already free-listed the tail. */
	memcpy(rt + 1, tbl + 1, esize * used);

	rt->free_goal = free_goal == ~0u
				? cpu_to_le32(~0u)
				: cpu_to_le32(sizeof(struct RESTART_TABLE) +
					      free_goal * esize);

	if (tbl->first_free) {
		/* Splice the old free list in front of the new entries. */
		rt->first_free = tbl->first_free;
		*(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
	} else {
		rt->first_free = osize;
	}

	rt->total = tbl->total;

	kfree(tbl);
	return rt;
}
/*
 * alloc_rsttbl_idx
 *
 * Allocate an index from within a previously initialized Restart Table.
 *
 * Extends the table by 16 entries when the free list is empty; *tbl may
 * be replaced. Returns the zeroed entry (first dword set to
 * RESTART_ENTRY_ALLOCATED), or NULL on allocation failure, in which
 * case *tbl is left valid and unchanged.
 */
static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
{
	u32 off;
	__le32 *e;
	struct RESTART_TABLE *t = *tbl;

	if (!t->first_free) {
		/*
		 * Extend first, publish to *tbl only on success, so the
		 * caller never loses its (still valid) table on failure.
		 */
		t = extend_rsttbl(t, 16, ~0u);
		if (!t)
			return NULL;
		*tbl = t;
	}

	off = le32_to_cpu(t->first_free);

	/* Dequeue this entry and zero it. */
	e = Add2Ptr(t, off);

	t->first_free = *e;

	memset(e, 0, le16_to_cpu(t->size));

	*e = RESTART_ENTRY_ALLOCATED_LE;

	/* If list is going empty, then we fix the last_free as well. */
	if (!t->first_free)
		t->last_free = 0;

	le16_add_cpu(&t->total, 1);

	return Add2Ptr(t, off);
}
/*
 * alloc_rsttbl_from_idx
 *
 * Allocate a specific index from within a previously initialized Restart Table.
 *
 * Extends the table when @vbo lies beyond it; *tbl may be replaced.
 * Returns the zeroed entry at @vbo, or NULL on allocation failure, in
 * which case *tbl is left valid and unchanged.
 */
static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
{
	u32 off;
	__le32 *e;
	struct RESTART_TABLE *rt = *tbl;
	u32 bytes = bytes_per_rt(rt);
	u16 esize = le16_to_cpu(rt->size);

	/* If the entry is not in the table, we will have to extend the table. */
	if (vbo >= bytes) {
		/*
		 * Extend the size by computing the number of entries between
		 * the existing size and the desired index and adding 1 to that.
		 */
		u32 bytes2idx = vbo - bytes;

		/*
		 * There should always be an integral number of entries
		 * being added. Now extend the table.
		 */
		rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
		if (!rt)
			return NULL;
		/* Publish only on success so *tbl stays valid on failure. */
		*tbl = rt;
	}

	/* See if the entry is already allocated, and just return if it is. */
	e = Add2Ptr(rt, vbo);

	if (*e == RESTART_ENTRY_ALLOCATED_LE)
		return e;

	/*
	 * Walk through the table, looking for the entry we're
	 * interested in and the previous entry.
	 */
	off = le32_to_cpu(rt->first_free);
	e = Add2Ptr(rt, off);

	if (off == vbo) {
		/* This is a match. */
		rt->first_free = *e;
		goto skip_looking;
	}

	/*
	 * Need to walk through the list looking for the predecessor
	 * of our entry.
	 */
	for (;;) {
		/* Remember the entry just found. */
		u32 last_off = off;
		__le32 *last_e = e;

		/*
		 * Should never run out of entries.
		 * NOTE(review): assumes @vbo is present in the free list;
		 * a corrupted list would spin here — confirm callers
		 * validate the table first.
		 */
		/* Look up the next entry in the list. */
		off = le32_to_cpu(*last_e);
		e = Add2Ptr(rt, off);

		/* If this is our match we are done. */
		if (off == vbo) {
			*last_e = *e;

			/*
			 * If this was the last entry, we update that
			 * table as well.
			 */
			if (le32_to_cpu(rt->last_free) == off)
				rt->last_free = cpu_to_le32(last_off);
			break;
		}
	}

skip_looking:
	/* If the list is now empty, we fix the last_free as well. */
	if (!rt->first_free)
		rt->last_free = 0;

	/* Zero this entry. */
	memset(e, 0, esize);

	*e = RESTART_ENTRY_ALLOCATED_LE;

	le16_add_cpu(&rt->total, 1);

	return e;
}
#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
#define NTFSLOG_WRAPPED 0x00000001
#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
#define NTFSLOG_NO_LAST_LSN 0x00000004
#define NTFSLOG_REUSE_TAIL 0x00000010
#define NTFSLOG_NO_OLDEST_LSN 0x00000020
/* Helper struct to work with NTFS $LogFile. */
struct ntfs_log {
struct ntfs_inode *ni;
u32 l_size;
u32 sys_page_size;
u32 sys_page_mask;
u32 page_size;
u32 page_mask; // page_size - 1
u8 page_bits;
struct RECORD_PAGE_HDR *one_page_buf;
struct RESTART_TABLE *open_attr_tbl;
u32 transaction_id;
u32 clst_per_page;