@@ -51,48 +51,48 @@ int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
 }
 EXPORT_SYMBOL(kdbgetsymval);

-static char *kdb_name_table[100];	/* arbitrary size */
-
-/*
- * kdbnearsym - Return the name of the symbol with the nearest address
- *	less than 'addr'.
+/**
+ * kdbnearsym() - Return the name of the symbol with the nearest address
+ *                less than @addr.
+ * @addr: Address to check for near symbol
+ * @symtab: Structure to receive results
  *
- * Parameters:
- *	addr	Address to check for symbol near
- *	symtab	Structure to receive results
- * Returns:
- *	0	No sections contain this address, symtab zero filled
- *	1	Address mapped to module/symbol/section, data in symtab
- * Remarks:
- *	2.6 kallsyms has a "feature" where it unpacks the name into a
- *	string. If that string is reused before the caller expects it
- *	then the caller sees its string change without warning. To
- *	avoid cluttering up the main kdb code with lots of kdb_strdup,
- *	tests and kfree calls, kdbnearsym maintains an LRU list of the
- *	last few unique strings. The list is sized large enough to
- *	hold active strings, no kdb caller of kdbnearsym makes more
- *	than ~20 later calls before using a saved value.
+ * WARNING: This function may return a pointer to a single statically
+ * allocated buffer (namebuf). kdb's unusual calling context (single
+ * threaded, all other CPUs halted) provides us sufficient locking for
+ * this to be safe. The only constraint imposed by the static buffer is
+ * that the caller must consume any previous reply prior to another call
+ * to lookup a new symbol.
+ *
+ * Note that, strictly speaking, some architectures may re-enter the kdb
+ * trap if the system turns out to be very badly damaged and this breaks
+ * the single-threaded assumption above. In these circumstances successful
+ * continuation and exit from the inner trap is unlikely to work and any
+ * user attempting this receives a prominent warning before being allowed
+ * to progress. In these circumstances we remain memory safe because
+ * namebuf[KSYM_NAME_LEN-1] will never change from '\0' although we do
+ * tolerate the possibility of garbled symbol display from the outer kdb
+ * trap.
+ *
+ * Return:
+ * * 0 - No sections contain this address, symtab zero filled
+ * * 1 - Address mapped to module/symbol/section, data in symtab
  */
 int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 {
 	int ret = 0;
 	unsigned long symbolsize = 0;
 	unsigned long offset = 0;
-#define knt1_size 128		/* must be >= kallsyms table size */
-	char *knt1 = NULL;
+	static char namebuf[KSYM_NAME_LEN];

 	kdb_dbg_printf(AR, "addr=0x%lx, symtab=%px\n", addr, symtab);
 	memset(symtab, 0, sizeof(*symtab));

 	if (addr < 4096)
 		goto out;
-	knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
-	if (!knt1) {
-		kdb_func_printf("addr=0x%lx cannot kmalloc knt1\n", addr);
-		goto out;
-	}
+
 	symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
-				(char **)(&symtab->mod_name), knt1);
+				(char **)(&symtab->mod_name), namebuf);
 	if (offset > 8*1024*1024) {
 		symtab->sym_name = NULL;
 		addr = offset = symbolsize = 0;
@@ -101,63 +101,14 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 	symtab->sym_end = symtab->sym_start + symbolsize;
 	ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';

-	if (ret) {
-		int i;
-		/* Another 2.6 kallsyms "feature". Sometimes the sym_name is
-		 * set but the buffer passed into kallsyms_lookup is not used,
-		 * so it contains garbage. The caller has to work out which
-		 * buffer needs to be saved.
-		 *
-		 * What was Rusty smoking when he wrote that code?
-		 */
-		if (symtab->sym_name != knt1) {
-			strncpy(knt1, symtab->sym_name, knt1_size);
-			knt1[knt1_size-1] = '\0';
-		}
-		for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
-			if (kdb_name_table[i] &&
-			    strcmp(kdb_name_table[i], knt1) == 0)
-				break;
-		}
-		if (i >= ARRAY_SIZE(kdb_name_table)) {
-			debug_kfree(kdb_name_table[0]);
-			memmove(kdb_name_table, kdb_name_table+1,
-			       sizeof(kdb_name_table[0]) *
-			       (ARRAY_SIZE(kdb_name_table)-1));
-		} else {
-			debug_kfree(knt1);
-			knt1 = kdb_name_table[i];
-			memmove(kdb_name_table+i, kdb_name_table+i+1,
-			       sizeof(kdb_name_table[0]) *
-			       (ARRAY_SIZE(kdb_name_table)-i-1));
-		}
-		i = ARRAY_SIZE(kdb_name_table) - 1;
-		kdb_name_table[i] = knt1;
-		symtab->sym_name = kdb_name_table[i];
-		knt1 = NULL;
-	}
-
 	if (symtab->mod_name == NULL)
 		symtab->mod_name = "kernel";
 	kdb_dbg_printf(AR, "returns %d symtab->sym_start=0x%lx, symtab->mod_name=%px, symtab->sym_name=%px (%s)\n",
 		   ret, symtab->sym_start, symtab->mod_name, symtab->sym_name, symtab->sym_name);
-
 out:
-	debug_kfree(knt1);
 	return ret;
 }

-void kdbnearsym_cleanup(void)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
-		if (kdb_name_table[i]) {
-			debug_kfree(kdb_name_table[i]);
-			kdb_name_table[i] = NULL;
-		}
-	}
-}
-
 static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];

 /*
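The kernel-doc added above shifts a constraint onto callers: because kdbnearsym() can now return a pointer into its single static namebuf, any previously returned sym_name must be consumed (or copied) before the next lookup. The caller-side sketch below illustrates that pattern; kdb_show_two_symbols() is a hypothetical helper written for this note, not part of the patch, and assumes the kdb-private declarations (kdb_symtab_t, kdbnearsym(), kdb_printf()) are in scope.

/*
 * Hypothetical caller-side sketch (not part of this patch), assuming the
 * kdb-private declarations from kdb_private.h are available.  Because
 * kdbnearsym() may hand back a pointer into its single static namebuf,
 * the first result is copied out before the second lookup can reuse it.
 */
static void kdb_show_two_symbols(unsigned long addr1, unsigned long addr2)
{
	char first[KSYM_NAME_LEN];
	kdb_symtab_t symtab;

	if (kdbnearsym(addr1, &symtab))
		strscpy(first, symtab.sym_name, sizeof(first));
	else
		first[0] = '\0';

	/* The second lookup may overwrite the buffer behind sym_name. */
	if (kdbnearsym(addr2, &symtab))
		kdb_printf("0x%lx: %s, 0x%lx: %s\n",
			   addr1, first, addr2, symtab.sym_name);
}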
@@ -655,230 +606,6 @@ unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
 	return (mask & kdb_task_state_string(state)) != 0;
 }

-/* Last ditch allocator for debugging, so we can still debug even when
- * the GFP_ATOMIC pool has been exhausted. The algorithms are tuned
- * for space usage, not for speed. One smallish memory pool, the free
- * chain is always in ascending address order to allow coalescing,
- * allocations are done in brute force best fit.
- */
-
-struct debug_alloc_header {
-	u32 next;	/* offset of next header from start of pool */
-	u32 size;
-	void *caller;
-};
-
-/* The memory returned by this allocator must be aligned, which means
- * so must the header size. Do not assume that sizeof(struct
- * debug_alloc_header) is a multiple of the alignment, explicitly
- * calculate the overhead of this header, including the alignment.
- * The rest of this code must not use sizeof() on any header or
- * pointer to a header.
- */
-#define dah_align 8
-#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
-
-static u64 debug_alloc_pool_aligned[256*1024/dah_align];	/* 256K pool */
-static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
-static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
-
-/* Locking is awkward. The debug code is called from all contexts,
- * including non maskable interrupts. A normal spinlock is not safe
- * in NMI context. Try to get the debug allocator lock, if it cannot
- * be obtained after a second then give up. If the lock could not be
- * previously obtained on this cpu then only try once.
- *
- * sparse has no annotation for "this function _sometimes_ acquires a
- * lock", so fudge the acquire/release notation.
- */
-static DEFINE_SPINLOCK(dap_lock);
-static int get_dap_lock(void)
-	__acquires(dap_lock)
-{
-	static int dap_locked = -1;
-	int count;
-	if (dap_locked == smp_processor_id())
-		count = 1;
-	else
-		count = 1000;
-	while (1) {
-		if (spin_trylock(&dap_lock)) {
-			dap_locked = -1;
-			return 1;
-		}
-		if (!count--)
-			break;
-		udelay(1000);
-	}
-	dap_locked = smp_processor_id();
-	__acquire(dap_lock);
-	return 0;
-}
-
-void *debug_kmalloc(size_t size, gfp_t flags)
-{
-	unsigned int rem, h_offset;
-	struct debug_alloc_header *best, *bestprev, *prev, *h;
-	void *p = NULL;
-	if (!get_dap_lock()) {
-		__release(dap_lock);	/* we never actually got it */
-		return NULL;
-	}
-	h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
-	if (dah_first_call) {
-		h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
-		dah_first_call = 0;
-	}
-	size = ALIGN(size, dah_align);
-	prev = best = bestprev = NULL;
-	while (1) {
-		if (h->size >= size && (!best || h->size < best->size)) {
-			best = h;
-			bestprev = prev;
-			if (h->size == size)
-				break;
-		}
-		if (!h->next)
-			break;
-		prev = h;
-		h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
-	}
-	if (!best)
-		goto out;
-	rem = best->size - size;
-	/* The pool must always contain at least one header */
-	if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
-		goto out;
-	if (rem >= dah_overhead) {
-		best->size = size;
-		h_offset = ((char *)best - debug_alloc_pool) +
-			   dah_overhead + best->size;
-		h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
-		h->size = rem - dah_overhead;
-		h->next = best->next;
-	} else
-		h_offset = best->next;
-	best->caller = __builtin_return_address(0);
-	dah_used += best->size;
-	dah_used_max = max(dah_used, dah_used_max);
-	if (bestprev)
-		bestprev->next = h_offset;
-	else
-		dah_first = h_offset;
-	p = (char *)best + dah_overhead;
-	memset(p, POISON_INUSE, best->size - 1);
-	*((char *)p + best->size - 1) = POISON_END;
- out:
-	spin_unlock(&dap_lock);
-	return p;
-}
-
-void debug_kfree(void *p)
-{
-	struct debug_alloc_header *h;
-	unsigned int h_offset;
-	if (!p)
-		return;
-	if ((char *)p < debug_alloc_pool ||
-	    (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
-		kfree(p);
-		return;
-	}
-	if (!get_dap_lock()) {
-		__release(dap_lock);	/* we never actually got it */
-		return;		/* memory leak, cannot be helped */
-	}
-	h = (struct debug_alloc_header *)((char *)p - dah_overhead);
-	memset(p, POISON_FREE, h->size - 1);
-	*((char *)p + h->size - 1) = POISON_END;
-	h->caller = NULL;
-	dah_used -= h->size;
-	h_offset = (char *)h - debug_alloc_pool;
-	if (h_offset < dah_first) {
-		h->next = dah_first;
-		dah_first = h_offset;
-	} else {
-		struct debug_alloc_header *prev;
-		unsigned int prev_offset;
-		prev = (struct debug_alloc_header *)(debug_alloc_pool +
-						     dah_first);
-		while (1) {
-			if (!prev->next || prev->next > h_offset)
-				break;
-			prev = (struct debug_alloc_header *)
-				(debug_alloc_pool + prev->next);
-		}
-		prev_offset = (char *)prev - debug_alloc_pool;
-		if (prev_offset + dah_overhead + prev->size == h_offset) {
-			prev->size += dah_overhead + h->size;
-			memset(h, POISON_FREE, dah_overhead - 1);
-			*((char *)h + dah_overhead - 1) = POISON_END;
-			h = prev;
-			h_offset = prev_offset;
-		} else {
-			h->next = prev->next;
-			prev->next = h_offset;
-		}
-	}
-	if (h_offset + dah_overhead + h->size == h->next) {
-		struct debug_alloc_header *next;
-		next = (struct debug_alloc_header *)
-			(debug_alloc_pool + h->next);
-		h->size += dah_overhead + next->size;
-		h->next = next->next;
-		memset(next, POISON_FREE, dah_overhead - 1);
-		*((char *)next + dah_overhead - 1) = POISON_END;
-	}
-	spin_unlock(&dap_lock);
-}
-
-void debug_kusage(void)
-{
-	struct debug_alloc_header *h_free, *h_used;
-#ifdef CONFIG_IA64
-	/* FIXME: using dah for ia64 unwind always results in a memory leak.
-	 * Fix that memory leak first, then set debug_kusage_one_time = 1 for
-	 * all architectures.
-	 */
-	static int debug_kusage_one_time;
-#else
-	static int debug_kusage_one_time = 1;
-#endif
-	if (!get_dap_lock()) {
-		__release(dap_lock);	/* we never actually got it */
-		return;
-	}
-	h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
-	if (dah_first == 0 &&
-	    (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
-	     dah_first_call))
-		goto out;
-	if (!debug_kusage_one_time)
-		goto out;
-	debug_kusage_one_time = 0;
-	kdb_func_printf("debug_kmalloc memory leak dah_first %d\n", dah_first);
-	if (dah_first) {
-		h_used = (struct debug_alloc_header *)debug_alloc_pool;
-		kdb_func_printf("h_used %px size %d\n", h_used, h_used->size);
-	}
-	do {
-		h_used = (struct debug_alloc_header *)
-			  ((char *)h_free + dah_overhead + h_free->size);
-		kdb_func_printf("h_used %px size %d caller %px\n",
-				h_used, h_used->size, h_used->caller);
-		h_free = (struct debug_alloc_header *)
-			 (debug_alloc_pool + h_free->next);
-	} while (h_free->next);
-	h_used = (struct debug_alloc_header *)
-		  ((char *)h_free + dah_overhead + h_free->size);
-	if ((char *)h_used - debug_alloc_pool !=
-	    sizeof(debug_alloc_pool_aligned))
-		kdb_func_printf("h_used %px size %d caller %px\n",
-				h_used, h_used->size, h_used->caller);
- out:
-	spin_unlock(&dap_lock);
-}
-
 /* Maintain a small stack of kdb_flags to allow recursion without disturbing
  * the global kdb state.
  */
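For reference, the deleted debug_kmalloc() above performed a brute-force best-fit scan over a free chain whose headers live inside one static pool and link to each other by byte offset. The standalone sketch below models just that scan under those assumptions; the struct, pool size, and function names are illustrative inventions for this note, not kernel identifiers.

#include <stddef.h>
#include <stdint.h>

/* Toy model of the removed allocator's free chain: free-block headers sit
 * inside one static pool and point at each other by byte offset (0 means
 * end of chain).  best_fit() walks the whole chain and returns the smallest
 * block that is still large enough, stopping early on an exact fit.
 */
struct toy_header {
	uint32_t next;	/* offset of the next free header, 0 == end */
	uint32_t size;	/* usable bytes that follow this header */
};

static uint64_t toy_pool_aligned[512];	/* 4K pool, 8-byte aligned */
static char *toy_pool = (char *)toy_pool_aligned;

static struct toy_header *best_fit(uint32_t first, size_t want)
{
	struct toy_header *h = (struct toy_header *)(toy_pool + first);
	struct toy_header *best = NULL;

	for (;;) {
		if (h->size >= want && (!best || h->size < best->size)) {
			best = h;
			if (h->size == want)
				break;	/* exact fit, stop early */
		}
		if (!h->next)
			break;
		h = (struct toy_header *)(toy_pool + h->next);
	}
	return best;	/* NULL if no free block can satisfy the request */
}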