Skip to content

Commit

Permalink
x86/HVM: address violations of MISRA C:2012 Rules 8.2 and 8.3
Browse files Browse the repository at this point in the history
Give a name to unnamed parameters, thus addressing violations of
MISRA C:2012 Rule 8.2 ("Function types shall be in prototype form with
named parameters").
Keep consistency between parameter names and types used in function
declarations and the ones used in the corresponding function
definitions, thus addressing violations of MISRA C:2012 Rule 8.3
("All declarations of an object or function shall use the same names
and type qualifiers").

No functional changes.

Signed-off-by: Federico Serafini <[email protected]>
Acked-by: Jan Beulich <[email protected]>
  • Loading branch information
FedericoSerafini authored and jbeulich committed Jul 21, 2023
1 parent 6057672 commit 652d8fb
Show file tree
Hide file tree
Showing 10 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion xen/arch/x86/hvm/domain.c
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
}

/* Called by VCPUOP_initialise for HVM guests. */
int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx)
{
const struct domain *d = v->domain;
struct cpu_user_regs *uregs = &v->arch.user_regs;
Expand Down
6 changes: 3 additions & 3 deletions xen/arch/x86/hvm/hvm.c
Original file line number Diff line number Diff line change
Expand Up @@ -272,15 +272,15 @@ uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2)
return X86_EXC_DF;
}

void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable)
void hvm_set_rdtsc_exiting(struct domain *d, bool enable)
{
struct vcpu *v;

for_each_vcpu ( d, v )
alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
}

void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
void hvm_get_guest_pat(struct vcpu *v, uint64_t *guest_pat)
{
if ( !alternative_call(hvm_funcs.get_guest_pat, v, guest_pat) )
*guest_pat = v->arch.hvm.pat_cr;
Expand Down Expand Up @@ -426,7 +426,7 @@ static void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
update_vcpu_system_time(v);
}

u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
uint64_t hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
{
uint64_t tsc;

Expand Down
8 changes: 4 additions & 4 deletions xen/arch/x86/hvm/svm/nestedsvm.c
Original file line number Diff line number Diff line change
Expand Up @@ -837,12 +837,12 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
}

int cf_check nsvm_vcpu_vmexit_event(
struct vcpu *v, const struct x86_event *trap)
struct vcpu *v, const struct x86_event *event)
{
ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL);

nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trap->vector,
trap->error_code, trap->cr2);
nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + event->vector,
event->error_code, event->cr2);
return NESTEDHVM_VMEXIT_DONE;
}

Expand Down Expand Up @@ -1538,7 +1538,7 @@ nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack)
return NSVM_INTR_NOTINTERCEPTED;
}

bool_t
bool
nestedsvm_gif_isset(struct vcpu *v)
{
struct nestedsvm *svm = &vcpu_nestedsvm(v);
Expand Down
2 changes: 1 addition & 1 deletion xen/arch/x86/hvm/vioapic.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
/* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
#define IRQ0_SPECIAL_ROUTING 1

static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int irq);
static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin);

static struct hvm_vioapic *addr_vioapic(const struct domain *d,
unsigned long addr)
Expand Down
2 changes: 1 addition & 1 deletion xen/arch/x86/include/asm/hvm/domain.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ struct hvm_pi_ops {
* Hook into arch_vcpu_block(), which is called
* from vcpu_block() and vcpu_do_poll().
*/
void (*vcpu_block)(struct vcpu *);
void (*vcpu_block)(struct vcpu *v);
};

struct hvm_domain {
Expand Down
20 changes: 10 additions & 10 deletions xen/arch/x86/include/asm/hvm/hvm.h
Original file line number Diff line number Diff line change
Expand Up @@ -151,8 +151,8 @@ struct hvm_function_table {

void (*fpu_leave)(struct vcpu *v);

int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);
int (*get_guest_pat)(struct vcpu *v, uint64_t *gpat);
int (*set_guest_pat)(struct vcpu *v, uint64_t gpat);

void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);

Expand Down Expand Up @@ -180,8 +180,8 @@ struct hvm_function_table {
int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
void (*handle_cd)(struct vcpu *v, unsigned long value);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
void (*set_descriptor_access_exiting)(struct vcpu *v, bool);
void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);

/* Nested HVM */
int (*nhvm_vcpu_initialise)(struct vcpu *v);
Expand Down Expand Up @@ -265,10 +265,10 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
void hvm_get_guest_pat(struct vcpu *v, uint64_t *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, uint64_t guest_pat);

u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);
uint64_t hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc);

u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
Expand All @@ -282,7 +282,7 @@ int vmsi_deliver(
uint8_t dest, uint8_t dest_mode,
uint8_t delivery_mode, uint8_t trig_mode);
struct hvm_pirq_dpci;
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);

enum hvm_intblk
Expand All @@ -309,7 +309,7 @@ int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);

void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
void hvm_set_rdtsc_exiting(struct domain *d, bool enable);

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
Expand Down Expand Up @@ -350,7 +350,7 @@ void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
bool_t *writable);
void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
void hvm_unmap_guest_frame(void *p, bool_t permanent);
void hvm_mapped_guest_frames_mark_dirty(struct domain *);
void hvm_mapped_guest_frames_mark_dirty(struct domain *d);

int hvm_debug_op(struct vcpu *v, int32_t op);

Expand Down
14 changes: 7 additions & 7 deletions xen/arch/x86/include/asm/hvm/irq.h
Original file line number Diff line number Diff line change
Expand Up @@ -160,17 +160,17 @@ struct hvm_pirq_dpci {
struct list_head softirq_list;
};

void pt_pirq_init(struct domain *, struct hvm_pirq_dpci *);
bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *);
void pt_pirq_init(struct domain *d, struct hvm_pirq_dpci *dpci);
bool pt_pirq_cleanup_check(struct hvm_pirq_dpci *dpci);
int pt_pirq_iterate(struct domain *d,
int (*cb)(struct domain *,
struct hvm_pirq_dpci *, void *arg),
int (*cb)(struct domain *d,
struct hvm_pirq_dpci *dpci, void *arg),
void *arg);

#ifdef CONFIG_HVM
bool pt_pirq_softirq_active(struct hvm_pirq_dpci *);
bool pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci);
#else
static inline bool pt_pirq_softirq_active(struct hvm_pirq_dpci *dpci)
static inline bool pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci)
{
return false;
}
Expand Down Expand Up @@ -211,6 +211,6 @@ void hvm_assert_evtchn_irq(struct vcpu *v);
void hvm_set_callback_via(struct domain *d, uint64_t via);

struct pirq;
bool hvm_domain_use_pirq(const struct domain *, const struct pirq *);
bool hvm_domain_use_pirq(const struct domain *d, const struct pirq *pirq);

#endif /* __ASM_X86_HVM_IRQ_H__ */
4 changes: 2 additions & 2 deletions xen/arch/x86/include/asm/hvm/save.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@ void _hvm_write_entry(struct hvm_domain_context *h,
r; })

/* Unmarshalling: test an entry's size and typecode and record the instance */
int _hvm_check_entry(struct hvm_domain_context *h,
uint16_t type, uint32_t len, bool_t strict_length);
int _hvm_check_entry(struct hvm_domain_context *h,
uint16_t type, uint32_t len, bool strict_length);

/* Unmarshalling: copy the contents in a type-safe way */
void _hvm_read_entry(struct hvm_domain_context *h,
Expand Down
2 changes: 1 addition & 1 deletion xen/arch/x86/include/asm/hvm/support.h
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
unsigned int descriptor, bool is_write);
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
void hvm_ud_intercept(struct cpu_user_regs *);
void hvm_ud_intercept(struct cpu_user_regs *regs);

/*
* May return X86EMUL_EXCEPTION, at which point the caller is responsible for
Expand Down
2 changes: 1 addition & 1 deletion xen/arch/x86/include/asm/hvm/svm/vmcb.h
Original file line number Diff line number Diff line change
Expand Up @@ -607,7 +607,7 @@ void setup_vmcb_dump(void);
#define MSR_INTERCEPT_READ 1
#define MSR_INTERCEPT_WRITE 2
#define MSR_INTERCEPT_RW (MSR_INTERCEPT_WRITE | MSR_INTERCEPT_READ)
void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags);
#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_NONE)
#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), MSR_INTERCEPT_RW)

Expand Down

0 comments on commit 652d8fb

Please sign in to comment.