summaryrefslogtreecommitdiff
path: root/big-little/switcher/trigger/async_switchover.c
blob: 962735aec502b87a35e3879941b601c86d4fa4e8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
/*
 * Copyright (c) 2012, ARM Limited. All rights reserved.
 *       
 * Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the
 * following conditions are met:
 *     
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the 
 * following disclaimer.
 *
 * Redistributions in binary form must reproduce the
 * above copyright notice, this list of conditions and 
 * the following disclaimer in the documentation 
 * and/or other materials provided with the distribution.
 *      
 * Neither the name of ARM nor the names of its
 * contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.                        
 */ 

#include "virt_helpers.h"
#include "misc.h"
#include "stdlib.h"
#include "gic_registers.h"

/* Physical GIC helpers provided by the GIC driver */
extern void gic_enable_int(unsigned);
extern void gic_disable_int(unsigned);
extern void gic_send_ipi(unsigned, unsigned);
extern void gic_eoi_int(unsigned);
extern void gic_deactivate_int(unsigned);
/*
 * NOTE(review): this declares __rand_r but the code below calls
 * _srand_r()/_rand_r() -- presumably declared via "stdlib.h";
 * confirm the double-underscore declaration is still needed.
 */
extern int __rand_r(struct _rand_state *);
/* 
 * Set of flags used by the interrupt handling code
 * to distinguish between IPIs sent by the big-little
 * code and the payload software.
 * TODO: Assumes only one cpu will send an IPI at a 
 * time rather than multiple cpus sending the same
 * IPI to each other at the same time from within the
 * HYP mode.
 */
/* Spin lock guarding updates to hyp_ipi_check[] */
static unsigned lock_ipi_check;
/* Per-SGI mask of cpu interfaces still expected to ack a switcher IPI */
static unsigned hyp_ipi_check[16];
/* Count of local timer irqs seen on cpu0 (used when HYP timers are off) */
static unsigned timer_count;
/* Support for the switchover interval randomly but sanely  */
static unsigned rand_async_switches = RAND_ASYNC;
/* Use HYP timer for async switches  */
unsigned hyp_timer_trigger = USE_HYP_TIMERS;

/*
 * Find an SGI id that is safe for the switcher to use: one that is
 * either not pending on our cpu interface at all, or pending but not
 * raised by us. Returns 16 when all 16 SGIs are in use by us.
 */
static unsigned get_free_ipi(void)
{
        unsigned reg, sgi;
        unsigned cpu_id = read_cpuid();
        unsigned cluster_id = read_clusterid();
        unsigned our_bit = 1 << get_cpuif(cluster_id, cpu_id);

        /*
         * Walk the four GICD_SPENDSGIR registers; each holds one
         * source-cpu byte for each of 4 SGIs.
         */
        for (reg = 0; reg < 4; reg++) {
                for (sgi = 0; sgi < 4; sgi++) {
                        unsigned pending = read32(GIC_ID_PHY_BASE +
                                                  GICD_SPENDSGIR +
                                                  (reg << 2));

                        /* Usable unless we already raised this SGI */
                        if (!(pending & (our_bit << (sgi << 3))))
                                return (reg << 2) + sgi;
                }
        }

        return 16;
}

/*
 * Acknowledge a HYP timer trigger: if the timer really asserted its
 * interrupt, disable it and mask the irq; otherwise treat the event
 * as a spurious interrupt and panic.
 */
static void ack_trigger(void)
{
        unsigned timer_ctl = read_cnthp_ctl();

        if (timer_ctl & TIMER_IRQ_STAT) {
                /* Stop the timer and mask its interrupt output */
                write_cnthp_ctl(TIMER_MASK_IRQ);
        } else {
                /* Timer reports no pending irq: should not happen */
                printf("Spurious HYP timer irq \n");
                panic();
        }

        return;
}

/*
 * Broadcast a free SGI to every cpu in this cluster (ourselves
 * included) so that all of them begin switching to the other cluster.
 */
void signal_switchover(void)
{
        unsigned ipi_no;
        unsigned cpu_mask;
        unsigned cpuif_mask;

        /* With x+1 cpus present the cpu id mask is (1 << (x+1)) - 1 */
        cpu_mask = (1 << (num_secondaries() + 1)) - 1;

        /*
         * Translate cpu ids to cpu interface ids: the 1:1 mapping no
         * longer holds with the external vGIC.
         */
        cpuif_mask = get_cpuif_mask(cpu_mask);

        /*
         * Pick an SGI that is not already pending so we do not clash
         * with IPIs belonging to the payload OS.
         */
        ipi_no = get_free_ipi();

        /*
         * Busy-wait until any earlier switcher use of this SGI has been
         * fully acked, then record which cpu interfaces we are
         * signalling. The payload never touches hyp_ipi_check[].
         */
        while (hyp_ipi_check[ipi_no])
                ;

        spin_lock(&lock_ipi_check);
        hyp_ipi_check[ipi_no] = cpuif_mask;
        dsb();
        spin_unlock(&lock_ipi_check);

        /* Raise the SGI on every targeted cpu interface */
        gic_send_ipi(cpuif_mask, ipi_no);

        return;
}

/*
 * Report whether SGI 'ipi_no' arriving at cpu interface 'cpu_if' was
 * raised by the switcher itself. If so, clear our bit in the tracking
 * flag (acking it) and return TRUE; otherwise return FALSE so the IPI
 * is treated as belonging to the payload.
 */
unsigned check_switchover_ipi(unsigned cpu_if, unsigned ipi_no)
{
        unsigned cpuif_bit = 1 << cpu_if;
        unsigned internal = FALSE;

        spin_lock(&lock_ipi_check);
        /*
         * A switcher-generated IPI has our cpu interface bit set in
         * hyp_ipi_check[]; clear it to mark this cpu as done.
         */
        if (hyp_ipi_check[ipi_no] & cpuif_bit) {
                hyp_ipi_check[ipi_no] &= ~cpuif_bit;
                dsb();
                internal = TRUE;
        }
        spin_unlock(&lock_ipi_check);

        return internal;
}

/*
 * Decide whether the interrupt that just fired should start a cluster
 * switchover. On a trigger: kick off the switch, finish handling the
 * interrupt at the GIC, and return TRUE. Returns FALSE when the
 * interrupt should be handled normally by the caller.
 *
 * int_id  - interrupt id of the pending interrupt
 * int_ack - raw acknowledge value, used for EOI / deactivation
 */
unsigned check_trigger(unsigned int_id, unsigned int_ack)
{
        unsigned cpuid = read_cpuid();
        /* Platform field from KFS_ID; 0x1 = FastModel, else emulator -- TODO confirm */
        unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;

        /*
         * If we are not using HYP mode timers for triggering a switchover
         * then check whether this is a suitable local timer interrupt to
         * switch
         */
        if (hyp_timer_trigger == FALSE) {
                /*
                 * We need to hijack every 128th timer interrupt on cpu0 and
                 * use it as a stimulus to switchover
                 */
                if (cpuid == 0 && int_id == LCL_TIMER_IRQ)
                        timer_count++;

                /*
                 * Proceed only when the bits selected by LCL_TIMER_FREQ
                 * are all zero, i.e. once per wrap of the counter.
                 * NOTE(review): when those bits are zero this falls
                 * through for ANY interrupt, not just the local timer --
                 * confirm that is intentional.
                 */
                if (timer_count & LCL_TIMER_FREQ)
                        return FALSE;
        }
        /*
         * Trigger a switchover upon getting a HYP timer IRQ. Its
         * targetted only to cpu0.
         */
        else if (int_id != HYP_TIMER_IRQ)
                return FALSE;

        /*
         * Do the needful now that it is confirmed that we need to move
         * to the other cluster
         */

        /* Indicator on emulation that switches are actually taking place */
        if (platform != 0x1) 
                printf("%d", read_clusterid());

        /*
         * Send an IPI to all the cores in this cluster to start
         * a switchover.
         */
        signal_switchover();

        if (hyp_timer_trigger)
                ack_trigger();
        else
                /* 
                 * Complete handling of the local timer interrupt at the physical gic
                 * level. Its disabled as its level triggerred and will reassert as 
                 * soon as we leave this function since its not been cleared at the 
                 * peripheral just yet. The local timer context is saved and this irq
                 * cleared in "save_hyp_context". The interrupt is enabled then.
                 */
                gic_disable_int(int_id);

        /* Finish handling this interrupt */
        gic_eoi_int(int_ack);
        /*
         * With split EOI mode enabled (GICC_CTL bit 9 -- presumably
         * EOImode; verify against the GIC spec), the EOI above only
         * drops priority, so explicitly deactivate the interrupt too.
         */
        if (read32(GIC_IC_PHY_BASE + GICC_CTL) & 0x200)
                gic_deactivate_int(int_ack);

        return TRUE;
}

/*
 * Re-arm the HYP timer interrupt at the distributor if the OS disabled
 * it while programming its view of the vGIC. Called on receipt of any
 * other interrupt; cheaper than virtualising vGIC accesses on the
 * target cpu.
 */
void keep_trigger_alive(void)
{
        unsigned enabled;

        /* Nothing to do when HYP timers are not the switch trigger */
        if (!hyp_timer_trigger)
                return;

        enabled = read32(GIC_ID_PHY_BASE + GICD_ENABLESET) &
            (1 << HYP_TIMER_IRQ);
        if (!enabled)
                gic_enable_int(HYP_TIMER_IRQ);

        return;
}

/*
 * Program and enable the HYP timer that triggers the next switchover.
 * 'tval' is the nominal countdown (expected to be 12000000 on the
 * FastModel); it is optionally randomised, and scaled down on the
 * emulator. No-op when physical local timers drive the switchover.
 */
void enable_trigger(unsigned tval)
{
        unsigned ctl = TIMER_ENABLE;
        /*
         * Bug fix: shift and mask the VALUE read from KFS_ID, not the
         * register address (matches the usage in check_trigger()).
         */
        unsigned platform = (read32(KFSCB_BASE + KFS_ID) >> 20) & 0xf;

        /* 
         * No need to lock this as its accessed by only one cpu
         * per cluster and that too one at a time.
         */
        static unsigned int rand_no = 0xdeadbeef;
        static struct _rand_state buffer;

        /*
         * Nothing needs to be done if physical local timers
         * are being used for doing a switchover.
         */
        if (hyp_timer_trigger == TRUE) {
                if (rand_async_switches) {
                        _srand_r(&buffer, rand_no);
                        rand_no = (unsigned) _rand_r(&buffer);
                }

                /* Enable timer and unmask interrupt */
                write_cnthp_ctl(ctl);

                if (rand_async_switches) {
                        unsigned interval;
                        unsigned factor = rand_no % 10;

                        /* 
                         * TODO: Assuming that the tval is always 12000000
                         * Increment or decrement the timer value randomly
                         * but never by more than a factor of 10.
                         * Bug fix: when rand_no is even, rand_no % 10 can
                         * be 0 (any multiple of 10), which previously
                         * caused a division by zero. Clamp the factor to
                         * at least 1.
                         */
                        if (factor == 0)
                                factor = 1;

                        if (rand_no % 2)
                                interval = tval * factor;
                        else
                                interval = tval / factor;

                        write_cnthp_tval(interval);

                } else {
                        /* 
                         * Program the timer to fire every 12000000 instructions
                         * on the FastModel while 1500000 cycles on the Emulator
                         */
                        if (platform == 0x1)
                                write_cnthp_tval(tval);
                        else
                                write_cnthp_tval(tval >> 3);
                }

                gic_enable_int(HYP_TIMER_IRQ);
        }

        return;
}