forked from mtalexander/hercules-390
-
Notifications
You must be signed in to change notification settings - Fork 0
/
clock.c
1243 lines (983 loc) · 34.1 KB
/
clock.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* CLOCK.C (c) Copyright Jan Jaeger, 2000-2012 */
/* TOD Clock functions */
/* The emulated hardware clock is based on the host clock, adjusted */
/* by means of an offset and a steering rate. */
/*
Hercules Clock and Timer Formats
64-bit Clock/Timer Format
+------------------------------+--+
| | |
+------------------------------+--+
0 59 63
128-bit Clock Format
+------------------------------+--+------------------------------+
| | | |
+------------------------------+--+------------------------------+
0 59 63 127
where:
- Bit-59 represents one microsecond
- Bit-63 represents 62.5 nanoseconds
Usage Notes:
- Bits 0-63 of the 64-bit clock format are identical to bits 0-63
of the 128-bit clock format
- The 128-bit clock format extends the 64-bit clock format by an
additional 64-bits to the right (low-order) of the 64-bit clock
format.
- Hercules timers only use the 64-bit clock/timer format.
- The Hercules clock format has a period of over 36,533 years.
- With masking, the 128-bit clock format may be used for extended TOD clock
operations.
- The ETOD2TOD() call may be used to convert a Hercules 128-bit clock value
to a standard TOD clock value.
*/
#include "hstdinc.h"
#if !defined(_HENGINE_DLL_)
#define _HENGINE_DLL_
#endif
#include "hercules.h"
#include "opcode.h"
#include "inline.h"
#include "sr.h"
#if !defined(_CLOCK_C_)
#define _CLOCK_C_
#include "clock.h"
/*----------------------------------------------------------------------------*/
/* host_ETOD - Primary high-resolution clock fetch and conversion */
/*----------------------------------------------------------------------------*/
ETOD*
host_ETOD (ETOD* ETOD)
{
    struct timespec now;

    /* CLOCK_MONOTONIC plus an adjustment would be preferable, but a
     * host sleep/hibernate destroys monotonic-clock consistency, so
     * the host realtime clock is used instead.
     */
    clock_gettime( CLOCK_REALTIME, &now );

    /* Convert the host timespec reading into extended-TOD format */
    timespec2ETOD( ETOD, &now );

    return ETOD;                    /* Address of caller's result    */
}
// static int clock_state = CC_CLOCK_SET;

static CSR episode_old;                     /* Prior steering episode */
static CSR episode_new;                     /* Pending/current episode*/
static CSR *episode_current = &episode_new; /* Episode now in effect  */

/*-------------------------------------------------------------------*/
/* Reset the clock-steering registers to their power-on state: zero  */
/* start time, base offset and both steering rates, with the "new"   */
/* episode current and the old episode made identical to it.         */
/*-------------------------------------------------------------------*/
void csr_reset()
{
    episode_new.start_time = 0;
    episode_new.base_offset = 0;
    episode_new.fine_s_rate = 0;
    episode_new.gross_s_rate = 0;
    episode_current = &episode_new;
    episode_old = episode_new;
}
static ETOD universal_tod;          /* Most recent host clock reading */

/* Fetch the host clock into the global universal_tod and return its  */
/* high-order 64 bits (the 64-bit TOD portion).                       */
static TOD universal_clock(void) /* really: any clock used as a base */
{
    host_ETOD(&universal_tod);
    return (universal_tod.high);
}
/* As universal_clock(), but store the full 128-bit reading into the  */
/* caller-supplied ETOD instead of the global; returns the high word. */
static TOD universal_clock_extended(ETOD* ETOD)
{
    host_ETOD(ETOD);
    return (ETOD->high);
}
/* The hercules hardware clock, based on the universal clock, but */
/* running at its own speed as optionally set by set_tod_steering() */
/* The hardware clock returns a unique value */
static double hw_steering = 0.0; /* Current TOD clock steering rate */
static TOD hw_episode; /* TOD of start of steering episode */
static S64 hw_offset = 0; /* Current offset between TOD and HW */
// static ETOD hw_tod = {0, 0}; /* Globally defined in clock.h */
static ETOD hw_unique_clock_tick = {0, 0};

static TOD hw_adjust(TOD base_tod);

/*-------------------------------------------------------------------*/
/* Calibrate hw_unique_clock_tick: the smallest increment the        */
/* hardware clock must advance by to stay unique when the host clock */
/* has not moved between two reads.                                  */
/*                                                                   */
/* Method: time 65536 hw_adjust(universal_clock()) calls, subtract   */
/* one 64-bit tick per iteration (m1), and divide by 65536 (shift by */
/* 16) to get the average cost of one clock read.  A zero result is  */
/* forced up to the minimum representable increment; under reduced   */
/* precision builds the value is rounded up and masked so uniqueness */
/* survives the truncated precision.                                 */
/*                                                                   */
/* Returns the last hw_adjust() result from the calibration loop.    */
/*-------------------------------------------------------------------*/
static TOD
hw_calculate_unique_tick (void)
{
    static const ETOD m1 = ETOD_init(0,65536);
    ETOD temp;
    register TOD result;
    register int n;

    /* Snapshot the host clock before the timing loop */
    temp.high = universal_tod.high;
    temp.low = universal_tod.low;

    /* Non-zero tick so hw_adjust() doesn't recurse back here */
    hw_unique_clock_tick.low = 1;

    for (n = 0; n < 65536; ++n)
        result = hw_adjust(universal_clock());

    /* Elapsed time, minus one minimal tick per iteration, / 65536 */
    ETOD_sub(&temp, universal_tod, temp);
    ETOD_sub(&temp, temp, m1);
    ETOD_shift(&hw_unique_clock_tick, temp, 16);

    if (hw_unique_clock_tick.low == 0 &&
        hw_unique_clock_tick.high == 0)
        hw_unique_clock_tick.high = 1;
#if defined(TOD_95BIT_PRECISION) || \
    defined(TOD_64BIT_PRECISION) || \
    defined(TOD_MIN_PRECISION)
    else
    {
        /* Round up to the precision actually kept, then mask off the
         * bits below that precision.
         */
        static const ETOD adj =
#if defined(TOD_95BIT_PRECISION)
            ETOD_init(0,0x0000000100000000ULL);
#else
            ETOD_init(0,0x8000000000000000ULL);
#endif
        ETOD_add(&hw_unique_clock_tick, hw_unique_clock_tick, adj);
#if defined(TOD_95BIT_PRECISION)
        hw_unique_clock_tick.low &= 0xFFFFFFFE00000000ULL;
#else
        hw_unique_clock_tick.low = 0;
#endif
    }
#endif
    return ( result );
}
/*-------------------------------------------------------------------*/
/* Apply steering to a raw clock value and guarantee uniqueness.     */
/* Updates the global hw_tod; returns its new high-order word.       */
/* Caller must hold the TOD lock (all callers here do).              */
/*-------------------------------------------------------------------*/
static TOD hw_adjust(TOD base_tod)
{
    /* Apply hardware offset, this is the offset achieved by all
       previous steering episodes */
    base_tod += hw_offset;

    /* Apply the steering offset from the current steering episode */
    /* TODO: Shift resolution to permit adjustment by less than 62.5
     * nanosecond increments (1/16 microsecond).
     */
    base_tod += (S64)(base_tod - hw_episode) * hw_steering;

    /* Ensure that the clock returns a unique value */
    if (hw_tod.high < base_tod)
        /* Clock moved forward: take the new value (comma operator
         * keeps both assignments under the single "if") */
        hw_tod.high = base_tod,
        hw_tod.low = universal_tod.low;
    else if (hw_unique_clock_tick.low == 0 &&
             hw_unique_clock_tick.high == 0)
        /* Tick size not yet calibrated; calibrate now (which itself
         * advances hw_tod via recursive hw_adjust calls) */
        hw_calculate_unique_tick();
    else
        /* Clock did not advance: bump by the minimum unique tick */
        ETOD_add(&hw_tod, hw_tod, hw_unique_clock_tick);
    return ( hw_tod.high );
}
/* Lockless variant of hw_clock(): caller must already hold todlock. */
static TOD hw_clock_l(void)
{
    /* Get time of day (GMT); adjust speed and ensure uniqueness */
    return ( hw_adjust(universal_clock()) );
}
/*-------------------------------------------------------------------*/
/* Return the steered, uniqueness-adjusted hardware clock (GMT).     */
/* Acquires and releases the TOD lock around the actual read.        */
/*-------------------------------------------------------------------*/
TOD hw_clock(void)
{
    register TOD now;

    obtain_lock( &sysblk.todlock );
    now = hw_clock_l();
    release_lock( &sysblk.todlock );

    return now;
}
/* set_tod_steering(double) sets a new steering rate. */
/* When a new steering episode begins, the offset is adjusted, */
/* and the new steering rate takes effect */
/* set_tod_steering(double) sets a new steering rate. */
/* When a new steering episode begins, the offset is adjusted, */
/* and the new steering rate takes effect */
void set_tod_steering(const double steering)
{
    obtain_lock(&sysblk.todlock);
    /* Get current offset between hw_adjust and universal TOD value */
    /* (hw_clock_l() also refreshes universal_tod as a side effect,  */
    /* so the subtraction uses two readings of the same instant)     */
    hw_offset = hw_clock_l() - universal_tod.high;
    hw_episode = hw_tod.high;
    hw_steering = steering;
    release_lock(&sysblk.todlock);
}
/* Start a new episode */
/* Start a new episode: capture the current offset, mark the episode */
/* start time, and derive the floating-point steering rate from the  */
/* combined fine + gross steering rates (each a signed 32-bit value  */
/* scaled by 2**-44).  Caller must hold the TOD lock.                */
static INLINE void start_new_episode()
{
    hw_offset = hw_tod.high - universal_tod.high;
    hw_episode = hw_tod.high;
    episode_new.start_time = hw_episode;
    /* TODO: Convert to binary arithmetic to avoid floating point conversions */
    hw_steering = ldexp(2,-44) *
        (S32)(episode_new.fine_s_rate + episode_new.gross_s_rate);
    episode_current = &episode_new;
}
/* Prepare for a new episode */
/* Prepare for a new episode: snapshot the current settings into     */
/* episode_old and make it current, so the next clock update sees    */
/* the transition and calls start_new_episode().  Idempotent while a */
/* new episode is already pending.  Caller must hold the TOD lock.   */
static INLINE void prepare_new_episode()
{
    if(episode_current == &episode_new)
    {
        episode_old = episode_new;
        episode_current = &episode_old;
    }
}
/* Adjust the epoch for all active cpu's in the configuration */
static U64 adjust_epoch_cpu_all(const U64 epoch)
{
    int cpu;

    /* Update the TOD clock of all CPU's in the configuration
       as we simulate 1 shared TOD clock, and do not support the
       TOD clock sync check */
    for (cpu = 0; cpu < sysblk.maxcpu; cpu++)
    {
        obtain_lock(&sysblk.cpulock[cpu]);
        if (IS_CPU_ONLINE(cpu))
            sysblk.regs[cpu]->tod_epoch = epoch;
        release_lock(&sysblk.cpulock[cpu]);
    }

    return epoch;   /* echoed back for caller convenience */
}
/* Return the current TOD clock steering rate (unlocked read). */
double get_tod_steering(void)
{
    return hw_steering;
}
/* Replace the TOD epoch: reset clock steering, set the global epoch */
/* under the TOD lock, then propagate it to every online CPU.        */
void set_tod_epoch(const S64 epoch)
{
    obtain_lock(&sysblk.todlock);
    csr_reset();
    tod_epoch = epoch;
    release_lock(&sysblk.todlock);
    adjust_epoch_cpu_all(epoch);
}
/* Adjust the TOD epoch by a signed delta (rather than replacing it) */
/* and propagate the resulting epoch to every online CPU.            */
void adjust_tod_epoch(const S64 epoch)
{
    obtain_lock(&sysblk.todlock);
    csr_reset();
    tod_epoch += epoch;
    release_lock(&sysblk.todlock);
    adjust_epoch_cpu_all(tod_epoch);
}
/* Set the guest TOD clock to a specific value by computing the      */
/* epoch offset from the current hardware clock.                     */
void set_tod_clock(const U64 tod)
{
    set_tod_epoch(tod - hw_clock());
}
/* Return the current TOD epoch offset (unlocked read). */
S64 get_tod_epoch()
{
    return tod_epoch;
}
/* Set the gross steering rate for the next episode; takes effect    */
/* when the pending episode is started by the next clock update.     */
static void set_gross_steering_rate(const S32 gsr)
{
    obtain_lock(&sysblk.todlock);
    prepare_new_episode();
    episode_new.gross_s_rate = gsr;
    release_lock(&sysblk.todlock);
}
/* Set the fine steering rate for the next episode; takes effect     */
/* when the pending episode is started by the next clock update.     */
static void set_fine_steering_rate(const S32 fsr)
{
    obtain_lock(&sysblk.todlock);
    prepare_new_episode();
    episode_new.fine_s_rate = fsr;
    release_lock(&sysblk.todlock);
}
/* Replace the base offset for the next steering episode.            */
static void set_tod_offset(const S64 offset)
{
    obtain_lock(&sysblk.todlock);
    prepare_new_episode();
    episode_new.base_offset = offset;
    release_lock(&sysblk.todlock);
}
/* Adjust the base offset for the next steering episode relative to  */
/* the old episode's offset.                                         */
static void adjust_tod_offset(const S64 offset)
{
    obtain_lock(&sysblk.todlock);
    prepare_new_episode();
    episode_new.base_offset = episode_old.base_offset + offset;
    release_lock(&sysblk.todlock);
}
/* The CPU timer is internally kept as an offset to the thread CPU time
* used in LPAR mode, or TOD clock in BASIC mode. The CPU timer counts
* down as the clock approaches the timer epoch.
*
* To be in agreement with reporting of time in association with real
* diagnose code x'204' for partition management and resource reporting,
* only user time is considered as CPU time used. System time is
* considered to be overhead time of the system (partition overhead or
* management time).
*/
/*-------------------------------------------------------------------*/
/* Return this CPU thread's user-mode CPU time in ETOD format.       */
/* Falls back to the host TOD if getrusage() fails, so callers       */
/* always receive a monotonically usable value.                      */
/*-------------------------------------------------------------------*/
TOD
thread_cputime(const REGS *regs)
{
    register TOD result;
    struct rusage rusage;
    int rc;

    /* NOTE(review): sysblk.cputid[...] is cast to int and passed as
     * the getrusage "who" argument -- presumably a platform-specific
     * per-thread usage query; confirm against the host port.
     */
    rc = getrusage((int)sysblk.cputid[regs->cpuad], &rusage);
    if (unlikely(rc == -1))
        result = host_tod();
    else
        result = timeval2etod(&rusage.ru_utime);    /* user time only */
    return (result);
}
/*-------------------------------------------------------------------*/
/* Return this CPU thread's user-mode CPU time in microseconds.      */
/* Falls back to the host TOD if the usage query fails.              */
/*-------------------------------------------------------------------*/
U64
thread_cputime_us(const REGS *regs)
{
    struct rusage usage;

    if (unlikely( getrusage( (int)sysblk.cputid[regs->cpuad], &usage ) == -1 ))
        return etod2us( host_tod() );

    /* Only user time counts as CPU time used; system time is treated
     * as partition management overhead.
     */
    return timeval2us( &usage.ru_utime );
}
/* Set the CPU timer epoch on this context and mirror it to any      */
/* distinct linked host and guest (SIE) register contexts so all     */
/* three views of the timer stay in step.                            */
static INLINE void
set_cpu_timer_epoch(REGS *regs, const TOD epoch)
{
    register REGS* guestregs = regs->guestregs;
    register REGS* hostregs = regs->hostregs;

    /* Set CPU timer epoch */
    regs->cpu_timer_epoch = epoch;

    /* Reset epoch value for host and guest */
    if (hostregs && hostregs != regs)
        hostregs->cpu_timer_epoch = epoch;
    if (guestregs && guestregs != regs)
        guestregs->cpu_timer_epoch = epoch;
}
/* Count the CPU timer down by the time elapsed since its epoch and  */
/* advance the epoch.  Negative/zero intervals (clock not advanced)  */
/* leave the timer untouched.  Returns the (possibly updated) timer. */
static INLINE S64
cpu_timer_update(REGS *regs, TOD new_epoch)
{
    register S64 interval = (S64)(new_epoch - regs->cpu_timer_epoch);
    register S64 result = regs->cpu_timer;

    if (interval > 0)
    {
        result -= interval;                     /* timer counts down */
        regs->cpu_timer_epoch = new_epoch;
        regs->cpu_timer = result;
    }
    return (result);
}
/* Update a linked (host or guest) context's timer, but only when it */
/* exists and is distinct from the context already updated.          */
static INLINE void
cpu_timer_update_linked(REGS *regs, REGS *linked_regs, TOD new_epoch)
{
    if (linked_regs && linked_regs != regs)
        cpu_timer_update(linked_regs, new_epoch);
}
/*-------------------------------------------------------------------*/
/* Return the time source the CPU timer is counting against:         */
/* thread CPU time when in timer (LPAR) mode, host TOD otherwise.    */
/*-------------------------------------------------------------------*/
static INLINE TOD
mode_cputime (REGS *regs)
{
    if (regs->cpu_timer_mode)
        return thread_cputime( regs );
    return host_tod();
}
/*-------------------------------------------------------------------*/
/* Determine whether the CPU timer should run against thread CPU     */
/* time: only in LPAR mode and only while not in a wait state.       */
/*-------------------------------------------------------------------*/
static INLINE int
cpu_timer_mode(REGS *regs)
{
    /* FIX: the operand had been corrupted to "®s->psw" (the text
     * "&reg" mangled into the HTML entity '®'); restored to the
     * intended address-of expression "&regs->psw".
     */
    return (sysblk.lparmode && !WAITSTATE(&regs->psw));
}
/*-------------------------------------------------------------------*/
/* Re-evaluate the CPU timer mode and, on a mode change, bring the   */
/* timer current under the old mode before switching the epoch to    */
/* the new mode's time source.                                       */
/*-------------------------------------------------------------------*/
void
set_cpu_timer_mode(REGS *regs)
{
    int newmode = cpu_timer_mode(regs);

    /* Update CPU timer epoch if changing mode */
    if ((U32)newmode != regs->cpu_timer_mode)
    {
        cpu_timer(regs);        /* settle timer under the old mode */
        regs->cpu_timer_mode = newmode;
        set_cpu_timer_epoch(regs, mode_cputime(regs));
    }
}
/* Load the CPU timer from an architected TOD-format value: convert  */
/* to internal ETOD format and restart the epoch from the current    */
/* mode's time source.  Returns the value as given.                  */
S64 set_cpu_timer(REGS *regs, const TOD timer)
{
    regs->cpu_timer_mode = cpu_timer_mode(regs);

    /* Prepare new timer value and epoch value */
    regs->cpu_timer = tod2etod(timer);
    set_cpu_timer_epoch(regs, mode_cputime(regs));

    return (timer);
}
/* Load both the host and (distinct) guest CPU timers; the guest     */
/* shares the host's freshly set epoch so the pair stays coherent.   */
void set_cpu_timers(REGS *hostregs, const TOD host_timer, REGS *regs, const TOD timer)
{
    set_cpu_timer(hostregs, host_timer);
    if (regs != hostregs)
    {
        regs->cpu_timer = tod2etod(timer);
        regs->cpu_timer_epoch = hostregs->cpu_timer_epoch;
    }
}
/*-------------------------------------------------------------------*/
/* Capture the host CPU timer and the guest (SIE) timer into the     */
/* caller's variables.  With no separate guest context, the guest    */
/* value is simply the host value.                                   */
/*-------------------------------------------------------------------*/
void save_cpu_timers(REGS *hostregs, TOD *host_timer, REGS *regs, TOD *timer)
{
    *host_timer = cpu_timer( hostregs );

    if (regs == hostregs)
        *timer = *host_timer;
    else
        *timer = (TOD) cpu_timer_SIE( regs );
}
/*-------------------------------------------------------------------*/
/* Bring the CPU timer current and return it in TOD format.          */
/*                                                                   */
/* Three cases:                                                      */
/*   - time source has not advanced: return the timer unchanged;     */
/*   - CPU stopped: timer is frozen, but the CPU-time bookkeeping    */
/*     and epoch are still advanced;                                 */
/*   - otherwise: accumulate real CPU time used and count the timer  */
/*     down on this context and any linked host/guest contexts.      */
/*-------------------------------------------------------------------*/
S64 cpu_timer(REGS *regs)
{
    register TOD new_epoch = mode_cputime(regs);
    register U64 new_epoch_us = etod2us(new_epoch);
    register S64 result;

    /* If no change from epoch, don't bother updating */
    if (new_epoch <= regs->cpu_timer_epoch)
    {
        result = regs->cpu_timer;
    }
    /* If CPU is stopped, return the CPU timer without updating */
    else if (regs->cpustate == CPUSTATE_STOPPED)
    {
        result = regs->cpu_timer;

        /* Update base CPU time epoch */
        regs->bcputime = new_epoch_us;

        /* Update CPU timer epoch */
        set_cpu_timer_epoch(regs, new_epoch);
    }
    else /* Update and return the CPU timer */
    {
        /* Update real CPU time used and base CPU time epoch */
        regs->rcputime += new_epoch_us - regs->bcputime;
        regs->bcputime = new_epoch_us;

        /* Process CPU timers */
        result = cpu_timer_update(regs, new_epoch);
        cpu_timer_update_linked(regs, regs->hostregs, new_epoch);
        cpu_timer_update_linked(regs, regs->guestregs, new_epoch);
    }

    /* Change from ETOD format to TOD format */
    result = (S64)etod2tod(result);
    return (result);
}
/*-------------------------------------------------------------------*/
/* Return the SIE guest's CPU timer (TOD format) without updating    */
/* it.  Host registers are transparently redirected to their guest.  */
/*-------------------------------------------------------------------*/
S64 cpu_timer_SIE(REGS *regs)
{
    REGS* gregs;

    /* Ensure the SIE guest context is the one being read */
    gregs = (regs == regs->hostregs) ? regs->guestregs : regs;

    /* Convert from internal ETOD format to architected TOD format */
    return (S64) etod2tod( gregs->cpu_timer );
}
/*-------------------------------------------------------------------*/
/* Return the current guest clock in extended-TOD format, stamped    */
/* with the CPU address for Standard/Extended formats, guaranteeing  */
/* that successive stored values are strictly ascending.             */
/*-------------------------------------------------------------------*/
TOD etod_clock(REGS *regs, ETOD* ETOD, ETOD_format format)
{
    /* STORE CLOCK and STORE CLOCK EXTENDED values must be in ascending
     * order for comparison. Consequently, swap delays for a subsequent
     * STORE CLOCK, STORE CLOCK EXTENDED, or TRACE instruction may be
     * introduced when a STORE CLOCK value is advanced due to the use of
     * the CPU address in bits 66-71.
     *
     * If the regs pointer is null, then the request is a raw request,
     * and the format operand should specify ETOD_raw or ETOD_fast. For
     * raw and fast requests, the CPU address is not inserted into the
     * returned value.
     *
     * A spin loop is used for the introduction of the delay, moderated
     * by obtaining and releasing of the TOD lock. This permits raw and
     * fast clock requests to complete without additional delay.
     */
    U64 high;
    U64 low;
    U8 swapped = 0;

    do
    {
        obtain_lock(&sysblk.todlock);

        /* Read the steered hardware clock (also refreshes hw_tod) */
        high = hw_clock_l();
        low = hw_tod.low;

        /* If we are in the old episode, and the new episode has arrived
         * then we must take action to start the new episode.
         */
        if (episode_current == &episode_old)
            start_new_episode();

        /* Set the clock to the new updated value with offset applied */
        high += episode_current->base_offset;

        /* Place CPU stamp into clock value for Standard and Extended
         * formats (raw or fast requests fall through)
         */
        if (regs && format >= ETOD_standard)
        {
            register U64 cpuad;
            register U64 amask;
            register U64 lmask;

            /* Set CPU address masks (width depends on max CPUs) */
            if (sysblk.maxcpu <= 64)
                amask = 0x3F, lmask = 0xFFFFFFFFFFC00000ULL;
            else if (sysblk.maxcpu <= 128)
                amask = 0x7F, lmask = 0xFFFFFFFFFF800000ULL;
            else /* sysblk.maxcpu <= 256) */
                amask = 0xFF, lmask = 0xFFFFFFFFFF000000ULL;

            /* Clean CPU address */
            cpuad = (U64)regs->cpuad & amask;

            switch (format)
            {
                /* Standard TOD format */
                case ETOD_standard:
                    low &= lmask << 40;
                    low |= cpuad << 56;
                    break;
                /* Extended TOD format */
                case ETOD_extended:
                    low &= lmask;
                    low |= cpuad << 16;
                    if (low == 0)
                        low = (amask + 1) << 16;
                    low |= regs->todpr;     /* TOD programmable field */
                    break;
                default:
                    ASSERT(0); /* unexpected */
                    break;
            }
        }

        if (/* New clock value > Old clock value */
            high > tod_value.high ||
            (high == tod_value.high &&
             low > tod_value.low) ||
            /* or Clock Wrap */
            unlikely(unlikely((tod_value.high & 0x8000000000000000ULL) == 0x8000000000000000ULL &&
                              ( high & 0x8000000000000000ULL) == 0)))
        {
            tod_value.high = high;
            tod_value.low = low;
            swapped = 1;
        }
        else if (format <= ETOD_fast)
        {
            /* Fast/raw callers accept the last published value rather
             * than spinning for a strictly newer one */
            high = tod_value.high;
            low = tod_value.low;
            swapped = 1;
        }

        if (swapped)
        {
            /* NOTE(review): regs is dereferenced here although the
             * header comment permits regs == NULL for raw requests;
             * confirm all raw/fast callers pass a valid regs, or this
             * path would fault for a NULL-regs request.
             */
            ETOD->high = high += regs->tod_epoch;
            ETOD->low = low;
        }

        release_lock(&sysblk.todlock);
    } while (!swapped);     /* spin until an ascending value is won */

    return ( high );
}
/*-------------------------------------------------------------------*/
/* Convenience wrapper: return only the 64-bit TOD portion of the    */
/* current clock, using the fast (no CPU-stamp) request path.        */
/*-------------------------------------------------------------------*/
TOD
tod_clock (REGS* regs)
{
    ETOD result;                    /* scratch; low word discarded */
    return etod_clock( regs, &result, ETOD_fast );
}
#if defined(_FEATURE_INTERVAL_TIMER)
#if defined(_FEATURE_ECPSVM)
/* Remaining ECPS:VM virtual interval timer value, derived from the  */
/* stored expiry TOD minus the current hardware clock.               */
static INLINE S32 ecps_vtimer(const REGS *regs)
{
    return (S32)TOD_TO_ITIMER((S64)(regs->ecps_vtimer - hw_clock()));
}
/* Arm the ECPS:VM virtual interval timer: store its expiry as an    */
/* absolute TOD and remember the raw value for edge detection.       */
static INLINE void set_ecps_vtimer(REGS *regs, const S32 vtimer)
{
    regs->ecps_vtimer = (U64)(hw_clock() + ITIMER_TO_TOD(vtimer));
    regs->ecps_oldtmr = vtimer;
}
#endif /*defined(_FEATURE_ECPSVM)*/
/* Remaining S/370 interval timer value, derived from the stored     */
/* expiry TOD minus the current hardware clock.                      */
static INLINE S32 int_timer(const REGS *regs)
{
    return (S32)TOD_TO_ITIMER((S64)(regs->int_timer - hw_clock()));
}
/* Arm the S/370 interval timer: store its expiry as an absolute TOD */
/* and remember the raw value for negative-transition detection.     */
void set_int_timer(REGS *regs, const S32 itimer)
{
    regs->int_timer = (U64)(hw_clock() + ITIMER_TO_TOD(itimer));
    regs->old_timer = itimer;
}
/*-------------------------------------------------------------------*/
/* Check for interval-timer interrupts.  Raises the interval timer   */
/* interrupt on a positive-to-negative transition, and (under        */
/* ECPS:VM) likewise for the virtual interval timer.                 */
/* Returns 0 = none pending, bit 0 = interval timer, bit 1 = ECPS    */
/* virtual timer.                                                    */
/*-------------------------------------------------------------------*/
int chk_int_timer(REGS *regs)
{
    S32 itimer;
    int pending = 0;

    itimer = int_timer(regs);
    if(itimer < 0 && regs->old_timer >= 0)
    {
        ON_IC_ITIMER(regs);
        pending = 1;
        regs->old_timer=itimer;     /* arm edge detection anew */
    }
#if defined(_FEATURE_ECPSVM)
    if(regs->ecps_vtmrpt)
    {
        itimer = ecps_vtimer(regs);
        if(itimer < 0 && regs->ecps_oldtmr >= 0)
        {
            ON_IC_ECPSVTIMER(regs);
            pending += 2;
            /* NOTE(review): unlike the interval timer above,
             * ecps_oldtmr is not updated here -- presumably reset
             * elsewhere when the interrupt is taken; confirm. */
        }
    }
#endif /*defined(_FEATURE_ECPSVM)*/
    return pending;
}
#endif /*defined(_FEATURE_INTERVAL_TIMER)*/
/*
* is_leapyear ( year )
*
* Returns:
*
* 0 - Specified year is NOT a leap year
* 1 - Specified year is a leap year
*
*
* Algorithm:
*
* if year modulo 400 is 0 then
* is_leap_year
* else if year modulo 100 is 0 then
* not_leap_year
* else if year modulo 4 is 0 then
* is_leap_year
* else
* not_leap_year
*
*
* Notes and Restrictions:
*
* 1) In reality, only valid for years 1582 and later. 1582 was the
* first year of Gregorian calendar; actual years are dependent upon
* year of acceptance by any given government and/or agency. For
* example, Britain and the British empire did not adopt the
* calendar until 1752; Alaska did not adopt the calendar until
* 1867.
*
* 2) Minimum validity period for algorithm is 3,300 years after 1582
* (4882), at which point the calendar may be off by one full day.
*
* 3) Most likely invalid for years after 8000 due to unpredictability
* in the earth's long-time rotational changes.
*
* 4) For our purposes, year zero is treated as a leap year.
*
*
* References:
*
* http://scienceworld.wolfram.com/astronomy/LeapYear.html
* http://www.timeanddate.com/date/leapyear.html
* http://www.usno.navy.mil/USNO/astronomical-applications/
* astronomical-information-center/leap-years
* http://en.wikipedia.org/wiki/Leap_year
* http://en.wikipedia.org/wiki/0_(year)
* http://en.wikipedia.org/wiki/1_BC
* http://en.wikipedia.org/wiki/Proleptic_calendar
* http://en.wikipedia.org/wiki/Proleptic_Julian_calendar
* http://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar
* http://dotat.at/tmp/ISO_8601-2004_E.pdf
* http://tools.ietf.org/html/rfc3339
*
*/
/* Gregorian leap-year test (see algorithm notes above): divisible   */
/* by 400 -> leap; else by 100 -> common; else by 4 -> leap; else    */
/* common.  Year zero satisfies all three moduli -> reported leap.   */
static INLINE unsigned int
is_leapyear ( const unsigned int year )
{
    if (year % 400 == 0)
        return 1;
    if (year % 100 == 0)
        return 0;
    return (year % 4 == 0);
}
/*-------------------------------------------------------------------*/
/* Compute a one-day correction (in TOD units) for leap-year drift   */
/* between the configured epoch year and the current clock year, so  */
/* the epoch offset built from a flat 365.25-day year remains exact. */
/* Returns -TOD_DAY, 0, or +TOD_DAY.                                 */
/*-------------------------------------------------------------------*/
static INLINE S64 lyear_adjust(const int epoch)
{
    int year, leapyear;
    TOD tod = hw_clock();

    /* Derive the year number since the epoch from the raw clock,
     * treating the first year specially (year 1 starts one TOD_YEAR
     * into the clock; 4-year groups each contain one leap day).
     */
    if(tod >= TOD_YEAR)
    {
        tod -= TOD_YEAR;
        year = (tod / TOD_4YEARS * 4) + 1;
        tod %= TOD_4YEARS;
        if((leapyear = tod / TOD_YEAR) == 4)
            year--;                 /* end-of-group boundary case */
        year += leapyear;
    }
    else
        year = 0;

    /* Decide whether the epoch->now span crosses an extra (or one
     * fewer) February 29 relative to the flat-year approximation.
     */
    if(epoch > 0)
        return ( ((!is_leapyear(year)) && (((year % 4) - (epoch % 4)) <= 0)) ? -TOD_DAY : 0 );
    else
        return ( ((is_leapyear(year) && (-epoch % 4) != 0) || ((year % 4) + (-epoch % 4) > 4)) ? TOD_DAY : 0 );
}
int default_epoch = 1900;       /* Clock epoch year: 1900 or 1960    */
int default_yroffset = 0;       /* Year offset, -142..+142           */
int default_tzoffset = 0;       /* Timezone offset, +/-HHMM form     */

/*-------------------------------------------------------------------*/
/* Recompute the TOD epoch from the configured epoch year, year      */
/* offset and timezone offset.                                       */
/*-------------------------------------------------------------------*/
static INLINE void configure_time()
{
    int epoch;
    S64 ly1960;

    /* Set up the system TOD clock offset: compute the number of
     * microseconds offset to 0000 GMT, 1 January 1900.
     */
    if( (epoch = default_epoch) == 1960 )
        ly1960 = ETOD_DAY;      /* 1960 epoch: account for 29 Feb 1960 */
    else
        ly1960 = 0;

    epoch -= 1900 + default_yroffset;

    /* 365.25-day years (365 + 1/4), corrected for leap-year drift */
    set_tod_epoch(((epoch*365+(epoch/4))*-ETOD_DAY)+lyear_adjust(epoch)+ly1960);

    /* Set the timezone offset */
    adjust_tod_epoch((((default_tzoffset / 100) * 60) + /* Hours -> Minutes */
                      (default_tzoffset % 100)) * /* Minutes */
                     ETOD_MIN); /* Convert to ETOD format */
}
/*-------------------------------------------------------------------*/
/* epoch 1900|1960 */
/*-------------------------------------------------------------------*/
/* Configure the clock epoch year (only 1900 and 1960 are valid).    */
/* Returns 0 on success, -1 on an invalid year.                      */
int configure_epoch(int epoch)
{
    if (epoch == 1900 || epoch == 1960)
    {
        default_epoch = epoch;
        configure_time();           /* recompute the TOD epoch offset */
        return 0;
    }
    return -1;
}
/*-------------------------------------------------------------------*/
/* yroffset +|-142 */
/*-------------------------------------------------------------------*/
/* Configure the year offset (valid range -142..+142).               */
/* Returns 0 on success, -1 on an out-of-range value.                */
int configure_yroffset(int yroffset)
{
    if (yroffset >= -142 && yroffset <= 142)
    {
        default_yroffset = yroffset;
        configure_time();           /* recompute the TOD epoch offset */
        return 0;
    }
    return -1;
}
/*-------------------------------------------------------------------*/
/* tzoffset -2359..+2359 */
/*-------------------------------------------------------------------*/
/* Configure the timezone offset in +/-HHMM form (-2359..+2359).     */
/* Returns 0 on success, -1 on an out-of-range value.                */
int configure_tzoffset(int tzoffset)
{
    if (tzoffset >= -2359 && tzoffset <= 2359)
    {
        default_tzoffset = tzoffset;
        configure_time();           /* recompute the TOD epoch offset */
        return 0;
    }
    return -1;
}
/*-------------------------------------------------------------------*/
/* Query current tzoffset value for reporting */
/*-------------------------------------------------------------------*/
/* Return the currently configured timezone offset (+/-HHMM form).   */
int query_tzoffset(void)
{
    return default_tzoffset;
}
/*-------------------------------------------------------------------*/
/* Update TOD clock */
/* */
/* This function updates the TOD clock. */
/* */
/* This function is called by timer_update_thread and by cpu_thread */
/* instructions that manipulate any of the timer related entities */
/* (clock comparator, cpu timer and interval timer). */
/* */
/* Internal function `check_timer_event' is called which will signal */
/* any timer related interrupts to the appropriate cpu_thread. */
/* */
/* Callers *must* own the todlock and *must not* own the intlock. */
/* */
/* update_tod_clock() returns the tod delta, by which the cpu timer */
/* has been adjusted. */
/* */
/*-------------------------------------------------------------------*/
// static ETOD tod_value;
// static ETOD tod_value;
/*-------------------------------------------------------------------*/
/* (Contract documented in the banner above: callers must NOT own    */
/* the todlock and must not own intlock; returns the new clock.)     */
/*-------------------------------------------------------------------*/
TOD update_tod_clock(void)
{
    TOD new_clock;

    obtain_lock(&sysblk.todlock);
    new_clock = hw_clock_l();

    /* If we are in the old episode, and the new episode has arrived
       then we must take action to start the new episode */
    if (episode_current == &episode_old)
        start_new_episode();

    /* Set the clock to the new updated value with offset applied */
    new_clock += episode_current->base_offset;
    tod_value.high = new_clock;
    tod_value.low = hw_tod.low;
    release_lock(&sysblk.todlock);

    /* Update the timers and check if either a clock related event has
       become pending */
    update_cpu_timer();

    return new_clock;
}
#define SR_SYS_CLOCK_CURRENT_CSR ( SR_SYS_CLOCK | 0x001 )
#define SR_SYS_CLOCK_UNIVERSAL_TOD ( SR_SYS_CLOCK | 0x002 )
#define SR_SYS_CLOCK_HW_STEERING ( SR_SYS_CLOCK | 0x004 )
#define SR_SYS_CLOCK_HW_EPISODE ( SR_SYS_CLOCK | 0x005 )
#define SR_SYS_CLOCK_HW_OFFSET ( SR_SYS_CLOCK | 0x006 )
#define SR_SYS_CLOCK_OLD_CSR ( SR_SYS_CLOCK | 0x100 )
#define SR_SYS_CLOCK_OLD_CSR_START_TIME ( SR_SYS_CLOCK | 0x101 )
#define SR_SYS_CLOCK_OLD_CSR_BASE_OFFSET ( SR_SYS_CLOCK | 0x102 )
#define SR_SYS_CLOCK_OLD_CSR_FINE_S_RATE ( SR_SYS_CLOCK | 0x103 )
#define SR_SYS_CLOCK_OLD_CSR_GROSS_S_RATE ( SR_SYS_CLOCK | 0x104 )
#define SR_SYS_CLOCK_NEW_CSR ( SR_SYS_CLOCK | 0x200 )
#define SR_SYS_CLOCK_NEW_CSR_START_TIME ( SR_SYS_CLOCK | 0x201 )
#define SR_SYS_CLOCK_NEW_CSR_BASE_OFFSET ( SR_SYS_CLOCK | 0x202 )
#define SR_SYS_CLOCK_NEW_CSR_FINE_S_RATE ( SR_SYS_CLOCK | 0x203 )
#define SR_SYS_CLOCK_NEW_CSR_GROSS_S_RATE ( SR_SYS_CLOCK | 0x204 )
int clock_hsuspend(void *file)
{
int i;
char buf[SR_MAX_STRING_LENGTH];
i = (episode_current == &episode_new);
SR_WRITE_VALUE(file, SR_SYS_CLOCK_CURRENT_CSR, i, sizeof(i));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_UNIVERSAL_TOD, universal_tod.high, sizeof(universal_tod.high));
MSGBUF(buf, "%f", hw_steering);
SR_WRITE_STRING(file, SR_SYS_CLOCK_HW_STEERING, buf);
SR_WRITE_VALUE(file, SR_SYS_CLOCK_HW_EPISODE, hw_episode, sizeof(hw_episode));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_HW_OFFSET, hw_offset, sizeof(hw_offset));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_OLD_CSR_START_TIME, episode_old.start_time, sizeof(episode_old.start_time));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_OLD_CSR_BASE_OFFSET, episode_old.base_offset, sizeof(episode_old.base_offset));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_OLD_CSR_FINE_S_RATE, episode_old.fine_s_rate, sizeof(episode_old.fine_s_rate));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_OLD_CSR_GROSS_S_RATE, episode_old.gross_s_rate, sizeof(episode_old.gross_s_rate));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_NEW_CSR_START_TIME, episode_new.start_time, sizeof(episode_new.start_time));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_NEW_CSR_BASE_OFFSET, episode_new.base_offset, sizeof(episode_new.base_offset));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_NEW_CSR_FINE_S_RATE, episode_new.fine_s_rate, sizeof(episode_new.fine_s_rate));
SR_WRITE_VALUE(file, SR_SYS_CLOCK_NEW_CSR_GROSS_S_RATE, episode_new.gross_s_rate, sizeof(episode_new.gross_s_rate));
return 0;