I'm trying to write a small library that lets me measure average elapsed time using struct timespec. Since C doesn't allow operator overloading, I'm forced to write functions. I'm aware that checks such as end_time < start_time are missing; I've left them out to keep the code short. Accuracy of the calculations is the primary goal.
enum { NS_PER_SECOND = 1000000000L };
enum { MAX_NS = 999999999 };
// t1 --> start time
// t2 --> end time
// set t1 and t2 by:
// clock_gettime(CLOCK_REALTIME, &t1);
struct timespec get_timespec_diff(const struct timespec t1, const struct timespec t2)
{
    struct timespec t;
    t.tv_nsec = t2.tv_nsec - t1.tv_nsec;
    t.tv_sec = t2.tv_sec - t1.tv_sec;
    if (t.tv_sec > 0 && t.tv_nsec < 0) {
        t.tv_nsec += NS_PER_SECOND;
        t.tv_sec--;
    } else if (t.tv_sec < 0 && t.tv_nsec > 0) {
        t.tv_nsec -= NS_PER_SECOND;
        t.tv_sec++;
    }
    return t;
}

struct timespec timespec_add(const struct timespec t1, const struct timespec t2) {
    unsigned int total_ns = t1.tv_nsec + t2.tv_nsec;
    unsigned int total_s = t1.tv_sec + t2.tv_sec;
    while (total_ns > MAX_NS) {
        total_ns -= (unsigned)(MAX_NS + 1);
        total_s++;
    }
    struct timespec ret;
    ret.tv_sec = total_s;
    ret.tv_nsec = total_ns;
    return ret;
}

struct timespec timespec_avg(const struct timespec t, const unsigned n) {
    struct timespec ret;
    ret.tv_sec = 0;
    ret.tv_nsec = 0;
    unsigned int total_ns = t.tv_nsec + (t.tv_sec * (MAX_NS+1));
    unsigned int avg_ns = total_ns/n;
    while (avg_ns > MAX_NS) {
        ret.tv_sec++;
        avg_ns /= 10;
    }
    ret.tv_nsec = avg_ns;
    return ret;
}
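For context, a rough sketch of how I intend to use these helpers (work() is just a placeholder for the code being timed; clock_gettime() error checking is left out):

#include <stdio.h>
#include <time.h>

#define RUNS 10

int main(void)
{
    struct timespec total = { 0, 0 };
    for (unsigned i = 0; i < RUNS; i++) {
        struct timespec t1, t2;
        clock_gettime(CLOCK_REALTIME, &t1);
        /* work();  the code under test */
        clock_gettime(CLOCK_REALTIME, &t2);
        total = timespec_add(total, get_timespec_diff(t1, t2));
    }
    struct timespec avg = timespec_avg(total, RUNS);
    printf("average: %lld.%09ld s\n", (long long)avg.tv_sec, avg.tv_nsec);
    return 0;
}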
2 Answers
You need to #include <time.h> for struct timespec to be defined.
Why are NS_PER_SECOND and MAX_NS enum members? There's no advantage compared to plain old const long. I'm not sure why we need two constants anyway; the code that uses MAX_NS is simplified if we use NS_PER_SECOND instead:
long total_ns = t1.tv_nsec + t2.tv_nsec;
time_t total_s = t1.tv_sec + t2.tv_sec;
while (total_ns >= NS_PER_SECOND) {
    total_ns -= NS_PER_SECOND;
    ++total_s;
}
(Note the use of the correct types in this version).
I think this logic is wrong:
if (t.tv_sec > 0 && t.tv_nsec < 0) {
    t.tv_nsec += NS_PER_SECOND;
    t.tv_sec--;
} else if (t.tv_sec < 0 && t.tv_nsec > 0) {
    t.tv_nsec -= NS_PER_SECOND;
    t.tv_sec++;
}
I'm reasonably sure that tv_nsec should always be positive, even when tv_sec is negative. So we can simplify that to just:
if (t.tv_nsec < 0) {
    t.tv_nsec += NS_PER_SECOND;
    t.tv_sec--;
}
Note that the logic in timespec_add() already assumes a positive tv_nsec.
This is highly susceptible to overflow:
unsigned int total_ns = t.tv_nsec + (t.tv_sec * (MAX_NS+1));
The whole reason we have struct timespec is that we might need to represent values outside the range of the integer types. Probably better to use divmod: divide tv_sec by n and add the remainder (as nanoseconds) to tv_nsec before dividing that - we need to be very careful here to avoid overflow. Again, an unsigned type is inconsistent with the other code.
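To make the overflow concrete (a throwaway sketch assuming a 32-bit unsigned int, which is common but not guaranteed):

#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* With a 32-bit unsigned int, tv_sec * (MAX_NS + 1) no longer fits in
       total_ns once tv_sec exceeds UINT_MAX / 1000000000, i.e. about 4,
       so the value silently wraps. */
    printf("whole seconds that fit in an unsigned int of nanoseconds: %u\n",
           UINT_MAX / 1000000000u);
    return 0;
}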
Modified code
Here's my version of these functions:
#include <time.h>

const long NS_PER_SECOND = 1000000000L;

struct timespec timespec_sub(const struct timespec t1, const struct timespec t2)
{
    struct timespec t;
    t.tv_nsec = t2.tv_nsec - t1.tv_nsec;
    t.tv_sec = t2.tv_sec - t1.tv_sec;
    if (t.tv_nsec < 0) {
        t.tv_nsec += NS_PER_SECOND;
        t.tv_sec--;
    }
    return t;
}

struct timespec timespec_add(const struct timespec t1, const struct timespec t2)
{
    struct timespec t = { t1.tv_sec + t2.tv_sec, t1.tv_nsec + t2.tv_nsec };
    if (t.tv_nsec >= NS_PER_SECOND) {
        t.tv_nsec -= NS_PER_SECOND;
        t.tv_sec++;
    }
    return t;
}

struct timespec timespec_divide(struct timespec t, const int n)
{
    time_t remainder_secs = t.tv_sec % n;
    t.tv_sec /= n;
    t.tv_nsec /= n;
    t.tv_nsec +=
        remainder_secs * (NS_PER_SECOND / n) +
        remainder_secs * (NS_PER_SECOND % n) / n;
    while (t.tv_nsec >= NS_PER_SECOND) {
        t.tv_nsec -= NS_PER_SECOND;
        t.tv_sec++;
    }
    return t;
}
And a primitive unit-test:
int main(void)
{
    const struct timespec a = { 1, 905234817 };
    struct timespec a_2 = timespec_add(a, a);
    struct timespec a_4 = timespec_add(a_2, a_2);
    struct timespec a_5 = timespec_add(a_4, a);
    struct timespec z = timespec_sub(a, timespec_divide(a_5, 5));
    return z.tv_sec || z.tv_nsec;
}
You should expand on the testing, to prove correctness for the tricky cases where overflow could happen.
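For example, tests along these lines (a sketch using the functions above) exercise the borrow, carry and remainder paths:

#include <assert.h>

static void more_tests(void)
{
    /* subtraction that must borrow a second */
    struct timespec d = timespec_sub((struct timespec){ 1, 900000000L },
                                     (struct timespec){ 2, 100000000L });
    assert(d.tv_sec == 0 && d.tv_nsec == 200000000L);

    /* addition that must carry into the seconds */
    struct timespec s = timespec_add((struct timespec){ 0, 600000000L },
                                     (struct timespec){ 0, 700000000L });
    assert(s.tv_sec == 1 && s.tv_nsec == 300000000L);

    /* division where the seconds remainder feeds the nanoseconds */
    struct timespec q = timespec_divide((struct timespec){ 10, 0 }, 3);
    assert(q.tv_sec == 3 && q.tv_nsec == 333333333L);
}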
Further simplification
We can separate out the code to normalise out-of-range nanoseconds into its own function:
struct timespec timespec_normalise(struct timespec t)
{
    t.tv_sec += t.tv_nsec / NS_PER_SECOND;
    if ((t.tv_nsec %= NS_PER_SECOND) < 0) {
        /* division rounds towards zero, since C99 */
        t.tv_nsec += NS_PER_SECOND;
        --t.tv_sec;
    }
    return t;
}
Then the functions can use that, to make them shorter and simpler:
struct timespec timespec_sub(const struct timespec t1, const struct timespec t2)
{
    struct timespec t = { t2.tv_sec - t1.tv_sec, t2.tv_nsec - t1.tv_nsec };
    return timespec_normalise(t);
}

struct timespec timespec_add(const struct timespec t1, const struct timespec t2)
{
    struct timespec t = { t1.tv_sec + t2.tv_sec, t1.tv_nsec + t2.tv_nsec };
    return timespec_normalise(t);
}

struct timespec timespec_divide(struct timespec t, const int n)
{
    time_t remainder_secs = t.tv_sec % n;
    t.tv_sec /= n;
    t.tv_nsec /= n;
    t.tv_nsec +=
        remainder_secs * (NS_PER_SECOND / n) +
        remainder_secs * (NS_PER_SECOND % n) / n;
    return timespec_normalise(t);
}
Because we have the unit tests, we have high confidence that we haven't affected the functionality here.
- chux (Nov 29, 2020): timespec_divide() has problems. time_t may be floating point, so t.tv_sec % n does not certainly compile. remainder_secs * (NS_PER_SECOND % n) is subject to integer overflow when n > LONG_MAX/n. When n < 0, additional issues.
- Toby Speight (Nov 29, 2020): I had thought that time_t needed to be an integer; I'd forgotten that wasn't required. Yes, the divide still has problems; thanks for showing that. Unfortunately no time to fix that now (it's good to show how tricky this stuff can be!)
- Toby Speight (Nov 30, 2020): The problems with larger n could probably help inform some additional unit tests...
time_t assumptions, not specifications
OP's code assumes 1) time_t is an integer type. It could be floating point. 2) unsigned/int are not 16-bit - they could be.
Writing portable time code is difficult due to #1. The rest of this answer assumes #1.
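If we want to make assumption #1 explicit, one option (a sketch, needs C11, and leans on the fact that integer division truncates) is a compile-time check:

#include <time.h>

/* (time_t)1 / 2 is 0 only for an integer time_t; a floating-point time_t
   would make this 0.5 and the check fails at compile time. */
_Static_assert((time_t)1 / 2 == 0, "this code assumes an integer time_t");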
Normal range
The normal ranges are .tv_sec >= 0 and 0 <= .tv_nsec <= 999999999. It is useful if code does not rely on that too much.
Overflow risk
In timespec_add(), t1.tv_sec + t2.tv_sec risks overflow and thus undefined behavior.
unsigned int total_ns = t1.tv_nsec + t2.tv_nsec;
can readily fail to provide correct results when unsigned is 16-bit - best to use long.
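A sketch of how the seconds addition could report overflow instead of invoking undefined behavior, assuming a GCC/Clang-style __builtin_add_overflow() and an integer time_t:

#include <stdbool.h>
#include <time.h>

bool timespec_add_checked(struct timespec t1, struct timespec t2,
                          struct timespec *out)
{
    time_t sec;
    if (__builtin_add_overflow(t1.tv_sec, t2.tv_sec, &sec))
        return false;                        /* seconds would overflow */

    long nsec = t1.tv_nsec + t2.tv_nsec;     /* long arithmetic, not 16-bit unsigned */
    if (nsec >= 1000000000L) {
        nsec -= 1000000000L;
        if (__builtin_add_overflow(sec, (time_t)1, &sec))
            return false;                    /* the carry can overflow too */
    }
    out->tv_sec = sec;
    out->tv_nsec = nsec;
    return true;
}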
Strange name
timespec_avg(const struct timespec t, const unsigned n) looks more like a divide than an average of time.
Alternate suggestion:
#include <time.h>

#define NS_PER_SECOND 1000000000

// Averaging without overflow, even if `t1,t2` outside normal range
struct timespec timespec_avg(struct timespec t1, struct timespec t2) {
    struct timespec avg;
    int remainder = t1.tv_sec % 2 + t2.tv_sec % 2;
    avg.tv_sec = t1.tv_sec / 2 + t2.tv_sec / 2;
    avg.tv_nsec = t1.tv_nsec / 2 + t2.tv_nsec / 2 + (t1.tv_nsec % 2 + t2.tv_nsec % 2) / 2;
    avg.tv_sec += avg.tv_nsec / NS_PER_SECOND;
    avg.tv_nsec = avg.tv_nsec % NS_PER_SECOND + (NS_PER_SECOND / 2 * remainder);
    if (avg.tv_nsec >= NS_PER_SECOND) {
        avg.tv_nsec -= NS_PER_SECOND;
        avg.tv_sec++;
    } else if (avg.tv_nsec < 0) {
        avg.tv_nsec += NS_PER_SECOND;
        avg.tv_sec--;
    }
    return avg;
}
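A quick usage sketch of the averaging function above, with made-up values; the expected output is 2.250000000:

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec a = { 2, 600000000L };   /* 2.6 s */
    struct timespec b = { 1, 900000000L };   /* 1.9 s */
    struct timespec avg = timespec_avg(a, b);
    printf("%lld.%09ld\n", (long long)avg.tv_sec, avg.tv_nsec);
    return 0;
}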