
leetcode238's Speed


2025-10-17

Today I did a LeetCode problem, 238. Product of Array Except Self, which asks for the product of all elements of an array except the element at each position. My first submission beat 100% of users on runtime, but I figured there was still room to optimize (mainly memory), so I reworked it to allocate one fewer array. To my surprise, not only did the memory consumption barely change, the runtime also got noticeably worse. So I wanted to try to dig into this a bit.

The two implementations

The first function is the one I originally submitted: very fast (beats 100% on runtime), but its memory usage is a bit high (it only beats 55%). Let's call it func1.

class Solution {
public:
    vector<int> productExceptSelf(vector<int>& nums) {
        int n = nums.size();
        vector<int> pro(n);   // holds prefix products first, then gets reused for suffix products
        vector<int> ans(n);
        pro[0] = nums[0];
        for (int i = 1; i < n; i++) {
            pro[i] = pro[i - 1] * nums[i];
        }
        ans[n - 1] = pro[n - 2];
        pro[n - 1] = nums[n - 1];
        // walking backwards, pro[i - 1] is still a prefix product while pro[i + 1] is already a suffix product
        for (int i = n - 2; i > 0; i--) {
            ans[i] = pro[i - 1] * pro[i + 1];
            pro[i] = pro[i + 1] * nums[i];
        }
        ans[0] = pro[1];
        return ans;
    }
};
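
As a quick sanity check (not part of the submission), a small driver like the one below, assuming the Solution above is in scope and wrapping it in a hypothetical free function func1, should print 24 12 8 6 for the input {1, 2, 3, 4}:

#include <iostream>
#include <vector>
using namespace std;

// Hypothetical wrapper; the benchmark harness further down refers to the same name.
vector<int> func1(vector<int>& nums) {
    return Solution().productExceptSelf(nums);
}

int main() {
    vector<int> nums = {1, 2, 3, 4};
    for (int x : func1(nums)) cout << x << ' ';   // expected: 24 12 8 6
    cout << '\n';
    return 0;
}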

The second function is my "optimized" version, which is a bit slower. Let's call it func2.

class Solution {
public:
    vector<int> productExceptSelf(vector<int>& nums) {
        int n = nums.size();
        vector<int> pro(n);   // prefix products first, then reused for suffix products
        pro[0] = nums[0];
        for (int i = 1; i < n; i++) {
            pro[i] = pro[i - 1] * nums[i];
        }
        pro[n - 1] = nums[n - 1];
        nums[n - 1] = pro[n - 2];   // answers are written back into nums instead of a separate ans

        for (int i = n - 2; i > 0; i--) {
            pro[i] = pro[i + 1] * nums[i];   // read nums[i] before it is overwritten below
            nums[i] = pro[i - 1] * pro[i + 1];
        }
        nums[0] = pro[1];
        return nums;   // nums is a reference parameter, so returning it by value makes a copy
    }
};

As you can see, the difference is that func2 skips the extra ans array and writes the answers back into nums instead.
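
For reference, the version usually cited for this problem gets by without the pro array at all: write the prefix products straight into the output, then fold in a single running suffix product on the way back. This is the common textbook variant, not the code I submitted; a minimal sketch:

class Solution {
public:
    vector<int> productExceptSelf(vector<int>& nums) {
        int n = nums.size();
        vector<int> ans(n, 1);
        // ans[i] = product of nums[0..i-1]
        for (int i = 1; i < n; i++) {
            ans[i] = ans[i - 1] * nums[i - 1];
        }
        // suffix = product of nums[i+1..n-1], folded in while walking back
        int suffix = 1;
        for (int i = n - 1; i >= 0; i--) {
            ans[i] *= suffix;
            suffix *= nums[i];
        }
        return ans;
    }
};

It still makes the same two passes over the data, so the question of whether reusing nums as the output helps or hurts applies to it just as well.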

The speed difference

I had an AI generate a benchmarking harness, roughly as follows:

// -------- Better benchmarking harness (do NOT modify func1/func2) --------

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#ifdef _WIN32
#include <windows.h>
#endif

// func1/func2 are assumed to be free-function wrappers around the two
// Solution::productExceptSelf implementations shown above.
std::vector<int> func1(std::vector<int>& nums);
std::vector<int> func2(std::vector<int>& nums);

namespace bench {
    using Clock = std::chrono::steady_clock; // monotonic, good for measurements

    static volatile int g_sink = 0;

    inline void consume(const std::vector<int>& v) {
        if (!v.empty()) {
            g_sink ^= v.front();
            g_sink ^= v.back();
            g_sink ^= static_cast<int>(v.size());
        } else {
            g_sink ^= 1;
        }
    }

    struct Stats {
        double min_ms{};
        double median_ms{};
        double avg_ms{};
        double stddev_ms{};
    };

    inline Stats computeStats(std::vector<double> ms) {
        Stats s{};
        if (ms.empty()) return s;
        std::sort(ms.begin(), ms.end());
        s.min_ms = ms.front();
        s.median_ms = ms[ms.size() / 2];
        double sum = std::accumulate(ms.begin(), ms.end(), 0.0);
        s.avg_ms = sum / ms.size();
        double sq = 0.0;
        for (double x : ms) {
            double d = x - s.avg_ms;
            sq += d * d;
        }
        s.stddev_ms = std::sqrt(sq / ms.size());
        return s;
    }

    // Measure func that DOES NOT modify input; avoids extra copy in timed region
    template <typename F>
    Stats measure_readonly(F&& f, const std::vector<int>& input, int trials, int warmup) {
        std::vector<int> reusable = input;
        for (int i = 0; i < warmup; ++i) {
            auto tmp = f(reusable);
            consume(tmp);
        }
        std::vector<double> times_ms;
        times_ms.reserve(trials);
        for (int i = 0; i < trials; ++i) {
            auto t0 = Clock::now();
            auto out = f(reusable);
            auto t1 = Clock::now();
            std::chrono::duration<double, std::milli> dt = t1 - t0;
            times_ms.push_back(dt.count());
            consume(out);
        }
        return computeStats(std::move(times_ms));
    }

    // Measure func that DOES modify input; copy input BEFORE timing each trial
    template <typename F>
    Stats measure_mutating(F&& f, const std::vector<int>& input, int trials, int warmup) {
        for (int i = 0; i < warmup; ++i) {
            std::vector<int> tmp = input;
            auto warm = f(tmp);
            consume(warm);
        }
        std::vector<double> times_ms;
        times_ms.reserve(trials);
        for (int i = 0; i < trials; ++i) {
            std::vector<int> tmp = input;
            auto t0 = Clock::now();
            auto out = f(tmp);
            auto t1 = Clock::now();
            std::chrono::duration<double, std::milli> dt = t1 - t0;
            times_ms.push_back(dt.count());
            consume(out);
        }
        return computeStats(std::move(times_ms));
    }

    // Also append the stats to test.txt
    inline void printStats(const std::string& name, const Stats& s, size_t n, std::ofstream* pf = nullptr) {
        double per_elem_ns = (s.avg_ms * 1e6) / static_cast<double>(n);
        std::ostringstream oss;
        oss << std::fixed << std::setprecision(3);
        oss << "  " << name << ": min=" << s.min_ms << " ms, med=" << s.median_ms
            << " ms, avg=" << s.avg_ms << " ms, sd=" << s.stddev_ms
            << " ms (" << per_elem_ns << " ns/elem avg)" << std::endl;
        std::cout << oss.str();
        if (pf) *pf << oss.str();
    }
} // namespace bench

// Generate base input with safe values (avoid signed overflow UB in multiplications)
static std::vector<int> make_input(size_t n, int fill_value = 1) {
    std::vector<int> v;
    v.resize(n, fill_value);
    return v;
}

// Optional: fixed-seed random input with small values to avoid overflow
static std::vector<int> make_random_input(size_t n, int minv = 1, int maxv = 10, uint32_t seed = 42) {
    std::vector<int> v(n);
    std::mt19937 rng(seed);
    std::uniform_int_distribution<int> dist(minv, maxv);
    for (size_t i = 0; i < n; ++i) v[i] = dist(rng);
    return v;
}

// Enhanced comparison that handles warmup, fair copying, and robust stats

static void elevate_priority() {
#ifdef _WIN32
    SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS);
#endif
    // Linux/macOS: nice/renice could be used here, but is skipped
}

// Drop the extreme values (e.g. max/min) and return a new vector
static std::vector<double> trim_outliers(const std::vector<double>& v, int trim = 1) {
    if (v.size() <= static_cast<size_t>(2 * trim)) return v;
    std::vector<double> sorted = v;
    std::sort(sorted.begin(), sorted.end());
    return std::vector<double>(sorted.begin() + trim, sorted.end() - trim);
}

static void compare_size(size_t size, int trials = 10, int warmup = 2, int repeat = 3, bool use_random = false, uint32_t seed = 42, int trim = 1) {
    std::ofstream fout("test.txt", std::ios::app);
    if (size < 2) {
        std::cout << "Skipping size " << size << " (<2 not supported by these funcs)" << std::endl;
        if (fout) fout << "Skipping size " << size << " (<2 not supported by these funcs)" << std::endl;
        return;
    }
    std::ostringstream oss;
    oss << "\n=== N=" << size << ", trials=" << trials << ", warmup=" << warmup << ", repeat=" << repeat << (use_random ? ", random" : ", ones") << " ===" << std::endl;
    std::cout << oss.str();
    if (fout) fout << oss.str();

    // Warm-up: allocate a large array and touch every element once to fault in the memory pages
    {
        std::vector<int> dummy(size, 1);
        for (auto& x : dummy) x++;
    }

    elevate_priority();

    for (int r = 0; r < repeat; ++r) {
        auto base = use_random ? make_random_input(size, 1, 10, seed + r) : make_input(size, 1);
        auto s1 = bench::measure_readonly(
            [](std::vector<int>& in) { return func1(in); }, base, trials, warmup);
        auto s2 = bench::measure_mutating(
            [](std::vector<int>& in) { return func2(in); }, base, trials, warmup);
        // NOTE: as generated, these trimmed vectors are never used below,
        // so the outlier trimming has no effect on the reported stats.
        std::vector<double> t1 = trim_outliers({s1.min_ms, s1.median_ms, s1.avg_ms, s1.stddev_ms}, trim);
        std::vector<double> t2 = trim_outliers({s2.min_ms, s2.median_ms, s2.avg_ms, s2.stddev_ms}, trim);
        oss.str(""); oss.clear();
        oss << "  [Run " << r + 1 << "]" << std::endl;
        std::cout << oss.str();
        if (fout) fout << oss.str();
        bench::printStats("func1", s1, size, fout ? &fout : nullptr);
        bench::printStats("func2", s2, size, fout ? &fout : nullptr);
    }
    fout.close();
}
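
The snippet above omits the driver; for completeness, a hypothetical main() along these lines covers the configurations that show up in the logs below:

int main() {
    // Sizes/parameters inferred from the log headers below; the actual driver may have differed.
    for (size_t n : {10000u, 100000u, 1000000u, 5000000u, 10000000u}) {
        compare_size(n, /*trials=*/10, /*warmup=*/2, /*repeat=*/3, /*use_random=*/false);
    }
    for (size_t n : {10000u, 100000u, 1000000u}) {
        compare_size(n, /*trials=*/20, /*warmup=*/3, /*repeat=*/5, /*use_random=*/true);
    }
    return 0;
}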

Without -Og optimization enabled, the results came out as follows:


=== N=10000, trials=10, warmup=2, repeat=3, ones ===
  [Run 1]
  func1: min=0.113 ms, med=0.128 ms, avg=0.130 ms, sd=0.013 ms (13.016 ns/elem avg)
  func2: min=0.106 ms, med=0.126 ms, avg=0.124 ms, sd=0.009 ms (12.439 ns/elem avg)
  [Run 2]
  func1: min=0.107 ms, med=0.116 ms, avg=0.115 ms, sd=0.006 ms (11.526 ns/elem avg)
  func2: min=0.105 ms, med=0.129 ms, avg=0.138 ms, sd=0.048 ms (13.795 ns/elem avg)
  [Run 3]
  func1: min=0.106 ms, med=0.115 ms, avg=0.114 ms, sd=0.008 ms (11.388 ns/elem avg)
  func2: min=0.106 ms, med=0.107 ms, avg=0.110 ms, sd=0.006 ms (11.018 ns/elem avg)

=== N=100000, trials=10, warmup=2, repeat=3, ones ===
  [Run 1]
  func1: min=0.877 ms, med=0.928 ms, avg=0.972 ms, sd=0.116 ms (9.723 ns/elem avg)
  func2: min=1.190 ms, med=1.362 ms, avg=1.402 ms, sd=0.206 ms (14.017 ns/elem avg)
  [Run 2]
  func1: min=0.930 ms, med=1.043 ms, avg=1.078 ms, sd=0.140 ms (10.781 ns/elem avg)
  func2: min=0.896 ms, med=0.931 ms, avg=0.964 ms, sd=0.070 ms (9.636 ns/elem avg)
  [Run 3]
  func1: min=0.943 ms, med=0.966 ms, avg=0.962 ms, sd=0.015 ms (9.625 ns/elem avg)
  func2: min=0.908 ms, med=0.981 ms, avg=0.996 ms, sd=0.146 ms (9.965 ns/elem avg)

=== N=1000000, trials=10, warmup=2, repeat=3, ones ===
  [Run 1]
  func1: min=9.879 ms, med=11.002 ms, avg=10.755 ms, sd=0.515 ms (10.755 ns/elem avg)
  func2: min=9.540 ms, med=10.035 ms, avg=10.419 ms, sd=1.003 ms (10.419 ns/elem avg)
  [Run 2]
  func1: min=9.870 ms, med=10.068 ms, avg=10.054 ms, sd=0.101 ms (10.054 ns/elem avg)
  func2: min=9.825 ms, med=9.919 ms, avg=10.052 ms, sd=0.354 ms (10.052 ns/elem avg)
  [Run 3]
  func1: min=10.103 ms, med=11.242 ms, avg=11.255 ms, sd=0.843 ms (11.255 ns/elem avg)
  func2: min=10.057 ms, med=10.264 ms, avg=10.528 ms, sd=0.590 ms (10.528 ns/elem avg)

=== N=5000000, trials=10, warmup=2, repeat=3, ones ===
  [Run 1]
  func1: min=49.359 ms, med=51.806 ms, avg=51.501 ms, sd=1.549 ms (10.300 ns/elem avg)
  func2: min=48.217 ms, med=54.002 ms, avg=52.848 ms, sd=2.925 ms (10.570 ns/elem avg)
  [Run 2]
  func1: min=49.195 ms, med=51.085 ms, avg=52.378 ms, sd=3.445 ms (10.476 ns/elem avg)
  func2: min=49.746 ms, med=51.061 ms, avg=51.464 ms, sd=1.895 ms (10.293 ns/elem avg)
  [Run 3]
  func1: min=51.081 ms, med=53.660 ms, avg=54.321 ms, sd=3.199 ms (10.864 ns/elem avg)
  func2: min=49.439 ms, med=55.271 ms, avg=54.303 ms, sd=2.204 ms (10.861 ns/elem avg)

=== N=10000000, trials=10, warmup=2, repeat=3, ones ===
  [Run 1]
  func1: min=101.710 ms, med=109.303 ms, avg=107.307 ms, sd=4.133 ms (10.731 ns/elem avg)
  func2: min=97.194 ms, med=101.839 ms, avg=101.739 ms, sd=3.542 ms (10.174 ns/elem avg)
  [Run 2]
  func1: min=99.782 ms, med=105.876 ms, avg=104.865 ms, sd=2.306 ms (10.486 ns/elem avg)
  func2: min=95.317 ms, med=102.779 ms, avg=101.830 ms, sd=3.868 ms (10.183 ns/elem avg)
  [Run 3]
  func1: min=97.102 ms, med=99.855 ms, avg=100.029 ms, sd=1.977 ms (10.003 ns/elem avg)
  func2: min=94.632 ms, med=99.233 ms, avg=99.708 ms, sd=2.833 ms (9.971 ns/elem avg)

=== N=10000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=0.055 ms, med=0.058 ms, avg=0.058 ms, sd=0.001 ms (5.767 ns/elem avg)
  func2: min=0.055 ms, med=0.058 ms, avg=0.058 ms, sd=0.002 ms (5.761 ns/elem avg)
  [Run 2]
  func1: min=0.055 ms, med=0.057 ms, avg=0.058 ms, sd=0.003 ms (5.760 ns/elem avg)
  func2: min=0.054 ms, med=0.056 ms, avg=0.056 ms, sd=0.001 ms (5.591 ns/elem avg)
  [Run 3]
  func1: min=0.056 ms, med=0.058 ms, avg=0.058 ms, sd=0.002 ms (5.818 ns/elem avg)
  func2: min=0.058 ms, med=0.059 ms, avg=0.059 ms, sd=0.001 ms (5.921 ns/elem avg)
  [Run 4]
  func1: min=0.058 ms, med=0.060 ms, avg=0.061 ms, sd=0.004 ms (6.126 ns/elem avg)
  func2: min=0.058 ms, med=0.059 ms, avg=0.059 ms, sd=0.001 ms (5.922 ns/elem avg)
  [Run 5]
  func1: min=0.057 ms, med=0.058 ms, avg=0.058 ms, sd=0.001 ms (5.796 ns/elem avg)
  func2: min=0.057 ms, med=0.058 ms, avg=0.058 ms, sd=0.001 ms (5.778 ns/elem avg)

=== N=100000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=0.398 ms, med=0.412 ms, avg=0.425 ms, sd=0.049 ms (4.253 ns/elem avg)
  func2: min=0.603 ms, med=0.613 ms, avg=0.615 ms, sd=0.011 ms (6.150 ns/elem avg)
  [Run 2]
  func1: min=0.381 ms, med=0.401 ms, avg=0.405 ms, sd=0.020 ms (4.048 ns/elem avg)
  func2: min=0.372 ms, med=0.387 ms, avg=0.393 ms, sd=0.016 ms (3.929 ns/elem avg)
  [Run 3]
  func1: min=0.398 ms, med=0.407 ms, avg=0.408 ms, sd=0.012 ms (4.083 ns/elem avg)
  func2: min=0.387 ms, med=0.395 ms, avg=0.407 ms, sd=0.028 ms (4.065 ns/elem avg)
  [Run 4]
  func1: min=0.394 ms, med=0.400 ms, avg=0.403 ms, sd=0.011 ms (4.027 ns/elem avg)
  func2: min=0.385 ms, med=0.387 ms, avg=0.393 ms, sd=0.015 ms (3.933 ns/elem avg)
  [Run 5]
  func1: min=0.400 ms, med=0.404 ms, avg=0.408 ms, sd=0.013 ms (4.082 ns/elem avg)
  func2: min=0.370 ms, med=0.386 ms, avg=0.387 ms, sd=0.013 ms (3.872 ns/elem avg)

=== N=1000000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=5.045 ms, med=5.081 ms, avg=5.094 ms, sd=0.043 ms (5.094 ns/elem avg)
  func2: min=4.864 ms, med=4.903 ms, avg=4.934 ms, sd=0.147 ms (4.934 ns/elem avg)
  [Run 2]
  func1: min=5.025 ms, med=5.088 ms, avg=5.124 ms, sd=0.111 ms (5.124 ns/elem avg)
  func2: min=4.817 ms, med=4.864 ms, avg=4.870 ms, sd=0.027 ms (4.870 ns/elem avg)
  [Run 3]
  func1: min=5.008 ms, med=5.082 ms, avg=5.093 ms, sd=0.047 ms (5.093 ns/elem avg)
  func2: min=4.818 ms, med=4.906 ms, avg=4.904 ms, sd=0.048 ms (4.904 ns/elem avg)
  [Run 4]
  func1: min=5.069 ms, med=5.217 ms, avg=5.218 ms, sd=0.063 ms (5.218 ns/elem avg)
  func2: min=4.845 ms, med=4.904 ms, avg=4.956 ms, sd=0.260 ms (4.956 ns/elem avg)
  [Run 5]
  func1: min=5.044 ms, med=5.158 ms, avg=5.172 ms, sd=0.095 ms (5.172 ns/elem avg)
  func2: min=4.856 ms, med=4.927 ms, avg=4.920 ms, sd=0.040 ms (4.920 ns/elem avg)

=== N=10000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=0.055 ms, med=0.056 ms, avg=0.056 ms, sd=0.001 ms (5.638 ns/elem avg)
  func2: min=0.055 ms, med=0.056 ms, avg=0.056 ms, sd=0.000 ms (5.581 ns/elem avg)
  [Run 2]
  func1: min=0.055 ms, med=0.056 ms, avg=0.056 ms, sd=0.001 ms (5.596 ns/elem avg)
  func2: min=0.055 ms, med=0.056 ms, avg=0.056 ms, sd=0.002 ms (5.624 ns/elem avg)
  [Run 3]
  func1: min=0.056 ms, med=0.057 ms, avg=0.057 ms, sd=0.001 ms (5.712 ns/elem avg)
  func2: min=0.056 ms, med=0.058 ms, avg=0.059 ms, sd=0.002 ms (5.853 ns/elem avg)
  [Run 4]
  func1: min=0.057 ms, med=0.064 ms, avg=0.064 ms, sd=0.005 ms (6.426 ns/elem avg)
  func2: min=0.055 ms, med=0.056 ms, avg=0.057 ms, sd=0.003 ms (5.746 ns/elem avg)
  [Run 5]
  func1: min=0.055 ms, med=0.056 ms, avg=0.059 ms, sd=0.009 ms (5.858 ns/elem avg)
  func2: min=0.055 ms, med=0.057 ms, avg=0.057 ms, sd=0.001 ms (5.673 ns/elem avg)

=== N=100000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=0.404 ms, med=0.413 ms, avg=0.413 ms, sd=0.005 ms (4.130 ns/elem avg)
  func2: min=0.603 ms, med=0.608 ms, avg=0.613 ms, sd=0.011 ms (6.127 ns/elem avg)
  [Run 2]
  func1: min=0.400 ms, med=0.410 ms, avg=0.418 ms, sd=0.020 ms (4.183 ns/elem avg)
  func2: min=0.385 ms, med=0.396 ms, avg=0.402 ms, sd=0.024 ms (4.024 ns/elem avg)
  [Run 3]
  func1: min=0.397 ms, med=0.402 ms, avg=0.403 ms, sd=0.005 ms (4.027 ns/elem avg)
  func2: min=0.370 ms, med=0.387 ms, avg=0.392 ms, sd=0.020 ms (3.919 ns/elem avg)
  [Run 4]
  func1: min=0.397 ms, med=0.403 ms, avg=0.403 ms, sd=0.005 ms (4.034 ns/elem avg)
  func2: min=0.388 ms, med=0.390 ms, avg=0.393 ms, sd=0.007 ms (3.928 ns/elem avg)
  [Run 5]
  func1: min=0.400 ms, med=0.404 ms, avg=0.405 ms, sd=0.004 ms (4.047 ns/elem avg)
  func2: min=0.369 ms, med=0.376 ms, avg=0.383 ms, sd=0.017 ms (3.828 ns/elem avg)

=== N=1000000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=5.062 ms, med=5.159 ms, avg=5.178 ms, sd=0.123 ms (5.178 ns/elem avg)
  func2: min=4.897 ms, med=4.988 ms, avg=4.979 ms, sd=0.040 ms (4.979 ns/elem avg)
  [Run 2]
  func1: min=5.030 ms, med=5.101 ms, avg=5.160 ms, sd=0.182 ms (5.160 ns/elem avg)
  func2: min=4.797 ms, med=4.949 ms, avg=4.995 ms, sd=0.154 ms (4.995 ns/elem avg)
  [Run 3]
  func1: min=4.999 ms, med=5.145 ms, avg=5.148 ms, sd=0.089 ms (5.148 ns/elem avg)
  func2: min=4.855 ms, med=4.980 ms, avg=4.983 ms, sd=0.109 ms (4.983 ns/elem avg)
  [Run 4]
  func1: min=5.100 ms, med=5.180 ms, avg=5.190 ms, sd=0.077 ms (5.190 ns/elem avg)
  func2: min=4.855 ms, med=4.912 ms, avg=4.944 ms, sd=0.108 ms (4.944 ns/elem avg)
  [Run 5]
  func1: min=5.052 ms, med=5.151 ms, avg=5.191 ms, sd=0.184 ms (5.191 ns/elem avg)
  func2: min=4.832 ms, med=4.925 ms, avg=4.926 ms, sd=0.060 ms (4.926 ns/elem avg)

=== N=10000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=0.218 ms, med=0.226 ms, avg=0.228 ms, sd=0.007 ms (22.794 ns/elem avg)
  func2: min=0.217 ms, med=0.236 ms, avg=0.237 ms, sd=0.012 ms (23.691 ns/elem avg)
  [Run 2]
  func1: min=0.219 ms, med=0.227 ms, avg=0.229 ms, sd=0.007 ms (22.880 ns/elem avg)
  func2: min=0.224 ms, med=0.234 ms, avg=0.233 ms, sd=0.004 ms (23.308 ns/elem avg)
  [Run 3]
  func1: min=0.214 ms, med=0.230 ms, avg=0.229 ms, sd=0.010 ms (22.866 ns/elem avg)
  func2: min=0.217 ms, med=0.227 ms, avg=0.234 ms, sd=0.031 ms (23.428 ns/elem avg)
  [Run 4]
  func1: min=0.222 ms, med=0.230 ms, avg=0.232 ms, sd=0.009 ms (23.168 ns/elem avg)
  func2: min=0.217 ms, med=0.231 ms, avg=0.230 ms, sd=0.005 ms (23.012 ns/elem avg)
  [Run 5]
  func1: min=0.224 ms, med=0.236 ms, avg=0.241 ms, sd=0.028 ms (24.096 ns/elem avg)
  func2: min=0.221 ms, med=0.233 ms, avg=0.235 ms, sd=0.011 ms (23.506 ns/elem avg)

=== N=100000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=1.901 ms, med=2.199 ms, avg=2.501 ms, sd=0.572 ms (25.011 ns/elem avg)
  func2: min=1.960 ms, med=2.090 ms, avg=2.099 ms, sd=0.069 ms (20.989 ns/elem avg)
  [Run 2]
  func1: min=1.931 ms, med=2.075 ms, avg=2.089 ms, sd=0.111 ms (20.891 ns/elem avg)
  func2: min=1.990 ms, med=2.093 ms, avg=2.096 ms, sd=0.063 ms (20.964 ns/elem avg)
  [Run 3]
  func1: min=1.943 ms, med=2.076 ms, avg=2.088 ms, sd=0.076 ms (20.879 ns/elem avg)
  func2: min=2.067 ms, med=2.175 ms, avg=2.177 ms, sd=0.074 ms (21.772 ns/elem avg)
  [Run 4]
  func1: min=1.913 ms, med=2.132 ms, avg=2.118 ms, sd=0.090 ms (21.176 ns/elem avg)
  func2: min=2.099 ms, med=2.167 ms, avg=2.163 ms, sd=0.032 ms (21.633 ns/elem avg)
  [Run 5]
  func1: min=1.996 ms, med=2.213 ms, avg=2.195 ms, sd=0.111 ms (21.946 ns/elem avg)
  func2: min=1.903 ms, med=2.091 ms, avg=2.108 ms, sd=0.129 ms (21.077 ns/elem avg)

=== N=1000000, trials=20, warmup=3, repeat=5, random ===
  [Run 1]
  func1: min=20.812 ms, med=21.989 ms, avg=21.873 ms, sd=0.577 ms (21.873 ns/elem avg)
  func2: min=20.983 ms, med=22.185 ms, avg=21.991 ms, sd=0.409 ms (21.991 ns/elem avg)
  [Run 2]
  func1: min=21.573 ms, med=22.237 ms, avg=22.199 ms, sd=0.282 ms (22.199 ns/elem avg)
  func2: min=21.258 ms, med=22.989 ms, avg=22.826 ms, sd=0.590 ms (22.826 ns/elem avg)
  [Run 3]
  func1: min=21.658 ms, med=22.340 ms, avg=22.284 ms, sd=0.339 ms (22.284 ns/elem avg)
  func2: min=20.924 ms, med=22.830 ms, avg=22.528 ms, sd=0.713 ms (22.528 ns/elem avg)
  [Run 4]
  func1: min=20.875 ms, med=21.734 ms, avg=21.899 ms, sd=0.697 ms (21.899 ns/elem avg)
  func2: min=20.881 ms, med=22.488 ms, avg=22.730 ms, sd=1.579 ms (22.730 ns/elem avg)
  [Run 5]
  func1: min=21.993 ms, med=24.340 ms, avg=24.083 ms, sd=0.929 ms (24.083 ns/elem avg)
  func2: min=22.342 ms, med=24.316 ms, avg=24.210 ms, sd=1.596 ms (24.210 ns/elem avg)

You can see that even after a large batch of random-data tests, there is no obvious, consistent pattern in the speed difference. The main factor to account for here is cache access behavior, and that alone is plenty to chew on. I hope to get a chance to analyze this properly later; right now it is a bit beyond me.