Erlang Mnesia dirty-read performance test


Mnesia offers two ways to dirty-read data: by primary key with dirty_read(Tab, Key), or by another indexed field with dirty_index_read(Tab, SecondaryKey, Position). I initially assumed the two were equally fast, but to my surprise the first turned out to be nearly 3 times faster than the second. The test uses 10 million records and 100,000 processes accessing them concurrently.
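For clarity, here is a minimal sketch of the two calls (the demo table, field names, and function names below are hypothetical and for illustration only; the actual test module follows):

-module (demo).
-export ([read_by_key/1, read_by_index/1]).

-record (demo, {userid, pid}).    %% stored as the tuple {demo, UserId, Pid}

%% primary-key lookup: a single lookup in the table itself
read_by_key(UserId) ->
    mnesia:dirty_read(demo, UserId).

%% secondary-index lookup: requires mnesia:add_table_index(demo, pid) beforehand;
%% #demo.pid evaluates to 3, the indexed field's position in the record tuple
read_by_index(Pid) ->
    mnesia:dirty_index_read(demo, Pid, #demo.pid).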

The test code:

-module (tm).

-behaviour (gen_server).

% APIs
-export([start_link/0, find/1, find1/1, ct/0, ct1/0]).

% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
          terminate/2, code_change/3]).

-record (test_mnesia, {userid, pid}).


start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
find(UserId) ->
    mnesia:dirty_read(test_mnesia, UserId).
find1(UserId) ->
    mnesia:dirty_index_read(test_mnesia, UserId, #test_mnesia.pid).
ct() ->
    tc:ct(tm, find, [<<"user_1@android">>], 100000).
ct1() ->
    tc:ct(tm, find1, [<<"user_1@android">>], 100000).
    
    
%% ===================================================================
%% gen_server callbacks
%% ===================================================================

init([]) ->
    %% rebuild the schema from scratch, create the table with a secondary
    %% index on pid, and preload 10,000,000 records before returning
    mnesia:stop(),
    mnesia:delete_schema([node()]),
    mnesia:create_schema([node()]),
    mnesia:start(),
    {atomic,ok} = mnesia:create_table(test_mnesia, [{ram_copies, [node()]},
                                                    {attributes, record_info(fields, test_mnesia)}]),
    {atomic,ok} = mnesia:add_table_index(test_mnesia, pid),
    loop(10000000),
    {ok, []}.
handle_call(_Request, _From, State) ->
    {reply, nomatch, State}.
handle_cast(_Msg, State) ->
    {noreply, State}.
handle_info(_Info, State) ->
    {noreply, State}.
terminate(_Reason, _State) -> ok.
code_change(_OldVer, State, _Extra) -> {ok, State}.


%% ===================================================================
%% Internal functions
%% ===================================================================
loop(N) when N > 0 ->
    UserId = <<"user_", (integer_to_binary(N))/binary, "@android">>,
    mnesia:dirty_write(#test_mnesia{userid = UserId, pid = UserId}),
    loop(N - 1);
loop(0) ->
    ok.
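
For reference, a sketch of how the test can be driven from the Erlang shell (assuming both modules are compiled and on the code path; note that tm:start_link/0 only returns after all 10,000,000 records have been written):

1> tm:start_link().   %% rebuilds the schema and preloads 10,000,000 records
2> tm:ct().           %% 100,000 concurrent processes calling dirty_read
3> tm:ct1().          %% 100,000 concurrent processes calling dirty_index_read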

The tc module:

%% ===================================================================
%% Author xiaotie
%% 2015-07-30
%% Single-process loop test: LoopTimes is the number of iterations
%% tc:t(Module, Function, ArgsList, LoopTimes).
%% Concurrent test: SpawnProcessesCount is the number of spawned processes
%% tc:ct(Module, Function, ArgsList, SpawnProcessesCount).
%% ===================================================================

-module (tc).

-export ([t/4, ct/4]).


tc(M, F, A) ->
    {Microsecond, _} = timer:tc (M, F, A),
    Microsecond.

distribution(List, Aver) ->
    distribution(List, Aver, 0, 0).
distribution([H|T], Aver, Greater, Less) ->
    case H > Aver of
        true ->
            distribution(T, Aver, Greater + 1, Less);
        false ->
            distribution(T, Aver, Greater, Less + 1)
    end;
distribution([], _Aver, Greater, Less) ->
    {Greater, Less}.

%% ===================================================================
%% test: one process test N times
%% ===================================================================

t(M, F, A, N) ->
    {Max, Min, Sum, Aver, Greater, Less} = loop ({M, F, A}, N),
    io:format ("=====================~n"),
    io:format ("execute [~p] times of {~p, ~p, ~p}:~n", [N, M, F, A]),
    io:format ("Maximum: ~p(μs)\t~p(s)~n", [Max, Max / 1000000]),
    io:format ("Minimum: ~p(μs)\t~p(s)~n", [Min, Min / 1000000]),
    io:format ("Sum: ~p(μs)\t~p(s)~n", [Sum, Sum / 1000000]),
    io:format ("Average: ~p(μs)\t~p(s)~n", [Aver, Aver / 1000000]),
    io:format ("Greater: ~p~nLess: ~p~n", [Greater, Less]),
    io:format ("=====================~n").


loop({M, F, A}, N) ->
    loop ({M, F, A}, N, 1, 0, 0, 0, []).

loop({M, F, A}, N, I, Max, Min, Sum, List) when N >= I ->
    Microsecond = tc (M, F, A),
    NewSum = Sum + Microsecond,
    if
        Max == 0 ->
            NewMax = NewMin = Microsecond;
        Max < Microsecond ->
            NewMax = Microsecond,
            NewMin = Min;
        Min > Microsecond ->
            NewMax = Max,
            NewMin = Microsecond;
        true ->
            NewMax = Max,
            NewMin = Min
    end,
    loop ({M, F, A}, N, I + 1, NewMax, NewMin, NewSum, [Microsecond|List]);
loop({_M, _F, _A}, N, _I, Max, Min, Sum, List) ->
    Aver = Sum / N,
    {Greater, Less} = distribution(List, Aver),
    {Max, Min, Sum, Aver, Greater, Less}.

%% ===================================================================
%% Concurrency test: N processes each test one time
%% ===================================================================

ct(M, F, A, N) ->
    {Max, Min, Sum, Aver, Greater, Less} = cloop ({M, F, A}, N),
    io:format ("=====================~n"),
    io:format ("spawn [~p] processes of {~p, ~p, ~p}:~n", [N, M, F, A]),
    io:format ("Maximum: ~p(μs)\t~p(s)~n", [Max, Max / 1000000]),
    io:format ("Minimum: ~p(μs)\t~p(s)~n", [Min, Min / 1000000]),
    io:format ("Sum: ~p(μs)\t~p(s)~n", [Sum, Sum / 1000000]),
    io:format ("Average: ~p(μs)\t~p(s)~n", [Aver, Aver / 1000000]),
    io:format ("Greater: ~p~nLess: ~p~n", [Greater, Less]),
    io:format ("=====================~n").


cloop({M, F, A}, N) ->
    CollectorPid = self(),
    ok = loop_spawn({M, F, A}, CollectorPid, N),
    collector(0, 0, 0, N, 1, []).


loop_spawn({M, F, A}, CollectorPid, N) when N > 0 ->
    spawn_link(fun() -> worker({M, F, A}, CollectorPid) end),
    loop_spawn({M, F, A}, CollectorPid, N - 1);
loop_spawn(_, _, 0) ->
    ok.

collector(Max, Min, Sum, N, I, List) when N >= I ->
    receive
        {result, Microsecond} ->
            NewSum = Sum + Microsecond,
            if
                Max == 0 ->
                    NewMax = NewMin = Microsecond;
                Max < Microsecond ->
                    NewMax = Microsecond,
                    NewMin = Min;
                Min > Microsecond ->
                    NewMax = Max,
                    NewMin = Microsecond;
                true ->
                    NewMax = Max,
                    NewMin = Min
            end,
            collector(NewMax, NewMin, NewSum, N, I + 1, [Microsecond|List])
    after
        %% give up if a worker has not reported back within 10 seconds;
        %% the caller's pattern match in ct/4 will then fail with badmatch
        10000 ->
            ok
    end;
collector(Max, Min, Sum, N, _, List) ->
    Aver = Sum / N,
    {Greater, Less} = distribution(List, Aver),
    {Max, Min, Sum, Aver, Greater, Less}.


worker({M, F, A}, CollectorPid) ->
    Microsecond = tc(M, F, A),
    CollectorPid ! {result, Microsecond}.
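
The article only exercises the concurrent ct/4 path (through tm:ct/0 and tm:ct1/0), but tc:t/4 can give a single-process baseline with the same arguments, for example (a usage sketch, not part of the original test run):

1> tc:t(tm, find, [<<"user_1@android">>], 100000).    %% one process, 100,000 dirty_read calls
2> tc:t(tm, find1, [<<"user_1@android">>], 100000).   %% one process, 100,000 dirty_index_read calls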

With 100,000 concurrent processes calling find and find1, the results were:

=====================
spawn [100000] processes of {tm, find, [<<"user_1@android">>]}:
Maximum: 1686(μs)       0.001686(s)
Minimum: 1(μs)  1.0e-6(s)
Sum: 237287(μs) 0.237287(s)
Average: 2.37287(μs)    2.3728699999999998e-6(s)
Greater: 28017
Less: 71983
=====================
ok
3> tm:ct1().
=====================
spawn [100000] processes of {tm, find1, [<<"user_1@android">>]}:
Maximum: 1862(μs)       0.001862(s)
Minimum: 4(μs)  4.0e-6(s)
Sum: 741330(μs) 0.74133(s)
Average: 7.4133(μs)     7.4133e-6(s)
Greater: 29711
Less: 70289
=====================

Measured over several runs, dirty_read consistently came in at around 2 μs per call and dirty_index_read at around 7 μs, so the difference is roughly 3x. That gap is plausible: an index read first looks up the matching keys through the secondary index and then fetches the records by key, while a key read is a single lookup.

Summary:

Based on these tests, when you want fast reads from Mnesia, make the field you query most often the table key whenever possible.
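
As a quick sanity check (a sketch added here, not part of the original test; it assumes the tm module above is running), you can confirm which positions are indexed and compare the two read paths directly from the shell:

1> mnesia:table_info(test_mnesia, index).                          %% expected: [3], the position of pid
2> mnesia:dirty_read(test_mnesia, <<"user_1@android">>).           %% single key lookup
3> mnesia:dirty_index_read(test_mnesia, <<"user_1@android">>, pid). %% index lookup first, then key lookup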