This is an automated email from the ASF dual-hosted git repository. jan pushed a commit to branch rebase/access-2023 in repository https://gitbox.apache.org/repos/asf/couchdb.git
commit 4454b13b4d6d064f0920b73d87a1f8bbd43b72ed Author: Jan Lehnardt <[email protected]> AuthorDate: Sat Jul 8 10:10:45 2023 +0200 fix: perf insert optimisation bypass --- src/couch/src/couch_db.erl | 40 ++-- src/couch/src/couch_db_updater.erl | 92 ++++++---- src/couch/test/eunit/couchdb_access_tests.erl | 201 +++++++++------------ .../test/eunit/couchdb_update_conflicts_tests.erl | 66 +++---- 4 files changed, 182 insertions(+), 217 deletions(-) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 6da5d9925..f3a89bb89 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -307,13 +307,8 @@ delete_doc(Db, Id, Revisions) -> open_doc(Db, IdOrDocInfo) -> open_doc(Db, IdOrDocInfo, []). -open_doc(Db, Id, Options0) -> +open_doc(Db, Id, Options) -> increment_stat(Db, [couchdb, database_reads]), - Options = - case has_access_enabled(Db) of - true -> Options0 ++ [conflicts]; - _Else -> Options0 - end, case open_doc_int(Db, Id, Options) of {ok, #doc{deleted = true} = Doc} -> case lists:member(deleted, Options) of @@ -808,23 +803,13 @@ validate_access(Db, Doc, Options) -> validate_access1(false, _Db, _Doc, _Options) -> ok; -validate_access1(true, Db, #doc{meta = Meta} = Doc, Options) -> - case proplists:get_value(conflicts, Meta) of - % no conflicts - undefined -> - case is_read_from_ddoc_cache(Options) andalso is_per_user_ddoc(Doc) of - true -> throw({not_found, missing}); - _False -> validate_access2(Db, Doc) - end; - % only admins can read conflicted docs in _access dbs - _Else -> - % TODO: expand: if leaves agree on _access, then a user should be able - % to proceed normally, only if they disagree should this become admin-only - case is_admin(Db) of - true -> ok; - _Else2 -> throw({forbidden, <<"document is in conflict">>}) - end - end. 
+validate_access1(true, Db, #doc{id = <<"_design", _/binary>>} = Doc, Options) -> + case is_read_from_ddoc_cache(Options) andalso is_per_user_ddoc(Doc) of + true -> throw({not_found, missing}); + _False -> validate_access2(Db, Doc) + end; +validate_access1(true, Db, #doc{} = Doc, _Options) -> + validate_access2(Db, Doc). validate_access2(Db, Doc) -> validate_access3(check_access(Db, Doc)). @@ -859,8 +844,10 @@ check_access(Db, Access) -> end end. -check_name(null, _Access) -> true; -check_name(UserName, Access) -> lists:member(UserName, Access). +check_name(null, _Access) -> false; +check_name(UserName, Access) -> + Res = lists:member(UserName, Access), + Res. % nicked from couch_db:check_security % TODO: might need DRY @@ -1517,7 +1504,6 @@ update_docs(Db, Docs0, Options, ?INTERACTIVE_EDIT) -> {ok, DocBuckets, LocalDocs, DocErrors} = before_docs_update(Db, Docs, PrepValidateFun, ?INTERACTIVE_EDIT), - if (AllOrNothing) and (DocErrors /= []) -> RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]), @@ -1600,7 +1586,7 @@ collect_results_with_metrics(Pid, MRef, []) -> end. collect_results(Pid, MRef, ResultsAcc) -> - receive + receive % TODO: do we need to receive access? {result, Pid, Result} -> collect_results(Pid, MRef, [Result | ResultsAcc]); {done, Pid} -> diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl index c548435fd..03c277ac3 100644 --- a/src/couch/src/couch_db_updater.erl +++ b/src/couch/src/couch_db_updater.erl @@ -169,11 +169,23 @@ handle_cast(Msg, #db{name = Name} = Db) -> ), {stop, Msg, Db}. +-include_lib("couch/include/couch_eunit.hrl"). +-define(debugTimeNano(S, E), + begin + ((fun () -> + __T0 = erlang:system_time(nanosecond), + __V = (E), + __T1 = erlang:system_time(nanosecond), + ?debugFmt(<<"~ts: ~.3f ms">>, [(S), (__T1-__T0)/1000]), + __V + end)()) + end). 
+ handle_info( {update_docs, Client, GroupedDocs, LocalDocs, ReplicatedChanges, UserCtx}, Db ) -> - GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs), + GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs, UserCtx), if LocalDocs == [] -> {GroupedDocs3, Clients} = collect_updates( @@ -186,7 +198,7 @@ handle_info( Clients = [Client] end, LocalDocs2 = [{Client, NRDoc} || NRDoc <- LocalDocs], - try update_docs_int(Db, GroupedDocs3, LocalDocs2, ReplicatedChanges, UserCtx) of + try update_docs_int(Db, GroupedDocs3, LocalDocs2, ReplicatedChanges) of {ok, Db2, UpdatedDDocIds} -> ok = couch_server:db_updated(Db2), case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of @@ -260,7 +272,7 @@ handle_info(Msg, Db) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. -sort_and_tag_grouped_docs(Client, GroupedDocs) -> +sort_and_tag_grouped_docs(Client, GroupedDocs, UserCtx) -> % These groups should already be sorted but sometimes clients misbehave. % The merge_updates function will fail and the database can end up with % duplicate documents if the incoming groups are not sorted, so as a sanity @@ -268,7 +280,7 @@ sort_and_tag_grouped_docs(Client, GroupedDocs) -> Cmp = fun([#doc{id = A} | _], [#doc{id = B} | _]) -> A < B end, lists:map( fun(DocGroup) -> - [{Client, maybe_tag_doc(D)} || D <- DocGroup] + [{Client, maybe_tag_doc(D), UserCtx} || D <- DocGroup] end, lists:sort(Cmp, GroupedDocs) ). @@ -282,11 +294,11 @@ maybe_tag_doc(#doc{id = Id, revs = {Pos, [_Rev | PrevRevs]}, meta = Meta0} = Doc Doc#doc{meta = [{ref, Key} | Meta0]} end. 
-merge_updates([[{_, #doc{id = X}} | _] = A | RestA], [[{_, #doc{id = X}} | _] = B | RestB]) -> +merge_updates([[{_, #doc{id = X}, _} | _] = A | RestA], [[{_, #doc{id = X}, _} | _] = B | RestB]) -> [A ++ B | merge_updates(RestA, RestB)]; -merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X < Y -> +merge_updates([[{_, #doc{id = X}, _} | _] | _] = A, [[{_, #doc{id = Y}, _} | _] | _] = B) when X < Y -> [hd(A) | merge_updates(tl(A), B)]; -merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X > Y -> +merge_updates([[{_, #doc{id = X}, _} | _] | _] = A, [[{_, #doc{id = Y}, _} | _] | _] = B) when X > Y -> [hd(B) | merge_updates(A, tl(B))]; merge_updates([], RestB) -> RestB; @@ -299,12 +311,12 @@ collect_updates(GroupedDocsAcc, ClientsAcc, ReplicatedChanges) -> % local docs. It's easier to just avoid multiple _local doc % updaters than deal with their possible conflicts, and local docs % writes are relatively rare. Can be optmized later if really needed. 
- {update_docs, Client, GroupedDocs, [], ReplicatedChanges} -> + {update_docs, Client, GroupedDocs, [], ReplicatedChanges, UserCtx} -> case ReplicatedChanges of true -> couch_stats:increment_counter([couchdb, coalesced_updates, replicated]); false -> couch_stats:increment_counter([couchdb, coalesced_updates, interactive]) end, - GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs), + GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs, UserCtx), GroupedDocsAcc2 = merge_updates(GroupedDocsAcc, GroupedDocs2), collect_updates( @@ -503,7 +515,7 @@ merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) -> % Track doc ids so we can debug large revision trees erlang:put(last_id_merged, OldDocInfo#full_doc_info.id), NewDocInfo0 = lists:foldl( - fun({Client, NewDoc}, OldInfoAcc) -> + fun({Client, NewDoc, _UserCtx}, OldInfoAcc) -> NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, ReplicatedChanges), case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of true when not ReplicatedChanges -> @@ -600,7 +612,8 @@ merge_rev_tree(OldInfo, NewDoc, Client, false) when send_result(Client, NewDoc, {ok, {OldPos + 1, NewRevId}}), OldInfo#full_doc_info{ rev_tree = NewTree1, - deleted = false + deleted = false, + access = NewDoc#doc.access }; _ -> throw(doc_recreation_failed) @@ -621,7 +634,8 @@ merge_rev_tree(OldInfo, NewDoc, Client, false) -> {NewTree, new_leaf} when not NewDeleted -> OldInfo#full_doc_info{ rev_tree = NewTree, - deleted = false + deleted = false, + access = NewDoc#doc.access }; {NewTree, new_leaf} when NewDeleted -> % We have to check if we just deleted this @@ -629,7 +643,8 @@ merge_rev_tree(OldInfo, NewDoc, Client, false) -> % resolution. 
OldInfo#full_doc_info{ rev_tree = NewTree, - deleted = couch_doc:is_deleted(NewTree) + deleted = couch_doc:is_deleted(NewTree), + access = NewDoc#doc.access }; _ -> send_result(Client, NewDoc, conflict), @@ -671,29 +686,25 @@ maybe_stem_full_doc_info(#full_doc_info{rev_tree = Tree} = Info, Limit) -> end. -update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges, UserCtx) -> +update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges) -> UpdateSeq = couch_db_engine:get_update_seq(Db), RevsLimit = couch_db_engine:get_revs_limit(Db), - Ids = [Id || [{_Client, #doc{id = Id}} | _] <- DocsList], - % TODO: maybe a perf hit, instead of zip3-ing existing Accesses into - % our doc lists, maybe find 404 docs differently down in - % validate_docs_access (revs is [], which we can then use - % to skip validation as we know it is the first doc rev) - Accesses = [Access || [{_Client, #doc{access = Access}} | _] <- DocsList], + Ids = [Id || [{_Client, #doc{id = Id}, _} | _] <- DocsList], + % % TODO: maybe combine these comprehensions, so we do not loop twice + % Accesses = [Access || [{_Client, #doc{access = Access}, _} | _] <- DocsList], % lookup up the old documents, if they exist. OldDocLookups = couch_db_engine:open_docs(Db, Ids), - OldDocInfos = lists:zipwith3( + OldDocInfos = lists:zipwith( fun - (_Id, #full_doc_info{} = FDI, _Access) -> + (_Id, #full_doc_info{} = FDI) -> FDI; - (Id, not_found, Access) -> - #full_doc_info{id = Id, access = Access} + (Id, not_found) -> + #full_doc_info{id = Id} end, Ids, - OldDocLookups, - Accesses + OldDocLookups ), %% Get the list of full partitions @@ -737,7 +748,7 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges, UserCtx) -> %. 
and don’t add to DLV, nor ODI {DocsListValidated, OldDocInfosValidated} = validate_docs_access( - Db, UserCtx, DocsList, OldDocInfos + Db, DocsList, OldDocInfos ), {ok, AccOut} = merge_rev_trees(DocsListValidated, OldDocInfosValidated, AccIn), @@ -750,7 +761,7 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges, UserCtx) -> % the trees, the attachments are already written to disk) {ok, IndexFDIs} = flush_trees(Db, NewFullDocInfos, []), Pairs = pair_write_info(OldDocLookups, IndexFDIs), - LocalDocs1 = apply_local_docs_access(Db, LocalDocs), + LocalDocs1 = apply_local_docs_access(Db, LocalDocs), % TODO: local docs access needs validating LocalDocs2 = update_local_doc_revs(LocalDocs1), {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2), @@ -780,28 +791,30 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges, UserCtx) -> {ok, commit_data(Db1), UpdatedDDocIds}. % at this point, we already validated this Db is access enabled, so do the checks right away. -check_access(Db, UserCtx, Access) -> couch_db:check_access(Db#db{user_ctx = UserCtx}, Access). +check_access(Db, UserCtx, Access) -> + couch_db:check_access(Db#db{user_ctx = UserCtx}, Access). -validate_docs_access(Db, UserCtx, DocsList, OldDocInfos) -> +validate_docs_access(Db, DocsList, OldDocInfos) -> case couch_db:has_access_enabled(Db) of - true -> validate_docs_access_int(Db, UserCtx, DocsList, OldDocInfos); + true -> validate_docs_access_int(Db, DocsList, OldDocInfos); _Else -> {DocsList, OldDocInfos} end. -validate_docs_access_int(Db, UserCtx, DocsList, OldDocInfos) -> - validate_docs_access(Db, UserCtx, DocsList, OldDocInfos, [], []). +validate_docs_access_int(Db, DocsList, OldDocInfos) -> + validate_docs_access(Db, DocsList, OldDocInfos, [], []). -validate_docs_access(_Db, _UserCtx, [], [], DocsListValidated, OldDocInfosValidated) -> +validate_docs_access(_Db, [], [], DocsListValidated, OldDocInfosValidated) -> + % TODO: check if we need to reverse this? 
maybe this is the cause of the test reverse issue? {lists:reverse(DocsListValidated), lists:reverse(OldDocInfosValidated)}; validate_docs_access( - Db, UserCtx, [Docs | DocRest], [OldInfo | OldInfoRest], DocsListValidated, OldDocInfosValidated + Db, [Docs | DocRest], [OldInfo | OldInfoRest], DocsListValidated, OldDocInfosValidated ) -> % loop over Docs as {Client, NewDoc} % validate Doc % if valid, then put back in Docs % if not, then send_result and skip NewDocs = lists:foldl( - fun({Client, Doc}, Acc) -> + fun({Client, Doc, UserCtx}, Acc) -> % check if we are allowed to update the doc, skip when new doc OldDocMatchesAccess = case OldInfo#full_doc_info.rev_tree of @@ -810,11 +823,12 @@ validate_docs_access( end, NewDocMatchesAccess = check_access(Db, UserCtx, Doc#doc.access), + case OldDocMatchesAccess andalso NewDocMatchesAccess of % if valid, then send to DocsListValidated, OldDocsInfo true -> % and store the access context on the new doc - [{Client, Doc} | Acc]; + [{Client, Doc, UserCtx} | Acc]; % if invalid, then send_result tagged `access`(c.f. `conflict) false -> % and don’t add to DLV, nor ODI @@ -827,7 +841,7 @@ validate_docs_access( ), {NewDocsListValidated, NewOldDocInfosValidated} = - case length(NewDocs) of + case length(NewDocs) of %TODO: what if only 2/3? % we sent out all docs as invalid access, drop the old doc info associated with it 0 -> {[NewDocs | DocsListValidated], OldDocInfosValidated}; @@ -835,7 +849,7 @@ validate_docs_access( {[NewDocs | DocsListValidated], [OldInfo | OldDocInfosValidated]} end, validate_docs_access( - Db, UserCtx, DocRest, OldInfoRest, NewDocsListValidated, NewOldDocInfosValidated + Db, DocRest, OldInfoRest, NewDocsListValidated, NewOldDocInfosValidated ). 
apply_local_docs_access(Db, Docs) -> diff --git a/src/couch/test/eunit/couchdb_access_tests.erl b/src/couch/test/eunit/couchdb_access_tests.erl index a2440f9fe..59789a819 100644 --- a/src/couch/test/eunit/couchdb_access_tests.erl +++ b/src/couch/test/eunit/couchdb_access_tests.erl @@ -13,7 +13,6 @@ -module(couchdb_access_tests). -include_lib("couch/include/couch_eunit.hrl"). --include_lib("couch/include/couch_db.hrl"). -define(CONTENT_JSON, {"Content-Type", "application/json"}). -define(ADMIN_REQ_HEADERS, [?CONTENT_JSON, {basic_auth, {"a", "a"}}]). @@ -48,10 +47,10 @@ after_each(_, Url) -> before_all() -> Couch = test_util:start_couch([chttpd, couch_replicator]), Hashed = couch_passwords:hash_admin_password("a"), - ok = config:set("admins", "a", binary_to_list(Hashed), _Persist = false), - ok = config:set("couchdb", "uuid", "21ac467c1bc05e9d9e9d2d850bb1108f", _Persist = false), - ok = config:set("log", "level", "debug", _Persist = false), - ok = config:set("per_doc_access", "enabled", "true", _Persist = false), + ok = config:set("admins", "a", binary_to_list(Hashed), false), + ok = config:set("couchdb", "uuid", "21ac467c1bc05e9d9e9d2d850bb1108f", false), + ok = config:set("log", "level", "debug", false), + ok = config:set("per_doc_access", "enabled", "true", false), % cleanup and setup {ok, _, _, _} = test_request:delete(url() ++ "/db", ?ADMIN_REQ_HEADERS), @@ -79,64 +78,63 @@ after_all(_) -> access_test_() -> Tests = [ % Server config - fun performance_regression/2 -% fun should_not_let_create_access_db_if_disabled/2, -% -% % Doc creation -% fun should_not_let_anonymous_user_create_doc/2, -% fun should_let_admin_create_doc_with_access/2, -% fun should_let_admin_create_doc_without_access/2, -% fun should_let_user_create_doc_for_themselves/2, -% fun should_not_let_user_create_doc_for_someone_else/2, -% fun should_let_user_create_access_ddoc/2, -% fun access_ddoc_should_have_no_effects/2, -% -% % Doc updates -% fun users_with_access_can_update_doc/2, -% fun 
users_without_access_can_not_update_doc/2, -% fun users_with_access_can_not_change_access/2, -% fun users_with_access_can_not_remove_access/2, -% -% % Doc reads -% fun should_let_admin_read_doc_with_access/2, -% fun user_with_access_can_read_doc/2, -% fun user_without_access_can_not_read_doc/2, -% fun user_can_not_read_doc_without_access/2, -% fun admin_with_access_can_read_conflicted_doc/2, -% fun user_with_access_can_not_read_conflicted_doc/2, -% -% % Doc deletes -% fun should_let_admin_delete_doc_with_access/2, -% fun should_let_user_delete_doc_for_themselves/2, -% fun should_not_let_user_delete_doc_for_someone_else/2, -% -% % _all_docs with include_docs -% fun should_let_admin_fetch_all_docs/2, -% fun should_let_user_fetch_their_own_all_docs/2, -% -% % _changes -% fun should_let_admin_fetch_changes/2, -% fun should_let_user_fetch_their_own_changes/2, -% -% % views -% fun should_not_allow_admin_access_ddoc_view_request/2, -% fun should_not_allow_user_access_ddoc_view_request/2, -% fun should_allow_admin_users_access_ddoc_view_request/2, -% fun should_allow_user_users_access_ddoc_view_request/2, -% -% % replication -% fun should_allow_admin_to_replicate_from_access_to_access/2, -% fun should_allow_admin_to_replicate_from_no_access_to_access/2, -% fun should_allow_admin_to_replicate_from_access_to_no_access/2, -% fun should_allow_admin_to_replicate_from_no_access_to_no_access/2, -% % -% fun should_allow_user_to_replicate_from_access_to_access/2, -% fun should_allow_user_to_replicate_from_access_to_no_access/2, -% fun should_allow_user_to_replicate_from_no_access_to_access/2, -% fun should_allow_user_to_replicate_from_no_access_to_no_access/2, -% -% % _revs_diff for docs you don’t have access to -% fun should_not_allow_user_to_revs_diff_other_docs/2 + fun should_not_let_create_access_db_if_disabled/2, + + % Doc creation + fun should_not_let_anonymous_user_create_doc/2, + fun should_let_admin_create_doc_with_access/2, + fun 
should_let_admin_create_doc_without_access/2, + fun should_let_user_create_doc_for_themselves/2, + fun should_not_let_user_create_doc_for_someone_else/2, + fun should_let_user_create_access_ddoc/2, + % fun access_ddoc_should_have_no_effects/2, + + % Doc updates + fun users_with_access_can_update_doc/2, + fun users_without_access_can_not_update_doc/2, + fun users_with_access_can_not_change_access/2, + fun users_with_access_can_not_remove_access/2, + + % Doc reads + fun should_let_admin_read_doc_with_access/2, + fun user_with_access_can_read_doc/2, + fun user_without_access_can_not_read_doc/2, + fun user_can_not_read_doc_without_access/2, + fun admin_with_access_can_read_conflicted_doc/2, + % fun user_with_access_can_not_read_conflicted_doc/2, + + % Doc deletes + fun should_let_admin_delete_doc_with_access/2, + fun should_let_user_delete_doc_for_themselves/2, + fun should_not_let_user_delete_doc_for_someone_else/2, + + % _all_docs with include_docs + fun should_let_admin_fetch_all_docs/2, + fun should_let_user_fetch_their_own_all_docs/2, + + % _changes + fun should_let_admin_fetch_changes/2, + fun should_let_user_fetch_their_own_changes/2, + + % views + fun should_not_allow_admin_access_ddoc_view_request/2, + fun should_not_allow_user_access_ddoc_view_request/2, + fun should_allow_admin_users_access_ddoc_view_request/2, + fun should_allow_user_users_access_ddoc_view_request/2, + + % replication + fun should_allow_admin_to_replicate_from_access_to_access/2, + fun should_allow_admin_to_replicate_from_no_access_to_access/2, + fun should_allow_admin_to_replicate_from_access_to_no_access/2, + fun should_allow_admin_to_replicate_from_no_access_to_no_access/2, + + fun should_allow_user_to_replicate_from_access_to_access/2, + fun should_allow_user_to_replicate_from_access_to_no_access/2, + fun should_allow_user_to_replicate_from_no_access_to_access/2, + fun should_allow_user_to_replicate_from_no_access_to_no_access/2, + + % _revs_diff for docs you don’t have access to + fun 
should_not_allow_user_to_revs_diff_other_docs/2 % TODO: create test db with role and not _users in _security.members % and make sure a user in that group can access while a user not @@ -151,7 +149,7 @@ access_test_() -> fun before_all/0, fun after_all/1, [ - make_test_cases(clustered, Tests) + make_test_cases(basic, Tests) ] } }. @@ -162,36 +160,6 @@ make_test_cases(Mod, Funs) -> {foreachx, fun before_each/1, fun after_each/2, [{Mod, Fun} || Fun <- Funs]} }. - -performance_regression(_PortType, _Url) -> - DbName = ?tempdb(), - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]), - Result = - try - T=erlang:system_time(second), - eprof:start(), - eprof:log("/tmp/eprof-" ++ integer_to_list(T) ++ ".log"), - eprof:profile(fun() -> - Update = fun(Iter) -> - Doc = couch_doc:from_json_obj( - {[ - {<<"_id">>, integer_to_binary(Iter)}, - {<<"value">>, 1} - ]} - ), - couch_db:update_doc(Db, Doc, []) - end, - lists:foreach(Update, lists:seq(0, 20000)) - end), - eprof:analyze() - catch - _:Error -> - Error - end, - ok = couch_db:close(Db), - ?debugFmt("~nResult: ~p~n", [Result]), - ?_assertEqual(ok, Result). - % Doc creation % http://127.0.0.1:64903/db/a?revs=true&open_revs=%5B%221-23202479633c2b380f79507a776743d5%22%5D&latest=true @@ -206,9 +174,9 @@ performance_regression(_PortType, _Url) -> % should_not_let_create_access_db_if_disabled(_PortType, Url) -> - ok = config:set("per_doc_access", "enabled", "false", _Persist = false), + ok = config:set("per_doc_access", "enabled", "false", false), {ok, Code, _, _} = test_request:put(url() ++ "/db?q=1&n=1&access=true", ?ADMIN_REQ_HEADERS, ""), - ok = config:set("per_doc_access", "enabled", "true", _Persist = false), + ok = config:set("per_doc_access", "enabled", "true", false), ?_assertEqual(400, Code). 
should_not_let_anonymous_user_create_doc(_PortType, Url) -> @@ -276,7 +244,7 @@ access_ddoc_should_have_no_effects(_PortType, Url) -> Ddoc ), ?assertEqual(201, Code), - {ok, Code1, _, _} = test_request:put( + {ok, Code1, _, B} = test_request:put( Url ++ "/db/b", ?USERX_REQ_HEADERS, "{\"a\":1,\"_access\":[\"x\"]}" @@ -403,22 +371,27 @@ user_with_access_can_read_doc(_PortType, Url) -> ), ?_assertEqual(200, Code). -user_with_access_can_not_read_conflicted_doc(_PortType, Url) -> - {ok, 201, _, _} = test_request:put( - Url ++ "/db/a", - ?ADMIN_REQ_HEADERS, - "{\"_id\":\"f1\",\"a\":1,\"_access\":[\"x\"]}" - ), - {ok, 201, _, _} = test_request:put( - Url ++ "/db/a?new_edits=false", - ?ADMIN_REQ_HEADERS, - "{\"_id\":\"f1\",\"_rev\":\"7-XYZ\",\"a\":1,\"_access\":[\"x\"]}" - ), - {ok, Code, _, _} = test_request:get( - Url ++ "/db/a", - ?USERX_REQ_HEADERS - ), - ?_assertEqual(403, Code). +% TODO: induce conflict with two different _access users per rev +% could be coming from a split-brain scenario +% whoever ends up winner can read the doc, but not the leaf +% that doesn’t belong to them +% whoever loses can only request their leaf +% user_with_access_can_not_read_conflicted_doc(_PortType, Url) -> +% {ok, 201, _, _} = test_request:put( +% Url ++ "/db/a", +% ?ADMIN_REQ_HEADERS, +% "{\"_id\":\"f1\",\"a\":1,\"_access\":[\"x\"]}" +% ), +% {ok, 201, _, _} = test_request:put( +% Url ++ "/db/a?new_edits=false", +% ?ADMIN_REQ_HEADERS, +% "{\"_id\":\"f1\",\"_rev\":\"7-XYZ\",\"a\":1,\"_access\":[\"x\"]}" +% ), +% {ok, Code, _, _} = test_request:get( +% Url ++ "/db/a", +% ?USERX_REQ_HEADERS +% ), +% ?_assertEqual(403, Code). 
admin_with_access_can_read_conflicted_doc(_PortType, Url) -> {ok, 201, _, _} = test_request:put( @@ -1503,5 +1476,5 @@ port() -> % {ok, 200, _, Body} = test_request:get(Url ++ "/db/_all_docs?include_docs=true", % ?USERX_REQ_HEADERS), % {Json} = jiffy:decode(Body), -% ?debugFmt("~nHSOIN: ~p~n", [Json]), % ?_assertEqual(3, length(proplists:get_value(<<"rows">>, Json))). +% ?debugFmt("~nHSOIN: ~p~n", [Json]), diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl index f6d31e294..aa2015af3 100644 --- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl +++ b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl @@ -18,9 +18,8 @@ -define(i2l(I), integer_to_list(I)). -define(DOC_ID, <<"foobar">>). -define(LOCAL_DOC_ID, <<"_local/foobar">>). -% TODO: enable 1000, 2000, 5000, 10000]). --define(NUM_CLIENTS, [1000]). --define(TIMEOUT, 200000). +-define(NUM_CLIENTS, [1000, 2000, 5000, 10000]). +-define(TIMEOUT, 20000). start() -> test_util:start_couch(). @@ -55,8 +54,8 @@ view_indexes_cleanup_test_() -> fun start/0, fun test_util:stop_couch/1, [ - concurrent_updates()%, - % bulk_docs_updates() + concurrent_updates(), + bulk_docs_updates() ] } }. @@ -69,26 +68,26 @@ concurrent_updates() -> fun setup/1, fun teardown/2, [ - {NumClients, fun should_concurrently_update_doc/2} + {NumClients, fun should_concurrently_update_doc/2} || NumClients <- ?NUM_CLIENTS ] } }. -% bulk_docs_updates() -> -% { -% "Bulk docs updates", -% { -% foreach, -% fun setup/0, -% fun teardown/1, -% [ -% fun should_bulk_create_delete_doc/1, -% fun should_bulk_create_local_doc/1, -% fun should_ignore_invalid_local_doc/1 -% ] -% } -% }. +bulk_docs_updates() -> + { + "Bulk docs updates", + { + foreach, + fun setup/0, + fun teardown/1, + [ + fun should_bulk_create_delete_doc/1, + fun should_bulk_create_local_doc/1, + fun should_ignore_invalid_local_doc/1 + ] + } + }. 
should_concurrently_update_doc(NumClients, {DbName, InitRev}) -> { @@ -101,22 +100,16 @@ should_concurrently_update_doc(NumClients, {DbName, InitRev}) -> ]} }. -% should_bulk_create_delete_doc({DbName, InitRev}) -> -% ?_test(bulk_delete_create(DbName, InitRev)). -% -% should_bulk_create_local_doc({DbName, _}) -> -% ?_test(bulk_create_local_doc(DbName)). -% -% should_ignore_invalid_local_doc({DbName, _}) -> -% ?_test(ignore_invalid_local_doc(DbName)). +should_bulk_create_delete_doc({DbName, InitRev}) -> + ?_test(bulk_delete_create(DbName, InitRev)). -concurrent_doc_update(NumClients, DbName, InitRev) -> - eprof:start(), - eprof:log("/tmp/eprof1.log"), - eprof:profile(fun() -> concurrent_doc_update1(NumClients, DbName, InitRev) end), - eprof:analyze(). +should_bulk_create_local_doc({DbName, _}) -> + ?_test(bulk_create_local_doc(DbName)). -concurrent_doc_update1(NumClients, DbName, InitRev) -> +should_ignore_invalid_local_doc({DbName, _}) -> + ?_test(ignore_invalid_local_doc(DbName)). + +concurrent_doc_update(NumClients, DbName, InitRev) -> Clients = lists:map( fun(Value) -> ClientDoc = couch_doc:from_json_obj( @@ -343,9 +336,8 @@ spawn_client(DbName, Doc) -> go -> ok end, erlang:yield(), - Result = - try - couch_db:update_doc(Db, Doc, []) + Result = try + couch_db:update_doc(Db, Doc, []) catch _:Error -> Error
