From ea75748b2832c4067f545fd7817b7cd8fd840b7e Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Thu, 25 Mar 2021 12:55:28 -0400 Subject: [PATCH 1/7] Updated renderers and some related changes. --- Makefile | 27 +- pyproject.toml | 2 +- src/encoded/loadxl.py | 2 + src/encoded/renderers.py | 202 ++++++++++----- src/encoded/tests/conftest.py | 8 +- src/encoded/tests/conftest_settings.py | 33 +++ .../tests/data/workbook-inserts/page.json | 37 +++ .../data/workbook-inserts/static_section.json | 22 +- src/encoded/tests/datafixtures.py | 32 +-- src/encoded/tests/test_create_mapping.py | 2 +- src/encoded/tests/test_embedding.py | 2 +- src/encoded/tests/test_fixtures.py | 54 ++-- src/encoded/tests/test_indexing.py | 30 ++- src/encoded/tests/test_purge_item_type.py | 9 +- src/encoded/tests/test_search.py | 88 ++++--- src/encoded/tests/test_static_page.py | 240 ++++++++---------- src/encoded/tests/test_validation_errors.py | 1 + src/encoded/tests/test_views.py | 2 +- src/encoded/tests/workbook_fixtures.py | 10 +- 19 files changed, 483 insertions(+), 320 deletions(-) create mode 100644 src/encoded/tests/data/workbook-inserts/page.json diff --git a/Makefile b/Makefile index 09d4ea9a6e..a14e61e3ba 100644 --- a/Makefile +++ b/Makefile @@ -95,11 +95,17 @@ deploy2: # spins up waittress to serve the application pserve development.ini psql-dev: # starts psql with the url after 'sqlalchemy.url =' in development.ini - @psql `grep 'sqlalchemy[.]url =' development.ini | sed -E 's/^.* = (.*)/\1/'` + @scripts/psql-start dev -kibana-start: +psql-test: # starts psql with a url constructed from data in 'ps aux'. 
+ @scripts/psql-start test + +kibana-start: # starts a dev version of kibana (default port) scripts/kibana-start +kibana-start-test: # starts a test version of kibana (port chosen for active tests) + scripts/kibana-start test + kibana-stop: scripts/kibana-stop @@ -117,19 +123,27 @@ test: make test-unit make test-npm +retest: + bin/test -vv --last-failed + test-any: bin/test -vv --timeout=200 - test-npm: bin/test -vv --timeout=200 -m "working and not manual and not integratedx and not performance and not broken and not sloppy and workbook" test-unit: bin/test -vv --timeout=200 -m "working and not manual and not integratedx and not performance and not broken and not sloppy and not workbook" +test-performance: + bin/test -vv --timeout=200 -m "working and not manual and not integratedx and performance and not broken and not sloppy" + +test-integrated: + bin/test -vv --timeout=200 -m "working and not manual and (integrated or integratedx) and not performance and not broken and not sloppy" + travis-test: # Actually, we don't normally use this. Instead the GA workflow sets up two parallel tests. - make travis-test-npm make travis-test-unit + make travis-test-npm travis-test-npm: # Note this only does the 'not indexing' tests bin/test -vv --force-flaky --max-runs=3 --timeout=400 -m "working and not manual and not integratedx and not performance and not broken and not sloppy and workbook" --aws-auth --durations=10 --cov src/encoded --es search-fourfront-testing-6-8-kncqa2za2r43563rkcmsvgn2fq.us-east-1.es.amazonaws.com:443 @@ -154,11 +168,14 @@ info: $(info - Use 'make configure' to install poetry. You should not have to do this directly.) $(info - Use 'make deploy1' to spin up postgres/elasticsearch and load inserts.) $(info - Use 'make deploy2' to spin up the application server.) - $(info - Use 'make kibana-start' to start kibana, and 'make kibana-stop' to stop it.) 
+ $(info - Use 'make kibana-start' to start kibana on the default local ES port, and 'make kibana-stop' to stop it.) + $(info - Use 'make kibana-start-test' to start kibana on the port being used for active testing, and 'make kibana-stop' to stop it.) $(info - Use 'make kill' to kill postgres and elasticsearch proccesses. Please use with care.) $(info - Use 'make moto-setup' to install moto, for less flaky tests. Implied by 'make build'.) $(info - Use 'make npm-setup' to build the front-end. Implied by 'make build'.) $(info - Use 'make psql-dev' to start psql on data associated with an active 'make deploy1'.) + $(info - Use 'make psql-test' to start psql on data associated with an active test.) + $(info - Use 'make retest' to run failing tests from the previous test run.) $(info - Use 'make test' to run tests with normal options similar to what we use on GitHub Actions.) $(info - Use 'make test-any' to run tests without marker constraints (i.e., with no '-m' option).) $(info - Use 'make update' to update dependencies (and the lock file).) diff --git a/pyproject.toml b/pyproject.toml index cd0787ae16..2caa525cad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] # Note: Various modules refer to this system as "encoded", not "fourfront". name = "encoded" -version = "2.5.7" +version = "2.5.8" description = "4DN-DCIC Fourfront" authors = ["4DN-DCIC Team "] license = "MIT" diff --git a/src/encoded/loadxl.py b/src/encoded/loadxl.py index 0072a30bd5..b6510963d8 100644 --- a/src/encoded/loadxl.py +++ b/src/encoded/loadxl.py @@ -9,6 +9,7 @@ import webtest from base64 import b64encode +from dcicutils.misc_utils import ignored from PIL import Image from pkg_resources import resource_filename from pyramid.paster import get_app @@ -97,6 +98,7 @@ def load_data_view(context, request): 2) store in form of {'item_type': [items], 'item_type2': [items]} item_type should be same as insert file names i.e. 
file_fastq """ + ignored(context) # this is a bit weird but want to reuse load_data functionality so I'm rolling with it config_uri = request.json.get('config_uri', 'production.ini') patch_only = request.json.get('patch_only', False) diff --git a/src/encoded/renderers.py b/src/encoded/renderers.py index cb71afa9bb..744ee9a417 100644 --- a/src/encoded/renderers.py +++ b/src/encoded/renderers.py @@ -4,35 +4,33 @@ import psutil import time -from pkg_resources import resource_filename -from urllib.parse import urlencode, urlparse +from dcicutils.misc_utils import environ_bool, PRINT, ignored from functools import lru_cache +from pkg_resources import resource_filename from pyramid.events import BeforeRender, subscriber from pyramid.httpexceptions import ( HTTPMovedPermanently, HTTPPreconditionFailed, HTTPUnauthorized, - # HTTPForbidden, HTTPUnsupportedMediaType, HTTPNotAcceptable, HTTPServerError ) -# from pyramid.security import forget +from pyramid.response import Response from pyramid.settings import asbool from pyramid.threadlocal import manager -from pyramid.response import Response from pyramid.traversal import split_path_info, _join_path_tuple -# from snovault.validation import CSRFTokenError -# from subprocess_middleware.tween import SubprocessTween from subprocess_middleware.worker import TransformWorker +from urllib.parse import urlencode, urlparse from webob.cookies import Cookie +from .util import content_type_allowed log = logging.getLogger(__name__) def includeme(config): - ''' + """ Can get tween ordering by executing the following on command-line from root dir: `bin/ptween development.ini` @@ -67,13 +65,15 @@ def includeme(config): This means that if handler(request) is called, then the downstream tweens are acted upon it, until response is returned. It's an ONION! 
- ''' + """ config.add_tween('.renderers.validate_request_tween_factory', under='snovault.stats.stats_tween_factory') - # DISABLED - .add_tween('.renderers.remove_expired_session_cookies_tween_factory', under='.renderers.validate_request_tween_factory') + # DISABLED - .add_tween('.renderers.remove_expired_session_cookies_tween_factory', + # under='.renderers.validate_request_tween_factory') config.add_tween('.renderers.render_page_html_tween_factory', under='.renderers.validate_request_tween_factory') - # The above tweens, when using response (= `handler(request)`) act on the _transformed_ response (containing HTML body). + # The above tweens, when using response (= `handler(request)`) act on the _transformed_ response + # (containing HTML body). # The below tweens run _before_ the JS rendering. Responses in these tweens have not been transformed to HTML yet. config.add_tween('.renderers.set_response_headers_tween_factory', under='.renderers.render_page_html_tween_factory') @@ -93,6 +93,7 @@ def validate_request_tween_factory(handler, registry): Apache config: SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD """ + ignored(registry) def validate_request_tween(request): @@ -107,19 +108,18 @@ def validate_request_tween(request): # Includes page text/html requests. 
return handler(request) - elif request.content_type != 'application/json': - if request.content_type == 'application/x-www-form-urlencoded' and request.path[0:10] == '/metadata/': - # Special case to allow us to POST to metadata TSV requests via form submission - return handler(request) + elif content_type_allowed(request): + return handler(request) + + else: detail = "Request content type %s is not 'application/json'" % request.content_type raise HTTPUnsupportedMediaType(detail) - return handler(request) - return validate_request_tween def security_tween_factory(handler, registry): + ignored(registry) def security_tween(request): """ @@ -132,7 +132,7 @@ def security_tween(request): """ expected_user = request.headers.get('X-If-Match-User') - if expected_user is not None: # Not sure when this is the case + if expected_user is not None: # Not sure when this is the case if request.authenticated_userid != 'mailto.' + expected_user: detail = 'X-If-Match-User does not match' raise HTTPPreconditionFailed(detail) @@ -147,15 +147,18 @@ def security_tween(request): raise HTTPUnauthorized( title="No Access", comment="Invalid Authorization header or Auth Challenge response.", - headers={'WWW-Authenticate': "Bearer realm=\"{}\"; Basic realm=\"{}\"".format(request.domain, request.domain) } + headers={ + 'WWW-Authenticate': ("Bearer realm=\"{}\"; Basic realm=\"{}\"" + .format(request.domain, request.domain)) + } ) - if hasattr(request, 'auth0_expired'): # Add some security-related headers on the up-swing response = handler(request) if request.auth0_expired: - #return response + # return response + # # If have the attribute and it is true, then our session has expired. # This is true for both AJAX requests (which have request.authorization) & browser page # requests (which have cookie); both cases handled in authentication.py @@ -176,7 +179,10 @@ def security_tween(request): path='/' ) # = Same as response.delete_cookie(..) 
response.status_code = 401 - response.headers['WWW-Authenticate'] = "Bearer realm=\"{}\", title=\"Session Expired\"; Basic realm=\"{}\"".format(request.domain, request.domain) + response.headers['WWW-Authenticate'] = ( + "Bearer realm=\"{}\", title=\"Session Expired\"; Basic realm=\"{}\"" + .format(request.domain, request.domain) + ) else: # We have JWT and it's not expired. Add 'X-Request-JWT' & 'X-User-Info' header. # For performance, only do it if should transform to HTML as is not needed on every request. @@ -188,9 +194,10 @@ def security_tween(request): # This header is parsed in renderer.js, or, more accurately, # by libs/react-middleware.js which is imported by server.js and compiled into # renderer.js. Is used to get access to User Info on initial web page render. - response.headers['X-Request-JWT'] = request.cookies.get('jwtToken','') - user_info = request.user_info.copy() # Re-ified property set in authentication.py - del user_info["id_token"] # Redundant - don't need this in SSR nor browser as get from X-Request-JWT. + response.headers['X-Request-JWT'] = request.cookies.get('jwtToken', '') + user_info = request.user_info.copy() # Re-ified property set in authentication.py + # Redundant - don't need this in SSR nor browser as get from X-Request-JWT. + del user_info["id_token"] response.headers['X-User-Info'] = json.dumps(user_info) else: response.headers['X-Request-JWT'] = "null" @@ -203,20 +210,22 @@ def security_tween(request): # requests from Authorization header which acts like a CSRF token. # See authentication.py - get_jwt() - #token = request.headers.get('X-CSRF-Token') - #if token is not None: - # # Avoid dirtying the session and adding a Set-Cookie header - # # XXX Should consider if this is a good idea or not and timeouts - # if token == dict.get(request.session, '_csrft_', None): - # return handler(request) - # raise CSRFTokenError('Incorrect CSRF token') + # Alex notes that we do not use request.session so this is probably very old. 
-kmp 4-Mar-2021 + + # token = request.headers.get('X-CSRF-Token') + # if token is not None: + # # Avoid dirtying the session and adding a Set-Cookie header + # # XXX Should consider if this is a good idea or not and timeouts + # if token == dict.get(request.session, '_csrft_', None): + # return handler(request) + # raise CSRFTokenError('Incorrect CSRF token') # raise CSRFTokenError('Missing CSRF token') return security_tween def remove_expired_session_cookies_tween_factory(handler, registry): - ''' + """ CURRENTLY DISABLED Original purpose of this was to remove expired (session?) cookies. See: https://github.com/ENCODE-DCC/encoded/commit/75854803c99e5044a6a33aedb3a79d750481b6cd#diff-bc19a9793a1b3b4870cff50e7c7c9bd1R135 @@ -224,7 +233,8 @@ def remove_expired_session_cookies_tween_factory(handler, registry): We disable it for now via removing from tween chain as are using JWT tokens and handling their removal in security_tween_factory & authentication.py as well as client-side (upon "Logout" action). If needed for some reason, can re-enable. 
- ''' + """ # noQA - not going to break the long URL line above + ignored(registry) ignore = { '/favicon.ico', @@ -235,8 +245,8 @@ def remove_expired_session_cookies_tween(request): return handler(request) session = request.session - #if session or session._cookie_name not in request.cookies: - # return handler(request) + # if session or session._cookie_name not in request.cookies: + # return handler(request) response = handler(request) # Below seems to be empty always; though we do have some in request.cookies @@ -260,7 +270,9 @@ def remove_expired_session_cookies_tween(request): def set_response_headers_tween_factory(handler, registry): - '''Add additional response headers here''' + """Add additional response headers here""" + ignored(registry) + def set_response_headers_tween(request): response = handler(request) response.headers['X-Request-URL'] = request.url @@ -316,15 +328,80 @@ def canonical_redirect(event): raise HTTPMovedPermanently(location=location, detail="Redirected from " + str(request.path_info)) +# Web browsers send an Accept request header for initial (e.g. non-AJAX) page requests +# which should contain 'text/html' +MIME_TYPE_HTML = 'text/html' +MIME_TYPE_JSON = 'application/json' +MIME_TYPE_LD_JSON = 'application/ld+json' + +MIME_TYPES_SUPPORTED = [MIME_TYPE_HTML, MIME_TYPE_JSON, MIME_TYPE_LD_JSON] +MIME_TYPE_DEFAULT = MIME_TYPES_SUPPORTED[0] +MIME_TYPE_TRIAGE_MODE = 'modern' # if this doesn't work, fall back to 'legacy' + +DEBUG_MIME_TYPES = environ_bool("DEBUG_MIME_TYPES", default=False) + + +def best_mime_type(request, mode=MIME_TYPE_TRIAGE_MODE): + # TODO: I think this function does nothing but return MIME_TYPES_SUPPORTED[0] -kmp 3-Feb-2021 + """ + Given a request, tries to figure out the best kind of MIME type to use in response + based on what kinds of responses we support and what was requested. + + In the case we can't comply, we just use application/json whether or not that's what was asked for. 
+ """ + if mode == 'legacy': + # See: + # https://tedboy.github.io/flask/generated/generated/werkzeug.Accept.best_match.html#werkzeug-accept-best-match + # Note that this is now deprecated, or will be. The message is oddly worded ("will be deprecated") + # that presumably means "will be removed". Deprecation IS the warning of actual action, not the action itself. + # "This is currently maintained for backward compatibility, and will be deprecated in the future. + # AcceptValidHeader.best_match() uses its own algorithm (one not specified in RFC 7231) to determine + # what is a best match. The algorithm has many issues, and does not conform to RFC 7231." + # Anyway, we were getting this warning during testing: + # DeprecationWarning: The behavior of AcceptValidHeader.best_match is currently + # being maintained for backward compatibility, but it will be deprecated in the future, + # as it does not conform to the RFC. + # TODO: Once the modern replacement is shown to work, we should remove this conditional branch. + result = request.accept.best_match(MIME_TYPES_SUPPORTED, MIME_TYPE_DEFAULT) + else: + options = request.accept.acceptable_offers(MIME_TYPES_SUPPORTED) + if not options: + # TODO: Probably we should return a 406 response by raising HTTPNotAcceptable if + # no acceptable types are available. (Certainly returning JSON in this case is + # not some kind of friendly help toa naive user with an old browser.) 
+ # Ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status + result = MIME_TYPE_DEFAULT + else: + mime_type, score = options[0] + result = mime_type + if DEBUG_MIME_TYPES: + PRINT("Using mime type", result, "for", request.method, request.url) + for k, v in request.headers.items(): + PRINT("%s: %s" % (k, v)) + PRINT("----------") + return result + + @lru_cache(maxsize=16) def should_transform(request, response): - ''' + """ Determines whether to transform the response from JSON->HTML/JS depending on type of response - and what the request is looking for to be returned via e.g. request Accept, Authorization header. - In case of no Accept header, attempts to guess. - - Memoized via `lru_cache`. Cache size is set to be 16 (> 1) in case sub-requests fired off during handling. - ''' + and what the request is looking for to be returned via these criteria, which are tried in order + until one succeeds: + + * If the request method is other than GET or HEAD, returns False. + * If the response.content_type is other than 'application/json', returns False. + * If a 'frame=' query param is given and not 'page' (the default), returns False. + * If a 'format=' query param is given explicitly, + * For 'format=html', returns True. + * For 'format=json', returns False. + This rule does not match if 'format=' is not given explicitly. + If 'format=' is given an explicit value other than 'html' or 'json', an HTTPNotAcceptable error will be raised. + * If the first element of MIME_TYPES_SUPPORTED is 'text/html', returns True. + * Otherwise, in all remaining cases, returns False. + + NOTE: Memoized via `lru_cache`. Cache size is set to be 16 (> 1) in case sub-requests fired off during handling. + """
if request.method not in ('GET', 'HEAD'): return False @@ -333,30 +410,39 @@ def should_transform(request, response): if response.content_type != 'application/json': return False + # TODO: This would be an incompatible change, but an improvement? For now, let's disable it so we an phase it + # it in on a separate PR. There is a corresponding part of the test that needs to be uncommented as well + # to make all this work. -kmp 18-Mar-2021 + # + # # If we have a 'frame' that is not None or page, force JSON, since our UI doesn't handle all various + # # forms of the data, just embedded/page. + # frame_param = request.params.get("frame", "page") + # if frame_param != "page": + # return False + # The `format` URI param allows us to override request's 'Accept' header. - format = request.params.get('format') - if format is not None: - format = format.lower() - if format == 'json': + format_param = request.params.get('format') + if format_param is not None: + format_param = format_param.lower() + if format_param == 'json': return False - if format == 'html': + if format_param == 'html': return True else: - raise HTTPNotAcceptable("Improper format URI parameter", comment="The format URI parameter should be set to either html or json.") + raise HTTPNotAcceptable("Improper format URI parameter", + comment="The format URI parameter should be set to either html or json.") # Web browsers send an Accept request header for initial (e.g. non-AJAX) page requests # which should contain 'text/html' # See: https://tedboy.github.io/flask/generated/generated/werkzeug.Accept.best_match.html#werkzeug-accept-best-match - mime_type = request.accept.best_match(['text/html', 'application/json', 'application/ld+json'], 'application/json') - format = mime_type.split('/', 1)[1] # Will be 1 of 'html', 'json', 'json-ld' + mime_type = best_mime_type(request) # Result will be one of MIME_TYPES_SUPPORTED - # N.B. ld+json (JSON-LD) is likely more unique case and might be sent by search engines (?) 
which can parse JSON-LDs. - # At some point we could maybe have it to be same as making an `@@object` or `?frame=object` request (?) esp if fill + # N.B. ld+json (JSON-LD) is likely more unique case and might be sent by search engines (?) + # which can parse JSON-LDs. At some point we could maybe have it to be same as + # making an `@@object` or `?frame=object` request (?) esp if fill # out @context response w/ schema(s) (or link to schema) - if format == 'html': - return True - return False + return mime_type == MIME_TYPE_HTML def render_page_html_tween_factory(handler, registry): @@ -378,7 +464,9 @@ class TransformErrorResponse(HTTPServerError): rss_limit = 256 * (1024 ** 2) # MB - reload_process = True if registry.settings.get('reload_templates', False) else lambda proc: psutil.Process(proc.pid).memory_info().rss > rss_limit + reload_process = (True + if registry.settings.get('reload_templates', False) + else lambda proc: psutil.Process(proc.pid).memory_info().rss > rss_limit) # TransformWorker inits and manages a subprocess # it re-uses the subprocess so interestingly data in JS global variables diff --git a/src/encoded/tests/conftest.py b/src/encoded/tests/conftest.py index 190682e395..f970af3af3 100644 --- a/src/encoded/tests/conftest.py +++ b/src/encoded/tests/conftest.py @@ -16,9 +16,8 @@ from pyramid.threadlocal import get_current_registry, manager as threadlocal_manager from snovault import DBSESSION, ROOT, UPGRADER from snovault.elasticsearch import ELASTIC_SEARCH, create_mapping -from snovault.util import generate_indexer_namespace_for_testing from .. import main -from .conftest_settings import make_app_settings_dictionary +from .conftest_settings import make_app_settings_dictionary, INDEXER_NAMESPACE_FOR_TESTING # Done in pytest.ini now. 
@@ -31,11 +30,13 @@ @pytest.fixture(autouse=True) def autouse_external_tx(external_tx): + notice_pytest_fixtures(external_tx) pass @pytest.fixture(scope='session') -def app_settings(request, wsgi_server_host_port, conn, DBSession): +def app_settings(request, wsgi_server_host_port, conn, DBSession): # noQA - We didn't choose the fixture name. + notice_pytest_fixtures(request, wsgi_server_host_port, conn, DBSession) settings = make_app_settings_dictionary() settings['auth0.audiences'] = 'http://%s:%s' % wsgi_server_host_port # add some here for file testing @@ -63,6 +64,7 @@ def filter(self, record): @pytest.yield_fixture def threadlocals(request, dummy_request, registry): + notice_pytest_fixtures(request, dummy_request, registry) threadlocal_manager.push({'request': dummy_request, 'registry': registry}) yield dummy_request threadlocal_manager.pop() diff --git a/src/encoded/tests/conftest_settings.py b/src/encoded/tests/conftest_settings.py index 9b87c70673..129089b5e0 100644 --- a/src/encoded/tests/conftest_settings.py +++ b/src/encoded/tests/conftest_settings.py @@ -1,5 +1,9 @@ import pkg_resources +from snovault.util import generate_indexer_namespace_for_testing + + +INDEXER_NAMESPACE_FOR_TESTING = generate_indexer_namespace_for_testing('ff') _app_settings = { 'collection_datastore': 'database', @@ -37,3 +41,32 @@ def make_app_settings_dictionary(): return _app_settings.copy() + + +ORDER = [ + 'user', 'award', 'lab', 'static_section', 'higlass_view_config', 'page', + 'ontology', 'ontology_term', 'file_format', 'badge', 'organism', 'gene', + 'genomic_region', 'bio_feature', 'target', 'imaging_path', 'publication', + 'publication_tracking', 'document', 'image', 'vendor', 'construct', + 'modification', 'experiment_type', 'protocol', 'sop_map', 'biosample_cell_culture', + 'individual_human', 'individual_mouse', 'individual_fly', 'individual_primate', + 'individual_chicken', 'individual_zebrafish', 'biosource', 'antibody', 'enzyme', + 'treatment_rnai', 
'treatment_agent', + 'biosample', 'quality_metric_fastqc', 'quality_metric_bamcheck', 'quality_metric_rnaseq', + 'quality_metric_bamqc', 'quality_metric_pairsqc', 'quality_metric_margi', + 'quality_metric_dedupqc_repliseq', 'quality_metric_chipseq', 'quality_metric_workflowrun', + 'quality_metric_atacseq', 'quality_metric_rnaseq_madqc', 'quality_metric_qclist', + 'microscope_setting_d1', 'microscope_setting_d2', + 'microscope_setting_a1', 'microscope_setting_a2', 'file_fastq', + 'file_processed', 'file_reference', 'file_calibration', 'file_microscopy', + 'file_set', 'file_set_calibration', 'file_set_microscope_qc', + 'file_vistrack', 'experiment_hi_c', 'experiment_capture_c', + 'experiment_repliseq', 'experiment_atacseq', 'experiment_chiapet', + 'experiment_damid', 'experiment_seq', 'experiment_tsaseq', + 'experiment_mic', 'experiment_set', 'experiment_set_replicate', + 'data_release_update', 'software', 'analysis_step', 'workflow', + 'workflow_mapping', 'workflow_run_sbg', 'workflow_run_awsem', + 'tracking_item', 'quality_metric_flag', + 'summary_statistic', 'summary_statistic_hi_c', 'workflow_run', + 'microscope_configuration' +] diff --git a/src/encoded/tests/data/workbook-inserts/page.json b/src/encoded/tests/data/workbook-inserts/page.json new file mode 100644 index 0000000000..c8599e2c48 --- /dev/null +++ b/src/encoded/tests/data/workbook-inserts/page.json @@ -0,0 +1,37 @@ +[ + { + "name": "help/user-guide/rest-api", + "title": "The REST-API", + "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"], + "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540d", + "table-of-contents": { + "enabled": true, + "header-depth": 4, + "list-styles": ["decimal", "lower-alpha", "lower-roman"] + } + }, + { + "name": "help/user-guide/rest-api-draft", + "title": "The REST-API", + "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"], + "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540c", + "table-of-contents": { + "enabled": true, + "header-depth": 4, + "list-styles": ["decimal", 
"lower-alpha", "lower-roman"] + }, + "status": "draft" + }, + { + "name": "help/user-guide/rest-api-deleted", + "title": "The REST-API", + "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"], + "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540a", + "table-of-contents": { + "enabled": true, + "header-depth": 4, + "list-styles": ["decimal", "lower-alpha", "lower-roman"] + }, + "status": "deleted" + } +] diff --git a/src/encoded/tests/data/workbook-inserts/static_section.json b/src/encoded/tests/data/workbook-inserts/static_section.json index 835eb8d494..7afed1051f 100644 --- a/src/encoded/tests/data/workbook-inserts/static_section.json +++ b/src/encoded/tests/data/workbook-inserts/static_section.json @@ -1,7 +1,15 @@ -[{ - "name" : "search-info-header.Workflow", - "uuid" : "442c8aa0-dc6c-43d7-814a-854af460b001", - "section_type" : "Search Info Header", - "title" : "Workflow Information", - "body" : "" -}] +[ + { + "name" : "search-info-header.Workflow", + "uuid" : "442c8aa0-dc6c-43d7-814a-854af460b001", + "section_type" : "Search Info Header", + "title" : "Workflow Information", + "body" : "" + }, + { + "name": "help.user-guide.rest-api.rest_api_submission", + "uuid": "442c8aa0-dc6c-43d7-814a-854af460b020", + "title": "", + "file": "/docs/source/rest_api_submission.rst" + } +] diff --git a/src/encoded/tests/datafixtures.py b/src/encoded/tests/datafixtures.py index 7ca2853b72..8c709aac70 100644 --- a/src/encoded/tests/datafixtures.py +++ b/src/encoded/tests/datafixtures.py @@ -3,34 +3,10 @@ from uuid import uuid4 - -ORDER = [ - 'user', 'award', 'lab', 'static_section', 'higlass_view_config', 'page', - 'ontology', 'ontology_term', 'file_format', 'badge', 'organism', 'gene', - 'genomic_region', 'bio_feature', 'target', 'imaging_path', 'publication', - 'publication_tracking', 'document', 'image', 'vendor', 'construct', - 'modification', 'experiment_type', 'protocol', 'sop_map', 'biosample_cell_culture', - 'individual_human', 'individual_mouse', 'individual_fly', 
'individual_primate', - 'individual_chicken', 'individual_zebrafish', 'biosource', 'antibody', 'enzyme', - 'treatment_rnai', 'treatment_agent', - 'biosample', 'quality_metric_fastqc', 'quality_metric_bamcheck', 'quality_metric_rnaseq', - 'quality_metric_bamqc', 'quality_metric_pairsqc', 'quality_metric_margi', - 'quality_metric_dedupqc_repliseq', 'quality_metric_chipseq', 'quality_metric_workflowrun', - 'quality_metric_atacseq', 'quality_metric_rnaseq_madqc', 'quality_metric_qclist', - 'microscope_setting_d1', 'microscope_setting_d2', - 'microscope_setting_a1', 'microscope_setting_a2', 'file_fastq', - 'file_processed', 'file_reference', 'file_calibration', 'file_microscopy', - 'file_set', 'file_set_calibration', 'file_set_microscope_qc', - 'file_vistrack', 'experiment_hi_c', 'experiment_capture_c', - 'experiment_repliseq', 'experiment_atacseq', 'experiment_chiapet', - 'experiment_damid', 'experiment_seq', 'experiment_tsaseq', - 'experiment_mic', 'experiment_set', 'experiment_set_replicate', - 'data_release_update', 'software', 'analysis_step', 'workflow', - 'workflow_mapping', 'workflow_run_sbg', 'workflow_run_awsem', - 'tracking_item', 'quality_metric_flag', - 'summary_statistic', 'summary_statistic_hi_c', 'workflow_run', - 'microscope_configuration' -] +# If anyone else needs to import ORDER, it got moved, so get it from +# its new home in .conftest_settings. 
-kmp 14-Mar-2021 +# +# from .conftest_settings import ORDER @pytest.fixture diff --git a/src/encoded/tests/test_create_mapping.py b/src/encoded/tests/test_create_mapping.py index aae1140355..3e7681b216 100644 --- a/src/encoded/tests/test_create_mapping.py +++ b/src/encoded/tests/test_create_mapping.py @@ -5,7 +5,7 @@ from snovault.elasticsearch.create_mapping import type_mapping from snovault.util import add_default_embeds from unittest.mock import patch, MagicMock -from .datafixtures import ORDER +from .conftest_settings import ORDER from ..commands import create_mapping_on_deploy from ..commands.create_mapping_on_deploy import ( ITEM_INDEX_ORDER, diff --git a/src/encoded/tests/test_embedding.py b/src/encoded/tests/test_embedding.py index 236f167484..b3e2bfd2a5 100644 --- a/src/encoded/tests/test_embedding.py +++ b/src/encoded/tests/test_embedding.py @@ -3,7 +3,7 @@ from snovault import TYPES from snovault.util import add_default_embeds, crawl_schemas_by_embeds from ..types.base import get_item_or_none -from .datafixtures import ORDER +from .conftest_settings import ORDER pytestmark = [pytest.mark.setone, pytest.mark.working] diff --git a/src/encoded/tests/test_fixtures.py b/src/encoded/tests/test_fixtures.py index 01dfada4a5..185ce2f2f1 100644 --- a/src/encoded/tests/test_fixtures.py +++ b/src/encoded/tests/test_fixtures.py @@ -3,10 +3,11 @@ from unittest import mock -from ..tests import datafixtures +from ..tests import conftest_settings -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema] +# in cgap these are marked broken -kmp 24-Feb-2021 +pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema, pytest.mark.indexing] @pytest.yield_fixture(scope='session') @@ -100,16 +101,22 @@ def test_fixtures2(minitestdata2, testapp): def test_order_complete(app, conn): + order_source_module = conftest_settings # TODO: This could use a doc string or comment. 
-kent & eric 29-Jun-2020 - print("original datafixtures.ORDER =", datafixtures.ORDER) - print("original len(datafixtures.ORDER) =", len(datafixtures.ORDER)) - assert "access_key" not in datafixtures.ORDER - order_for_testing = datafixtures.ORDER + ["access_key"] - with mock.patch.object(datafixtures, "ORDER", order_for_testing): - print("mocked datafixtures.ORDER =", datafixtures.ORDER) - print("len(mocked datafixtures.ORDER) =", len(datafixtures.ORDER)) - assert "access_key" in datafixtures.ORDER - ORDER = datafixtures.ORDER + print("original order_source_module.ORDER =", order_source_module.ORDER) + print("original len(order_source_module.ORDER) =", len(order_source_module.ORDER)) + assert "access_key" not in order_source_module.ORDER + print("confirmed: 'access_key' is NOT in ORDER") + order_for_testing = order_source_module.ORDER + ["access_key"] + assert order_source_module.ORDER is not order_for_testing + assert len(order_for_testing) == len(order_source_module.ORDER) + 1 + with mock.patch.object(order_source_module, "ORDER", order_for_testing): + print("=" * 24, "binding ORDER to add 'access_key'", "=" * 24) + print("mocked order_source_module.ORDER =", order_source_module.ORDER) + print("len(mocked order_source_module.ORDER) =", len(order_source_module.ORDER)) + assert "access_key" in order_source_module.ORDER + print("confirmed: 'access_key' IS in ORDER") + patched_order = order_source_module.ORDER environ = { 'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST', @@ -117,22 +124,25 @@ def test_order_complete(app, conn): testapp = webtest.TestApp(app, environ) master_types = [] profiles = testapp.get('/profiles/?frame=raw').json + print("constructing master_types from /profiles/?frame=raw") for a_type in profiles: if profiles[a_type].get('id') and profiles[a_type]['isAbstract'] is False: schema_name = profiles[a_type]['id'].split('/')[-1][:-5] master_types.append(schema_name) - print(ORDER) - print(master_types) - print(len(ORDER)) - 
print(len(master_types)) + print("patched_order=", patched_order) + print("master_types=", master_types) + print("len(patched_order)=", len(patched_order)) + print("len(master_types)=", len(master_types)) - missing_types = [i for i in master_types if i not in ORDER] - extra_types = [i for i in ORDER if i not in master_types] - print(missing_types) - print(extra_types) + missing_types = [i for i in master_types if i not in patched_order] + extra_types = [i for i in patched_order if i not in master_types] + print("missing_types=", missing_types) + print("extra_types=", extra_types) assert missing_types == [] assert extra_types == [] - print("restored datafixtures.ORDER =", datafixtures.ORDER) - print("restored len(datafixtures.ORDER) =", len(datafixtures.ORDER)) - assert "access_key" not in datafixtures.ORDER + print("=" * 24, "exiting bound context for ORDER", "=" * 24) + print("restored order_source_module.ORDER =", order_source_module.ORDER) + print("restored len(order_source_module.ORDER) =", len(order_source_module.ORDER)) + assert "access_key" not in order_source_module.ORDER + print("confirmed: 'access_key' is NOT in ORDER") \ No newline at end of file diff --git a/src/encoded/tests/test_indexing.py b/src/encoded/tests/test_indexing.py index 7e8c11c24f..87c4d6ebe1 100644 --- a/src/encoded/tests/test_indexing.py +++ b/src/encoded/tests/test_indexing.py @@ -24,13 +24,13 @@ build_index_record, compare_against_existing_mapping ) -from snovault.elasticsearch.interfaces import INDEXER_QUEUE from snovault.elasticsearch.indexer_utils import get_namespaced_index +from snovault.elasticsearch.interfaces import INDEXER_QUEUE from sqlalchemy import MetaData, func from timeit import default_timer as timer from unittest import mock from zope.sqlalchemy import mark_changed -from .. import main +from .. import main, loadxl from ..util import delay_rerun from ..verifier import verify_item from .workbook_fixtures import app_settings # why does this care?? does it? 
-kmp 12-Mar-2021 @@ -67,9 +67,9 @@ def app(app_settings, request): yield app - DBSession = app.registry[DBSESSION] + db_session = app.registry[DBSESSION] # Dispose connections so postgres can tear down. - DBSession.bind.pool.dispose() + db_session.bind.pool.dispose() @pytest.yield_fixture(autouse=True) @@ -128,6 +128,7 @@ def test_indexing_simple(app, testapp, indexer_testapp): count += 1 assert res.json['total'] >= 2 assert uuid in uuids + namespaced_indexing = get_namespaced_index(app, 'indexing') indexing_doc = es.get(index=namespaced_indexing, doc_type='indexing', id='latest_indexing') indexing_source = indexing_doc['_source'] @@ -154,7 +155,7 @@ def test_create_mapping_on_indexing(app, testapp, registry, elasticsearch): item_types = TEST_COLLECTIONS # check that mappings and settings are in index for item_type in item_types: - item_mapping = type_mapping(registry[TYPES], item_type) + type_mapping(registry[TYPES], item_type) try: namespaced_index = get_namespaced_index(app, item_type) item_index = es.indices.get(index=namespaced_index) @@ -184,7 +185,7 @@ def test_file_processed_detailed(app, testapp, indexer_testapp, award, lab, file } fp_res = testapp.post_json('/file_processed', item) test_fp_uuid = fp_res.json['@graph'][0]['uuid'] - res = testapp.post_json('/file_processed', item) + testapp.post_json('/file_processed', item) indexer_testapp.post_json('/index', {'record': True}) # Todo, input a list of accessions / uuids: @@ -285,10 +286,10 @@ def test_real_validation_error(app, indexer_testapp, testapp, lab, award, file_f assert val_err_view['validation_errors'] == es_res['_source']['validation_errors'] -# @pytest.mark.performance +@pytest.mark.performance @pytest.mark.skip(reason="need to update perf-testing inserts") def test_load_and_index_perf_data(testapp, indexer_testapp): - ''' + """ ~~ CURRENTLY NOT WORKING ~~ PERFORMANCE TESTING @@ -299,7 +300,7 @@ def test_load_and_index_perf_data(testapp, indexer_testapp): nightly through the 
mastertest_deployment process in the torb repo it takes roughly 25 to run. Note: run with bin/test -s -m performance to see the prints from the test - ''' + """ insert_dir = pkg_resources.resource_filename('encoded', 'tests/data/perf-testing/') inserts = [f for f in os.listdir(insert_dir) if os.path.isfile(os.path.join(insert_dir, f))] @@ -317,7 +318,7 @@ def test_load_and_index_perf_data(testapp, indexer_testapp): # load -em up start = timer() - with mock.patch('encoded.loadxl.get_app') as mocked_app: + with mock.patch.object(loadxl, 'get_app') as mocked_app: mocked_app.return_value = testapp.app data = {'store': json_inserts} res = testapp.post_json('/load_data', data, # status=200 @@ -333,22 +334,23 @@ def test_load_and_index_perf_data(testapp, indexer_testapp): # check a couple random inserts for item in test_inserts: start = timer() - assert testapp.get("/" + item['data']['uuid'] + "?frame=raw").json['uuid'] + assert testapp.get("/" + item['data']['uuid'] + "?frame=raw").json['uuid'] # noQA stop = timer() frame_time = stop - start start = timer() - assert testapp.get("/" + item['data']['uuid']).follow().json['uuid'] + assert testapp.get("/" + item['data']['uuid']).follow().json['uuid'] # noQA stop = timer() embed_time = stop - start - print("PERFORMANCE: Time to query item %s - %s raw: %s embed %s" % (item['type_name'], item['data']['uuid'], + print("PERFORMANCE: Time to query item %s - %s raw: %s embed %s" % (item['type_name'], item['data']['uuid'], # noQA frame_time, embed_time)) # userful for seeing debug messages # assert False -def test_permissions_database_applies_permissions(award, lab, file_formats, wrangler_testapp, anontestapp, indexer_testapp): +def test_permissions_database_applies_permissions(award, lab, file_formats, wrangler_testapp, anontestapp, + indexer_testapp): """ Tests that anontestapp gets view denied when using datastore=database """ file_item_body = { 'award': award['uuid'], diff --git a/src/encoded/tests/test_purge_item_type.py 
b/src/encoded/tests/test_purge_item_type.py index 08e6f7d2b5..311ef118c9 100644 --- a/src/encoded/tests/test_purge_item_type.py +++ b/src/encoded/tests/test_purge_item_type.py @@ -2,13 +2,13 @@ import time from dcicutils.qa_utils import notice_pytest_fixtures from .workbook_fixtures import app_settings, app, workbook -from encoded.commands.purge_item_type import purge_item_type_from_storage +from ..commands.purge_item_type import purge_item_type_from_storage notice_pytest_fixtures(app_settings, app, workbook) -pytestmark = [pytest.mark.working, pytest.mark.workbook] +pytestmark = [pytest.mark.working] @pytest.fixture @@ -41,7 +41,7 @@ def many_dummy_static_sections(testapp): return paths -@pytest.mark.parametrize('item_type', ['static_section']) # maybe should test some other types... +@pytest.mark.parametrize('item_type', ['static_section']) # XXX: Maybe parametrize on a few types? def test_purge_item_type_from_db(testapp, dummy_static_section, item_type): """ Tests purging all items of a certain item type from the DB """ assert purge_item_type_from_storage(testapp, [item_type]) is True @@ -61,6 +61,9 @@ def test_purge_item_type_from_db_many(testapp, many_dummy_static_sections): testapp.get('/search/?type=StaticSection', status=404) +# Just this one test is a workbook test, but note well that it does not modify the workbook. +# It should just try and fail, so that should be OK for the workbook. 
-kmp 22-Mar-2021 +@pytest.mark.workbook def test_purge_item_type_with_links_fails(testapp, workbook): """ Tries to remove 'lab', which should fail since it has links """ testapp.post_json('/index', {'record': True}) # must index everything so individual links show up diff --git a/src/encoded/tests/test_search.py b/src/encoded/tests/test_search.py index 9c604e4c0f..71e43b4f8e 100644 --- a/src/encoded/tests/test_search.py +++ b/src/encoded/tests/test_search.py @@ -19,11 +19,11 @@ pytest.mark.schema, # pytest.mark.indexing, pytest.mark.workbook, - #pytest.mark.flaky(rerun_filter=customized_delay_rerun(sleep_seconds=10)) + # pytest.mark.flaky(rerun_filter=customized_delay_rerun(sleep_seconds=10)) ] -### IMPORTANT +# ### IMPORTANT # uses the inserts in ./data/workbook_inserts # design your tests accordingly notice_pytest_fixtures(app_settings, app, workbook) @@ -215,10 +215,10 @@ def test_search_facets_and_columns_order(workbook, testapp, registry): schema_facets = [fct for fct in schema_facets if not fct[1].get('disabled', False)] sort_facets = sorted(schema_facets, key=lambda fct: fct[1].get('order', 0)) res = testapp.get('/search/?type=ExperimentSetReplicate&limit=all').json - for i,val in enumerate(sort_facets): + for i, val in enumerate(sort_facets): assert res['facets'][i]['field'] == val[0] # assert order of columns when we officially upgrade to python 3.6 (ordered dicts) - for key,val in schema.get('columns', {}).items(): + for key, val in schema.get('columns', {}).items(): assert res['columns'][key]['title'] == val['title'] @@ -362,20 +362,23 @@ def test_metadata_tsv_view(workbook, htmltestapp): FILE_ACCESSION_COL_INDEX = 3 FILE_DOWNLOAD_URL_COL_INDEX = 0 - def check_tsv(result_rows, len_requested = None): + def check_tsv(result_rows, len_requested=None): info_row = result_rows.pop(0) header_row = result_rows.pop(0) assert header_row[FILE_ACCESSION_COL_INDEX] == 'File Accession' - assert header_row.index('File Download URL') == FILE_DOWNLOAD_URL_COL_INDEX # 
Ensure we have this column - assert len(result_rows) > 0 # We at least have some rows. + assert header_row.index('File Download URL') == FILE_DOWNLOAD_URL_COL_INDEX # Ensure we have this column + assert len(result_rows) > 0 # We at least have some rows. for row_index in range(1): - assert len(result_rows[row_index][FILE_ACCESSION_COL_INDEX]) > 4 # We have a value for File Accession - assert 'http' in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX] # Make sure it seems like a valid URL. + assert len(result_rows[row_index][FILE_ACCESSION_COL_INDEX]) > 4 # We have a value for File Accession + assert 'http' in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX] # Make sure it seems like a valid URL. assert '/@@download/' in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX] - assert result_rows[row_index][FILE_ACCESSION_COL_INDEX] in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX] # That File Accession is also in File Download URL of same row. - assert len(result_rows[row_index][FILE_ACCESSION_COL_INDEX]) < len(result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX]) + # That File Accession is also in File Download URL of same row. + assert (result_rows[row_index][FILE_ACCESSION_COL_INDEX] + in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX]) + assert (len(result_rows[row_index][FILE_ACCESSION_COL_INDEX]) + < len(result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX])) # Last some rows should be 'summary' rows. And have empty spaces for 'Download URL' / first column. 
summary_start_row = None @@ -394,32 +397,35 @@ def check_tsv(result_rows, len_requested = None): assert int(result_rows[summary_start_row + 4][4]) == summary_start_row assert int(result_rows[summary_start_row + 5][4]) <= summary_start_row - # run a simple GET query with type=ExperimentSetReplicate - res = htmltestapp.get('/metadata/type=ExperimentSetReplicate/metadata.tsv') # OLD URL FORMAT IS USED -- TESTING REDIRECT TO NEW URL + res = htmltestapp.get('/metadata/type=ExperimentSetReplicate/metadata.tsv') # OLD URL FORMAT IS USED -- TESTING REDIRECT TO NEW URL res = res.maybe_follow() # Follow redirect -- https://docs.pylonsproject.org/projects/webtest/en/latest/api.html#webtest.response.TestResponse.maybe_follow assert 'text/tsv' in res.content_type - result_rows = [ row.rstrip(' \r').split('\t') for row in res.body.decode('utf-8').split('\n') ] # Strip out carriage returns and whatnot. Make a plain multi-dim array. + # Strip out carriage returns and whatnot. Make a plain multi-dim array. + result_rows = [row.rstrip(' \r').split('\t') + for row in res.body.decode('utf-8').split('\n')] check_tsv(result_rows) # Perform POST w/ accession triples (main case, for BrowseView downloads) - res2_post_data = { # N.B. '.post', not '.post_json' is used. This dict is converted to POST form values, with key values STRINGIFIED, not to POST JSON request. - "accession_triples" : [ - ["4DNESAAAAAA1","4DNEXO67APU1","4DNFIO67APU1"], - ["4DNESAAAAAA1","4DNEXO67APU1","4DNFIO67APT1"], - ["4DNESAAAAAA1","4DNEXO67APT1","4DNFIO67APV1"], - ["4DNESAAAAAA1","4DNEXO67APT1","4DNFIO67APY1"], - ["4DNESAAAAAA1","4DNEXO67APV1","4DNFIO67APZ1"], - ["4DNESAAAAAA1","4DNEXO67APV1","4DNFIO67AZZ1"] + res2_post_data = { # N.B. '.post', not '.post_json' is used. This dict is converted to POST form values, with key values STRINGIFIED, not to POST JSON request. 
+ "accession_triples": [ + ["4DNESAAAAAA1", "4DNEXO67APU1", "4DNFIO67APU1"], + ["4DNESAAAAAA1", "4DNEXO67APU1", "4DNFIO67APT1"], + ["4DNESAAAAAA1", "4DNEXO67APT1", "4DNFIO67APV1"], + ["4DNESAAAAAA1", "4DNEXO67APT1", "4DNFIO67APY1"], + ["4DNESAAAAAA1", "4DNEXO67APV1", "4DNFIO67APZ1"], + ["4DNESAAAAAA1", "4DNEXO67APV1", "4DNFIO67AZZ1"] ], - 'download_file_name' : 'metadata_TEST.tsv' + 'download_file_name': 'metadata_TEST.tsv' } - res2 = htmltestapp.post('/metadata/?type=ExperimentSetReplicate', { k : json.dumps(v) for k,v in res2_post_data.items() }) # NEWER URL FORMAT + res2 = htmltestapp.post('/metadata/?type=ExperimentSetReplicate', + {k: json.dumps(v) + for k, v in res2_post_data.items()}) # NEWER URL FORMAT assert 'text/tsv' in res2.content_type - result_rows = [ row.rstrip(' \r').split('\t') for row in res2.body.decode('utf-8').split('\n') ] + result_rows = [row.rstrip(' \r').split('\t') for row in res2.body.decode('utf-8').split('\n')] check_tsv(result_rows, len(res2_post_data['accession_triples'])) @@ -516,11 +522,11 @@ def test_search_with_no_value(workbook, testapp): def test_search_with_static_header(workbook, testapp): """ Performs a search which should be accompanied by a search header """ search = '/search/?type=Workflow' - res_json = testapp.get(search, status=404).json # no items, just checking hdr + res_json = testapp.get(search, status=404).json # no items, just checking hdr assert 'search_header' in res_json assert 'content' in res_json['search_header'] assert res_json['search_header']['title'] == 'Workflow Information' - search = '/search/?type=workflow' # check type resolution + search = '/search/?type=workflow' # check type resolution res_json = testapp.get(search, status=404).json assert 'search_header' in res_json assert 'content' in res_json['search_header'] @@ -601,9 +607,9 @@ def test_index_data_workbook(app, workbook, testapp, indexer_testapp, htmltestap pass -###################################### -## Search-based visualization tests ## 
-###################################### +# ###################################### +# ## Search-based visualization tests ## +# ###################################### def test_barplot_aggregation_endpoint(workbook, testapp): @@ -619,8 +625,8 @@ def test_barplot_aggregation_endpoint(workbook, testapp): # Now, test the endpoint after ensuring we have the data correctly loaded into ES. # We should get back same count as from search results here. res = testapp.post_json('/bar_plot_aggregations', { - "search_query_params" : { "type" : ['ExperimentSetReplicate'] }, - "fields_to_aggregate_for" : ["experiments_in_set.experiment_type.display_title", "award.project"] + "search_query_params": {"type": ['ExperimentSetReplicate']}, + "fields_to_aggregate_for": ["experiments_in_set.experiment_type.display_title", "award.project"] }).json print() @@ -629,22 +635,22 @@ def test_barplot_aggregation_endpoint(workbook, testapp): assert (res['total']['experiment_sets'] == count_exp_set_test_inserts) or (res['total']['experiment_sets'] == search_result_count) - assert res['field'] == 'experiments_in_set.experiment_type.display_title' # top level field + assert res['field'] == 'experiments_in_set.experiment_type.display_title' # top level field assert isinstance(res['terms'], dict) is True assert len(res["terms"].keys()) > 0 - #assert isinstance(res['terms']["CHIP-seq"], dict) is True # A common term likely to be found. + # assert isinstance(res['terms']["CHIP-seq"], dict) is True # A common term likely to be found. - #assert res["terms"]["CHIP-seq"]["field"] == "award.project" # Child-field + # assert res["terms"]["CHIP-seq"]["field"] == "award.project" # Child-field # We only have 4DN as single award.project in test inserts so should have values in all buckets, though probably less than total. 
- #assert res["terms"]["CHIP-seq"]["total"]["experiment_sets"] > 0 - #assert res["terms"]["CHIP-seq"]["total"]["experiment_sets"] < count_exp_set_test_inserts + # assert res["terms"]["CHIP-seq"]["total"]["experiment_sets"] > 0 + # assert res["terms"]["CHIP-seq"]["total"]["experiment_sets"] < count_exp_set_test_inserts - #assert res["terms"]["CHIP-seq"]["terms"]["4DN"]["experiment_sets"] > 0 - #assert res["terms"]["CHIP-seq"]["terms"]["4DN"]["experiment_sets"] < count_exp_set_test_inserts + # assert res["terms"]["CHIP-seq"]["terms"]["4DN"]["experiment_sets"] > 0 + # assert res["terms"]["CHIP-seq"]["terms"]["4DN"]["experiment_sets"] < count_exp_set_test_inserts @pytest.fixture(scope='session') @@ -818,7 +824,7 @@ def test_search_additional_mixing_disabled_default_hidden(self, testapp, hidden_ hidden restriction. """ facets = testapp.get('/search/?type=TestingHiddenFacets' '&additional_facet=%s' - '&additional_facet=%s' + '&additional_facet=%s' '&additional_facet=%s' % (_facets[0], _facets[1], _facets[2])).json['facets'] expected = self.DEFAULT_FACETS + [_facets[0], _facets[1]] # first two should show actual = [facet['field'] for facet in facets] @@ -854,7 +860,7 @@ def test_search_additional_nested_facets(self, testapp, hidden_facet_test_data, @pytest.fixture def many_non_nested_facets(self, testapp, hidden_facet_test_data): - return testapp.get('/search/?type=TestingHiddenFacets' + return testapp.get('/search/?type=TestingHiddenFacets' '&additional_facet=non_nested_array_of_objects.fruit' '&additional_facet=non_nested_array_of_objects.color' '&additional_facet=non_nested_array_of_objects.uid').json['facets'] diff --git a/src/encoded/tests/test_static_page.py b/src/encoded/tests/test_static_page.py index 0e7cf013eb..b7b5147068 100644 --- a/src/encoded/tests/test_static_page.py +++ b/src/encoded/tests/test_static_page.py @@ -1,146 +1,124 @@ import pytest -import webtest +#import webtest from dcicutils.qa_utils import notice_pytest_fixtures -from .workbook_fixtures 
import app_settings, app # are these needed? -kmp 12-Mar-2021 - - -notice_pytest_fixtures(app_settings, app) - -pytestmark = [pytest.mark.indexing, pytest.mark.working] - - -@pytest.fixture(scope='module') -def help_page_section_json(): - return { - "title": "", - "name" : "help.user-guide.rest-api.rest_api_submission", - "file": "/docs/source/rest_api_submission.rst", - "uuid" : "442c8aa0-dc6c-43d7-814a-854af460b020" - } - -@pytest.fixture(scope='module') -def help_page_json(): - return { - "name": "help/user-guide/rest-api", - "title": "The REST-API", - "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"], - "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540d", - "table-of-contents": { - "enabled": True, - "header-depth": 4, - "list-styles": ["decimal", "lower-alpha", "lower-roman"] - } - } - -@pytest.fixture(scope='module') -def help_page_json_draft(): - return { - "name": "help/user-guide/rest-api-draft", - "title": "The REST-API", - "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"], - "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540c", - "table-of-contents": { - "enabled": True, - "header-depth": 4, - "list-styles": ["decimal", "lower-alpha", "lower-roman"] - }, - "status" : "draft" - } - -@pytest.fixture(scope='module') -def help_page_json_deleted(): - return { - "name": "help/user-guide/rest-api-deleted", - "title": "The REST-API", - "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"], - "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540a", - "table-of-contents": { - "enabled": True, - "header-depth": 4, - "list-styles": ["decimal", "lower-alpha", "lower-roman"] - }, - "status" : "deleted" - } - - -@pytest.fixture(scope='module') -def posted_help_page_section(testapp, help_page_section_json): - try: - res = testapp.post_json('/static-sections/', help_page_section_json, status=201) - val = res.json['@graph'][0] - except webtest.AppError: - res = testapp.get('/' + help_page_section_json['uuid'], status=301).follow() - val = res.json - return val - - 
-@pytest.fixture(scope='module') -def help_page(testapp, posted_help_page_section, help_page_json): - try: - res = testapp.post_json('/pages/', help_page_json, status=201) - val = res.json['@graph'][0] - except webtest.AppError: - res = testapp.get('/' + help_page_json['uuid'], status=301).follow() - val = res.json - return val - - -@pytest.fixture(scope='module') -def help_page_deleted(testapp, posted_help_page_section, help_page_json_draft): - try: - res = testapp.post_json('/pages/', help_page_json_draft, status=201) - val = res.json['@graph'][0] - except webtest.AppError: - res = testapp.get('/' + help_page_json_draft['uuid'], status=301).follow() - val = res.json - return val - - -@pytest.fixture(scope='module') -def help_page_restricted(testapp, posted_help_page_section, help_page_json_deleted): - try: - res = testapp.post_json('/pages/', help_page_json_deleted, status=201) - val = res.json['@graph'][0] - except webtest.AppError: - res = testapp.get('/' + help_page_json_deleted['uuid'], status=301).follow() - val = res.json - return val - - -def test_get_help_page(testapp, help_page): - help_page_url = "/" + help_page['name'] +from .workbook_fixtures import workbook, app_settings, app +from ..util import workbook_lookup + +notice_pytest_fixtures(workbook, app_settings, app) + +pytestmark = [pytest.mark.working, pytest.mark.workbook, pytest.mark.indexing] + + +def wait_for_index(testapp): + testapp.post_json("/index", {"record": False}) + + +@pytest.fixture +def static_help_page_default(): + return workbook_lookup(item_type='Page', name='help/user-guide/rest-api') + + +def test_static_help_page_default(static_help_page_default): + assert static_help_page_default['name'] == 'help/user-guide/rest-api' + + +@pytest.fixture +def static_help_page_draft(): + return workbook_lookup(item_type='Page', name='help/user-guide/rest-api-draft') + + +@pytest.fixture +def static_help_page_deleted(): + return workbook_lookup(item_type='Page', 
name='help/user-guide/rest-api-deleted') + + +def test_get_help_page(workbook, testapp, static_help_page_default): + testapp = testapp + wait_for_index(testapp) + help_page_url = "/" + static_help_page_default['name'] res = testapp.get(help_page_url, status=200) assert res.json['@id'] == help_page_url assert res.json['@context'] == help_page_url assert 'HelpPage' in res.json['@type'] assert 'StaticPage' in res.json['@type'] - #assert res.json['content'] == help_page['content'] # No longer works latter is set to an @id of static_section - assert 'Accession and uuid are automatically assigned during initial posting' in res.json['content'][0]['content'] # Instead lets check what we have embedded on GET request is inside our doc file (rest_api_submission.md). - assert res.json['toc'] == help_page['table-of-contents'] - + # assert res.json['content'] == help_page['content'] # No longer works latter is set to an @id of static_section + # Instead lets check what we have embedded on GET request is inside our doc file (rest_api_submission.md). 
+ assert 'Accession and uuid are automatically assigned during initial posting' in res.json['content'][0]['content'] + assert res.json['toc'] == static_help_page_default['table-of-contents'] -def test_get_help_page_deleted(anonhtmltestapp, help_page_deleted): - help_page_url = "/" + help_page_deleted['name'] - anonhtmltestapp.get(help_page_url, status=403) +def test_get_help_page_draft(workbook, anonhtmltestapp, htmltestapp, static_help_page_draft): -def test_get_help_page_no_access(anonhtmltestapp, testapp, help_page_restricted): - help_page_url = "/" + help_page_restricted['name'] + wait_for_index(htmltestapp) + help_page_url = "/" + static_help_page_draft['name'] anonhtmltestapp.get(help_page_url, status=403) - testapp.get(help_page_url, status=200) + htmltestapp.get(help_page_url, status=200) -def test_page_unique_name(testapp, help_page, help_page_deleted): +def test_get_help_page_deleted(workbook, anonhtmltestapp, htmltestapp, static_help_page_deleted): + wait_for_index(htmltestapp) + help_page_url = "/" + static_help_page_deleted['name'] + anonhtmltestapp.get(help_page_url, status=403) + htmltestapp.get(help_page_url, status=200) # Why 200 and not 404? 
-kmp 23-Feb-2021 + + +def test_get_help_page_no_access(workbook, anontestapp, testapp, anonhtmltestapp, htmltestapp, + static_help_page_default, static_help_page_draft, static_help_page_deleted): + notice_pytest_fixtures(workbook) + wait_for_index(testapp) + success = True + for app_name, testapp, role in [("anon_es", anontestapp, 'anon'), + ("es", testapp, 'system'), + ("anon_html_es", anonhtmltestapp, 'anon'), + ("html_es", htmltestapp, 'system')]: + for help_page, is_public in [(static_help_page_default, True), + (static_help_page_draft, False), + (static_help_page_deleted, False)]: + expected_code = 200 if is_public else (403 if role == 'anon' else 200) + page_name = help_page['name'] + help_page_url = "/" + page_name + res = testapp.get(help_page_url, status=(200, 301, 403, 404)).maybe_follow() + actual_code = res.status_code + if actual_code == expected_code: + print("%s => %s: SUCCESS (%s)" % (app_name, page_name, actual_code)) + else: + print("%s => %s: FAILED (%s, not %s): %s..." + % (app_name, page_name, actual_code, expected_code, res.body[:20])) + success = False + assert success, "Test failed." 
+ + +def check_page_unique_name(testapp, conflicting_page, page_to_patch): + wait_for_index(testapp) # POST again with same name and expect validation error - new_page = {'name': help_page['name']} - res = testapp.post_json('/page', new_page, status=422) - expected_val_err = "%s already exists with name '%s'" % (help_page['uuid'], new_page['name']) - actual_error_description = res.json['errors'][0]['description'] - print("expected:", expected_val_err) - print("actual:", actual_error_description) - assert expected_val_err in actual_error_description - - # also test PATCH of an existing page with another name - res = testapp.patch_json(help_page_deleted['@id'], {'name': new_page['name']}, status=422) - assert expected_val_err in res.json['errors'][0]['description'] + conflicting_document = {'name': conflicting_page['name']} + conflict_message = "%s already exists with name '%s'" % (conflicting_page['uuid'], conflicting_page['name']) + + def check_conflict(res): + actual_error_description = res.json['errors'][0]['description'] + print("expected:", conflict_message) + print("actual:", actual_error_description) + assert conflict_message in actual_error_description + + # Test that POST of a new page with the same name as an existing page is not allowed. + check_conflict(testapp.post_json('/page', conflicting_document, status=422)) + # Also test PATCH of an existing page with the same name as another existing page is not allowed. 
+ page_to_patch_uuid_url = '/' + page_to_patch['uuid'] + check_conflict(testapp.patch_json(page_to_patch_uuid_url, conflicting_document, status=422)) + actual_page_to_patch = testapp.get(page_to_patch_uuid_url).maybe_follow().json + check_conflict(testapp.patch_json(actual_page_to_patch['@id'], conflicting_document, status=422)) + + +def test_page_unique_name(workbook, testapp, static_help_page_draft, static_help_page_default): + notice_pytest_fixtures(workbook, testapp, static_help_page_draft, static_help_page_default) + check_page_unique_name(testapp=testapp, + conflicting_page=static_help_page_default, + page_to_patch=static_help_page_draft) + + +def test_page_unique_name_deleted(workbook, testapp, static_help_page_draft, static_help_page_deleted): + notice_pytest_fixtures(workbook, testapp, static_help_page_draft, static_help_page_deleted) + check_page_unique_name(testapp=testapp, + conflicting_page=static_help_page_deleted, + page_to_patch=static_help_page_draft) diff --git a/src/encoded/tests/test_validation_errors.py b/src/encoded/tests/test_validation_errors.py index 0b6cba0bfc..d985103006 100644 --- a/src/encoded/tests/test_validation_errors.py +++ b/src/encoded/tests/test_validation_errors.py @@ -14,6 +14,7 @@ # pytest.mark.flaky(rerun_filter=delay_rerun), ] + @pytest.mark.skip(reason="validation_errors facet was removed in search.py") def test_validation_err_facet(workbook, testapp): res = testapp.get('/search/?type=ExperimentSetReplicate').json diff --git a/src/encoded/tests/test_views.py b/src/encoded/tests/test_views.py index 776cd1cfbf..9f1299191f 100644 --- a/src/encoded/tests/test_views.py +++ b/src/encoded/tests/test_views.py @@ -8,7 +8,7 @@ from pyramid.compat import ascii_native_ from snovault import TYPES from urllib.parse import urlparse -from .datafixtures import ORDER +from .conftest_settings import ORDER pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema] diff --git a/src/encoded/tests/workbook_fixtures.py 
b/src/encoded/tests/workbook_fixtures.py index 55bcb6c026..93666294a1 100644 --- a/src/encoded/tests/workbook_fixtures.py +++ b/src/encoded/tests/workbook_fixtures.py @@ -7,7 +7,7 @@ from snovault.elasticsearch import create_mapping from .. import main from ..loadxl import load_all -from .conftest_settings import make_app_settings_dictionary +from .conftest_settings import make_app_settings_dictionary, INDEXER_NAMESPACE_FOR_TESTING # this file was previously used to setup the test fixtures for the BDD tests. @@ -29,7 +29,7 @@ def app_settings(wsgi_server_host_port, elasticsearch_server, postgresql_server, settings['collection_datastore'] = 'elasticsearch' settings['item_datastore'] = 'elasticsearch' settings['indexer'] = True - settings['indexer.namespace'] = os.environ.get('TRAVIS_JOB_ID', '') # set namespace for tests + settings['indexer.namespace'] = INDEXER_NAMESPACE_FOR_TESTING # use aws auth to access elasticsearch if aws_auth: @@ -48,9 +48,9 @@ def app(app_settings, **kwargs): yield app - DBSession = app.registry[DBSESSION] + db_session = app.registry[DBSESSION] # Dispose connections so postgres can tear down. - DBSession.bind.pool.dispose() + db_session.bind.pool.dispose() @pytest.mark.fixture_cost(500) @@ -65,7 +65,7 @@ def workbook(app): # just load the workbook inserts load_res = load_all(testapp, pkg_resources.resource_filename('encoded', 'tests/data/workbook-inserts/'), []) if load_res: - raise(load_res) + raise load_res testapp.post_json('/index', {}) yield From c6340e79f8d0e456ec72a752b65bb86ecd9017f2 Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Thu, 25 Mar 2021 13:34:48 -0400 Subject: [PATCH 2/7] Tighter doc string. 
--- src/encoded/tests/conftest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/encoded/tests/conftest.py b/src/encoded/tests/conftest.py index f970af3af3..796b92d17c 100644 --- a/src/encoded/tests/conftest.py +++ b/src/encoded/tests/conftest.py @@ -101,8 +101,7 @@ def dummy_request(root, registry, app): @pytest.fixture(scope='session') def app(app_settings): - """WSGI application level functional testing. - """ + """ WSGI application level functional testing. """ return main({}, **app_settings) From bc417f38d56c148e4a4478bbc17da30ea10060de Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Fri, 26 Mar 2021 12:21:58 -0400 Subject: [PATCH 3/7] Re-bump patch version after merge from master. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 516ef1238b..3fe724bab7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] # Note: Various modules refer to this system as "encoded", not "fourfront". 
name = "encoded" -version = "2.6.1" +version = "2.6.2" description = "4DN-DCIC Fourfront" authors = ["4DN-DCIC Team "] license = "MIT" From 052280108401bfa2242944665c4fe532f6104e7d Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Fri, 26 Mar 2021 13:35:15 -0400 Subject: [PATCH 4/7] Mark test_purge_item_type.py broken --- src/encoded/tests/test_purge_item_type.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/encoded/tests/test_purge_item_type.py b/src/encoded/tests/test_purge_item_type.py index 50b5958d05..75e7da9d59 100644 --- a/src/encoded/tests/test_purge_item_type.py +++ b/src/encoded/tests/test_purge_item_type.py @@ -2,7 +2,7 @@ from ..commands.purge_item_type import purge_item_type_from_storage -pytestmark = [pytest.mark.working] +pytestmark = [pytest.mark.broken] @pytest.fixture From 2d333871f7f3146ddb3d5a1af331f835cfd0ca23 Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Mon, 29 Mar 2021 23:44:50 -0400 Subject: [PATCH 5/7] Simplify test_static_page.py --- src/encoded/tests/test_static_page.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/encoded/tests/test_static_page.py b/src/encoded/tests/test_static_page.py index b7b5147068..099d0f50f9 100644 --- a/src/encoded/tests/test_static_page.py +++ b/src/encoded/tests/test_static_page.py @@ -1,5 +1,4 @@ import pytest -#import webtest from dcicutils.qa_utils import notice_pytest_fixtures from .workbook_fixtures import workbook, app_settings, app @@ -10,10 +9,6 @@ pytestmark = [pytest.mark.working, pytest.mark.workbook, pytest.mark.indexing] -def wait_for_index(testapp): - testapp.post_json("/index", {"record": False}) - - @pytest.fixture def static_help_page_default(): return workbook_lookup(item_type='Page', name='help/user-guide/rest-api') @@ -34,8 +29,8 @@ def static_help_page_deleted(): def test_get_help_page(workbook, testapp, static_help_page_default): + notice_pytest_fixtures(workbook, testapp, static_help_page_default) testapp = 
testapp - wait_for_index(testapp) help_page_url = "/" + static_help_page_default['name'] res = testapp.get(help_page_url, status=200) assert res.json['@id'] == help_page_url @@ -49,15 +44,14 @@ def test_get_help_page(workbook, testapp, static_help_page_default): def test_get_help_page_draft(workbook, anonhtmltestapp, htmltestapp, static_help_page_draft): - - wait_for_index(htmltestapp) + notice_pytest_fixtures(workbook, anonhtmltestapp, htmltestapp, static_help_page_draft) help_page_url = "/" + static_help_page_draft['name'] anonhtmltestapp.get(help_page_url, status=403) htmltestapp.get(help_page_url, status=200) def test_get_help_page_deleted(workbook, anonhtmltestapp, htmltestapp, static_help_page_deleted): - wait_for_index(htmltestapp) + notice_pytest_fixtures(workbook, anonhtmltestapp, htmltestapp, static_help_page_deleted) help_page_url = "/" + static_help_page_deleted['name'] anonhtmltestapp.get(help_page_url, status=403) htmltestapp.get(help_page_url, status=200) # Why 200 and not 404? 
-kmp 23-Feb-2021 @@ -65,8 +59,8 @@ def test_get_help_page_deleted(workbook, anonhtmltestapp, htmltestapp, static_he def test_get_help_page_no_access(workbook, anontestapp, testapp, anonhtmltestapp, htmltestapp, static_help_page_default, static_help_page_draft, static_help_page_deleted): - notice_pytest_fixtures(workbook) - wait_for_index(testapp) + notice_pytest_fixtures(workbook, anontestapp, testapp, anonhtmltestapp, htmltestapp, + static_help_page_default, static_help_page_draft, static_help_page_deleted) success = True for app_name, testapp, role in [("anon_es", anontestapp, 'anon'), ("es", testapp, 'system'), @@ -90,8 +84,11 @@ def test_get_help_page_no_access(workbook, anontestapp, testapp, anonhtmltestapp def check_page_unique_name(testapp, conflicting_page, page_to_patch): - wait_for_index(testapp) - # POST again with same name and expect validation error + """ + Tries to post and patch page_to_patch but expects to get a 422 error because of a conflict with conflicting_page. + Since under normal circumstances no change will occur, it is considered safe to do as part of workbook tests. + """ + conflicting_document = {'name': conflicting_page['name']} conflict_message = "%s already exists with name '%s'" % (conflicting_page['uuid'], conflicting_page['name']) @@ -103,9 +100,11 @@ def check_conflict(res): # Test that POST of a new page with the same name as an existing page is not allowed. check_conflict(testapp.post_json('/page', conflicting_document, status=422)) + # Also test PATCH of an existing page with the same name as another existing page is not allowed. 
page_to_patch_uuid_url = '/' + page_to_patch['uuid'] check_conflict(testapp.patch_json(page_to_patch_uuid_url, conflicting_document, status=422)) + actual_page_to_patch = testapp.get(page_to_patch_uuid_url).maybe_follow().json check_conflict(testapp.patch_json(actual_page_to_patch['@id'], conflicting_document, status=422)) From 6828650f636a77915a5005080b4e86a2b6b425d5 Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Tue, 30 Mar 2021 14:06:23 -0400 Subject: [PATCH 6/7] Bump patch version after merging master. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a2e1bd0af2..0594a9b039 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] # Note: Various modules refer to this system as "encoded", not "fourfront". name = "encoded" -version = "2.6.5" +version = "2.6.6" description = "4DN-DCIC Fourfront" authors = ["4DN-DCIC Team "] license = "MIT" From 87e1b4bea6a2a7167818926811057e042ca92836 Mon Sep 17 00:00:00 2001 From: Kent Pitman Date: Tue, 30 Mar 2021 14:45:16 -0400 Subject: [PATCH 7/7] Get rid of a warning about the use of backslash i in test_types_init_collections.py --- src/encoded/tests/test_types_init_collections.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/encoded/tests/test_types_init_collections.py b/src/encoded/tests/test_types_init_collections.py index afd99d9d20..17e017e73f 100644 --- a/src/encoded/tests/test_types_init_collections.py +++ b/src/encoded/tests/test_types_init_collections.py @@ -223,9 +223,9 @@ def vendor_data_alias(lab, award): 'title': 'Wrong Alias Biochemical', 'lab': lab['@id'], 'award': award['@id'], - 'aliases': ['my_lab:this_is_correct_one', - 'my_lab:this/is_wrong', - 'my_lab:this\is_wrong_too']} + 'aliases': [r'my_lab:this_is_correct_one', + r'my_lab:this/is_wrong', + r'my_lab:this\is_wrong_too']} def test_vendor_alias_wrong_format(testapp, vendor_data_alias):