diff --git a/oioioi/encdec/__init__.py b/oioioi/encdec/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/oioioi/encdec/apps.py b/oioioi/encdec/apps.py
new file mode 100644
index 000000000..cf2032bec
--- /dev/null
+++ b/oioioi/encdec/apps.py
@@ -0,0 +1,6 @@
+from django.apps import AppConfig
+
+
+class EncdecAppConfig(AppConfig):
+    default_auto_field = 'django.db.models.AutoField'
+    name = "oioioi.encdec"
diff --git a/oioioi/encdec/controllers.py b/oioioi/encdec/controllers.py
new file mode 100644
index 000000000..65463592f
--- /dev/null
+++ b/oioioi/encdec/controllers.py
@@ -0,0 +1,328 @@
+import itertools
+from operator import attrgetter  # pylint: disable=E0611
+
+from django.conf import settings
+from django.template.loader import render_to_string
+from django.utils.safestring import mark_safe
+from django.utils.translation import gettext_lazy as _
+from oioioi.contests.controllers import submission_template_context
+from oioioi.encdec.models import EncdecChannel, EncdecChecker, EncdecTestReport
+from oioioi.evalmgr.tasks import (
+    add_before_placeholder,
+    extend_after_placeholder,
+    recipe_placeholder,
+)
+from oioioi.filetracker.utils import django_to_filetracker_path
+from oioioi.problems.utils import can_admin_problem, can_admin_problem_instance
+from oioioi.programs.controllers import ProgrammingProblemController
+from oioioi.programs.utils import (
+    get_extension,
+    get_problem_link_or_name,
+)
+from oioioi.contests.models import ScoreReport, SubmissionReport
+from oioioi.programs.models import CompilationReport, GroupReport
+
+
+def get_report_display_type(request, status, score, max_score):
+    if status == 'INI_OK' or status == 'OK':
+        try:
+            if score is None or max_score is None:
+                display_type = status
+
+            elif max_score.to_int() == 0:
+                display_type = status
+
+            else:
+                score_percentage = (
+                    float(score.to_int()) / max_score.to_int()
+                )
+
+                if score_percentage < 0.25:
+                    display_type = 'OK0'
+                elif score_percentage < 0.5:
+                    display_type = 'OK25'
+                elif score_percentage < 0.75:
+                    display_type = 'OK50'
+                elif score_percentage < 1.0:
+                    display_type = 'OK75'
+                else:
+                    display_type = 'OK100'
+
+        # If by any means there is no 'score' or 'max_score' field then
+        # we just treat the test report as without them
+        except AttributeError:
+            display_type = status
+
+    else:
+        display_type = status
+
+    return display_type
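+
+# A minimal illustration of the helper above (not part of the original code;
+# assumes score objects expose ``to_int()``, as used in the implementation):
+#
+#     get_report_display_type(request, 'OK', IntegerScore(3), IntegerScore(10))
+#     # -> 'OK25'  (30% of the maximum score)
+#     get_report_display_type(request, 'WA', IntegerScore(0), IntegerScore(10))
+#     # -> 'WA'    (non-OK statuses are passed through unchanged)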
+
+class EncdecProblemController(ProgrammingProblemController):
+    description = _("Encoder-decoder programming problem")
+
+    def generate_initial_evaluation_environ(self, environ, submission, **kwargs):
+        problem_instance = submission.problem_instance
+        problem = problem_instance.problem
+        contest = problem_instance.contest
+        if contest is not None:
+            round = problem_instance.round
+
+        submission = submission.programsubmission
+        environ['source_file'] = django_to_filetracker_path(submission.source_file)
+        environ['language'] = get_extension(submission.source_file.name)
+        environ[
+            'compilation_result_size_limit'
+        ] = problem_instance.controller.get_compilation_result_size_limit(submission)
+
+        environ['submission_id'] = submission.id
+        environ['submission_kind'] = submission.kind
+        environ['problem_instance_id'] = problem_instance.id
+        environ['problem_id'] = problem.id
+        environ['problem_short_name'] = problem.short_name
+        if contest is not None:
+            environ['round_id'] = round.id
+            environ['contest_id'] = contest.id
+        environ['submission_owner'] = (
+            submission.user.username if submission.user else None
+        )
+        environ['oioioi_instance'] = settings.SITE_NAME
+        environ['contest_priority'] = (
+            contest.judging_priority
+            if contest is not None
+            else settings.NON_CONTEST_PRIORITY
+        )
+        environ['contest_priority'] += settings.OIOIOI_INSTANCE_PRIORITY_BONUS
+        environ['contest_weight'] = (
+            contest.judging_weight
+            if contest is not None
+            else settings.NON_CONTEST_WEIGHT
+        )
+        environ['contest_weight'] += settings.OIOIOI_INSTANCE_WEIGHT_BONUS
+
+        environ.setdefault('report_kinds', ['INITIAL', 'NORMAL'])
+        if 'hidden_judge' in environ['extra_args']:
+            environ['report_kinds'] = ['HIDDEN']
+
+        environ['compiler'] = problem_instance.controller.get_compiler_for_submission(
+            submission
+        )
+
+    def generate_recipe(self, kinds):
+        recipe_body = [('collect_tests', 'oioioi.encdec.handlers.collect_tests')]
+
+        if 'INITIAL' in kinds:
+            recipe_body.extend(
+                [
+                    (
+                        'initial_run_tests',
+                        'oioioi.encdec.handlers.run_tests',
+                        dict(kind='EXAMPLE'),
+                    ),
+                    ('initial_run_tests_end', 'oioioi.encdec.handlers.run_tests_end'),
+                    ('initial_grade_tests', 'oioioi.encdec.handlers.grade_tests'),
+                    ('initial_grade_groups', 'oioioi.encdec.handlers.grade_groups'),
+                    (
+                        'initial_grade_submission',
+                        'oioioi.encdec.handlers.grade_submission',
+                        dict(kind='EXAMPLE'),
+                    ),
+                    (
+                        'initial_make_report',
+                        'oioioi.encdec.handlers.make_report',
+                        dict(kind='INITIAL'),
+                    ),
+                    recipe_placeholder('after_initial_tests'),
+                ]
+            )
+
+        if 'NORMAL' in kinds or 'HIDDEN' in kinds or 'FULL' in kinds:
+            recipe_body.append(recipe_placeholder('before_final_tests'))
+
+        if 'NORMAL' in kinds:
+            recipe_body.extend(
+                [
+                    (
+                        'final_run_tests',
+                        'oioioi.encdec.handlers.run_tests',
+                        dict(kind='NORMAL'),
+                    ),
+                    ('final_run_tests_end', 'oioioi.encdec.handlers.run_tests_end'),
+                    ('final_grade_tests', 'oioioi.encdec.handlers.grade_tests'),
+                    ('final_grade_groups', 'oioioi.encdec.handlers.grade_groups'),
+                    (
+                        'final_grade_submission',
+                        'oioioi.encdec.handlers.grade_submission',
+                    ),
+                    ('final_make_report', 'oioioi.encdec.handlers.make_report'),
+                    recipe_placeholder('after_final_tests'),
+                ]
+            )
+
+        if 'HIDDEN' in kinds:
+            recipe_body.extend(
+                [
+                    ('hidden_run_tests', 'oioioi.encdec.handlers.run_tests'),
+                    ('hidden_run_tests_end', 'oioioi.encdec.handlers.run_tests_end'),
+                    ('hidden_grade_tests', 'oioioi.encdec.handlers.grade_tests'),
+                    ('hidden_grade_groups', 'oioioi.encdec.handlers.grade_groups'),
+                    (
+                        'hidden_grade_submission',
+                        'oioioi.encdec.handlers.grade_submission',
+                        dict(kind=None),
+                    ),
+                    (
+                        'hidden_make_report',
+                        'oioioi.encdec.handlers.make_report',
+                        dict(kind='HIDDEN'),
+                    ),
+                    recipe_placeholder('after_all_tests'),
+                ]
+            )
+
+        if 'FULL' in kinds:
+            recipe_body.extend(
+                [
+                    ('full_run_tests', 'oioioi.encdec.handlers.run_tests'),
+                    ('full_run_tests_end', 'oioioi.encdec.handlers.run_tests_end'),
+                    ('full_grade_tests', 'oioioi.encdec.handlers.grade_tests'),
+                    ('full_grade_groups', 'oioioi.encdec.handlers.grade_groups'),
+                    (
+                        'full_grade_submission',
+                        'oioioi.encdec.handlers.grade_submission',
+                        dict(kind=None),
+                    ),
+                    (
+                        'full_make_report',
+                        'oioioi.encdec.handlers.make_report',
+                        dict(kind='FULL'),
+                    ),
+                    recipe_placeholder('after_full_tests'),
+                ]
+            )
+
+        return recipe_body
+
+    def fill_evaluation_environ(self, environ, submission, **kwargs):
+        self.generate_base_environ(environ, submission, **kwargs)
+
+        if 'USER_OUTS' in environ['submission_kind']:
+            environ['report_kinds'] = ['USER_OUTS']
+            environ['save_outputs'] = True
+
+        recipe_body = self.generate_recipe(environ['report_kinds'])
+
+        extend_after_placeholder(environ, 'after_compile', recipe_body)
+
+        environ.setdefault('group_scorer', 'oioioi.programs.utils.min_group_scorer')
+        environ.setdefault(
+            'score_aggregator', 'oioioi.programs.utils.sum_score_aggregator'
+        )
+
+        channel = EncdecChannel.objects.get(problem=self.problem).exe_file
+        checker = EncdecChecker.objects.get(problem=self.problem).exe_file
+
+        environ['channel'] = django_to_filetracker_path(channel)
+        environ['checker'] = django_to_filetracker_path(checker)
+
+        if 'INITIAL' in environ['report_kinds']:
+            add_before_placeholder(
+                environ,
+                'after_initial_tests',
+                (
+                    'update_report_statuses',
+                    'oioioi.contests.handlers.update_report_statuses',
+                ),
+            )
+            add_before_placeholder(
+                environ,
+                'after_initial_tests',
+                (
+                    'update_submission_score',
+                    'oioioi.contests.handlers.update_submission_score',
+                ),
+            )
+
+    def render_submission(self, request, submission):
+        problem_instance = submission.problem_instance
+        if submission.kind == 'USER_OUTS':
+            # The comment is marked as a safe string, because it is generated
+            # automatically (users cannot affect it).
+            # Note that we only temporarily assign a SafeString object, because
+            # the model field is originally a plain string.
+            submission.programsubmission.comment = mark_safe(
+                submission.programsubmission.comment
+            )
+        can_admin = can_admin_problem_instance(request, submission.problem_instance)
+
+        return render_to_string(
+            'encdec/submission_header.html',
+            request=request,
+            context={
+                'submission': submission_template_context(
+                    request, submission.programsubmission
+                ),
+                'problem': get_problem_link_or_name(request, submission),
+                'saved_diff_id': request.session.get('saved_diff_id'),
+                'supported_extra_args': problem_instance.controller.get_supported_extra_args(
+                    submission
+                ),
+                'can_admin': can_admin,
+            },
+        )
+
+    def render_report(self, request, report):
+        problem_instance = report.submission.problem_instance
+        if report.kind == 'FAILURE':
+            return problem_instance.controller.render_report_failure(request, report)
+
+        score_report = ScoreReport.objects.get(submission_report=report)
+        compilation_report = CompilationReport.objects.get(submission_report=report)
+        test_reports = (
+            EncdecTestReport.objects.filter(submission_report=report)
+            .select_related('userout_status')
+            .order_by('test__order', 'test_group', 'test_name')
+        )
+        group_reports = GroupReport.objects.filter(submission_report=report)
+        show_scores = any(gr.score is not None for gr in group_reports)
+        group_reports = dict((g.group, g) for g in group_reports)
+
+        picontroller = problem_instance.controller
+
+        allow_download_out = picontroller.can_generate_user_out(request, report)
+        allow_test_comments = picontroller.can_see_test_comments(request, report)
+        all_outs_generated = allow_download_out
+
+        groups = []
+        for group_name, tests in itertools.groupby(
+            test_reports, attrgetter('test_group')
+        ):
+            tests_list = list(tests)
+
+            for test in tests_list:
+                test.generate_status = picontroller._out_generate_status(request, test)
+                all_outs_generated &= test.generate_status == 'OK'
+
+            tests_records = [
+                {'encoder_display_type': get_report_display_type(request, test.encoder_status, test.score, test.max_score),
+                 'decoder_display_type': get_report_display_type(request, test.decoder_status, test.score, test.max_score),
+                 'test': test}
+                for test in tests_list
+            ]
+
+            groups.append({'tests': tests_records, 'report': group_reports[group_name]})
+
+        return render_to_string(
+            'encdec/report.html',
+            request=request,
+            context={
+                'report': report,
+                'score_report': score_report,
+                'compilation_report': compilation_report,
+                'groups': groups,
+                'show_scores': show_scores,
+                'allow_download_out': allow_download_out,
+                'allow_test_comments': allow_test_comments,
+                'all_outs_generated': all_outs_generated,
+                'is_admin': picontroller.is_admin(request, report),
+            },
+        )
diff --git a/oioioi/encdec/handlers.py b/oioioi/encdec/handlers.py
new file mode 100644
index 000000000..1af93a012
--- /dev/null
+++ b/oioioi/encdec/handlers.py
@@ -0,0 +1,622 @@
+import functools
+import logging
+from collections import defaultdict
+
+import six
+from django.conf import settings
+from django.db import transaction
+from django.urls import reverse
+from django.utils.module_loading import import_string
+from django.utils.text import Truncator
+from django.utils.translation import gettext_lazy as _
+
+from oioioi.base.utils import make_html_link
+from oioioi.contests.handlers import _get_submission_or_skip
+from oioioi.contests.models import ScoreReport, SubmissionReport
+from oioioi.contests.scores import IntegerScore, ScoreValue
+from oioioi.encdec.models import (
+    LanguageOverrideForEncdecTest,
+    EncdecTest,
+    EncdecTestReport,
+    EncdecUserOutGenStatus,
+)
+from oioioi.evalmgr.tasks import transfer_job
+from oioioi.filetracker.client import get_client
+from oioioi.filetracker.utils import (
+    django_to_filetracker_path,
+    filetracker_to_django_file,
+)
+from oioioi.programs.handlers import (
+    COMPILE_TASK_PRIORITY,
+    DEFAULT_TEST_TASK_PRIORITY,
+    EXAMPLE_TEST_TASK_PRIORITY,
+    TESTRUN_TEST_TASK_PRIORITY,
+    _make_filename,
+    _skip_on_compilation_error,
+)
+from oioioi.programs.models import (
+    CompilationReport,
+    GroupReport,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def _override_tests_limits(language, tests):
+    """ Given language and list of EncdecTest objects, returns
+    the dictionary of memory and time limits.
+    The key is test's pk.
+    In case language overriding is defined in the database,
+    the value of key is specified by overriding. Otherwise,
+    the limits are the same as initial.
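+
+    For example, for a single test with pk 42 the returned mapping could
+    look like this (illustrative values only):
+
+        {
+            42: {'encoder_mem_limit': 66000, 'decoder_mem_limit': 66000,
+                 'encoder_time_limit': 1000, 'decoder_time_limit': 2000},
+        }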
+    """
+
+    overriding_tests = LanguageOverrideForEncdecTest.objects.filter(
+        test__in=tests, language=language
+    )
+    new_limits = {}
+
+    for test in tests:
+        new_limits[test.pk] = {
+            'encoder_mem_limit': test.encoder_memory_limit,
+            'decoder_mem_limit': test.decoder_memory_limit,
+            'encoder_time_limit': test.encoder_time_limit,
+            'decoder_time_limit': test.decoder_time_limit,
+        }
+
+    for new_rule in overriding_tests:
+        new_limits[new_rule.test.pk]['encoder_mem_limit'] = new_rule.encoder_memory_limit
+        new_limits[new_rule.test.pk]['decoder_mem_limit'] = new_rule.decoder_memory_limit
+        new_limits[new_rule.test.pk]['encoder_time_limit'] = new_rule.encoder_time_limit
+        new_limits[new_rule.test.pk]['decoder_time_limit'] = new_rule.decoder_time_limit
+
+    return new_limits
+
+
+@_skip_on_compilation_error
+@transaction.atomic
+def collect_tests(env, **kwargs):
+    """Collects tests from the database and converts them to
+    evaluation environments.
+
+    Used ``environ`` keys:
+      * ``problem_instance_id``
+      * ``language``
+      * ``extra_args``
+      * ``is_rejudge``
+
+    Produced ``environ`` keys:
+       * ``tests``: a dictionary mapping test names to test envs
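+
+       For example, a single produced entry could look like this
+       (illustrative paths and limits):
+
+           env['tests']['1a'] = {
+               'id': 17, 'name': '1a', 'kind': 'EXAMPLE', 'group': '1',
+               'max_score': 10, 'order': 0, 'to_judge': True,
+               'in_file': '/problems/p/in/1a.in',
+               'hint_file': '/problems/p/out/1a.hint',
+               'encoder_time_limit': 1000, 'decoder_time_limit': 1000,
+               'encoder_mem_limit': 66000, 'decoder_mem_limit': 66000,
+           }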
+    """
+    env.setdefault('tests', {})
+
+    problem_instance = env['problem_instance_id']
+    if 'tests_subset' in env['extra_args']:
+        tests = list(EncdecTest.objects.in_bulk(env['extra_args']['tests_subset']).values())
+    else:
+        tests = EncdecTest.objects.filter(
+            problem_instance__id=problem_instance, is_active=True
+        )
+
+    if env['is_rejudge']:
+        submission = env['submission_id']
+        rejudge_type = env['extra_args'].setdefault('rejudge_type', 'FULL')
+        tests_to_judge = env['extra_args'].setdefault('tests_to_judge', [])
+        test_reports = EncdecTestReport.objects.filter(
+            submission_report__submission__id=submission,
+            submission_report__status='ACTIVE',
+        )
+        tests_used = [report.test_name for report in test_reports]
+        if rejudge_type == 'NEW':
+            tests_to_judge = [
+                t.name
+                for t in EncdecTest.objects.filter(
+                    problem_instance__id=problem_instance, is_active=True
+                ).exclude(name__in=tests_used)
+            ]
+        elif rejudge_type == 'JUDGED':
+            tests = EncdecTest.objects.filter(
+                problem_instance__id=problem_instance, name__in=tests_used
+            )
+            tests_to_judge = [t for t in tests_to_judge if t in tests_used]
+        elif rejudge_type == 'FULL':
+            tests_to_judge = [t.name for t in tests]
+    else:
+        tests_to_judge = [t.name for t in tests]
+
+    # Some of the tests may be overridden, e.g. to add additional
+    # overhead to the time limits for Python submissions.
+    language = env['language']
+    new_limits = _override_tests_limits(language, tests)
+
+    for test in tests:
+        test_env = {}
+        test_env['id'] = test.id
+        test_env['name'] = test.name
+        test_env['in_file'] = django_to_filetracker_path(test.input_file)
+        test_env['hint_file'] = django_to_filetracker_path(test.hint_file)
+        test_env['kind'] = test.kind
+        test_env['group'] = test.group or test.name
+        test_env['max_score'] = test.max_score
+        test_env['order'] = test.order
+        if test.encoder_time_limit:
+            test_env['encoder_time_limit'] = new_limits[test.pk]['encoder_time_limit']
+        if test.decoder_time_limit:
+            test_env['decoder_time_limit'] = new_limits[test.pk]['decoder_time_limit']
+        if test.encoder_memory_limit:
+            test_env['encoder_mem_limit'] = new_limits[test.pk]['encoder_mem_limit']
+        if test.decoder_memory_limit:
+            test_env['decoder_mem_limit'] = new_limits[test.pk]['decoder_mem_limit']
+        test_env['to_judge'] = False
+        env['tests'][test.name] = test_env
+
+    for test in tests_to_judge:
+        env['tests'][test]['to_judge'] = True
+    return env
+
+
+@_skip_on_compilation_error
+def run_tests(env, kind=None, **kwargs):
+    """Runs tests and saves their results into the environment
+
+       If ``kind`` is specified, only tests with the given kind will be run.
+
+       Used ``environ`` keys:
+         * ``tests``: this should be a dictionary, mapping test name into
+           the environment to pass to the ``exec`` job
+         * ``unsafe_exec``: set to ``True`` if we want to use only
+           ``ulimit()`` to limit the executable file resources, ``False``
+           otherwise (see the documentation for ``unsafe-exec`` job for
+           more information),
+         * ``compiled_file``: the compiled file which will be tested,
+         * ``exec_info``: information how to execute ``compiled_file``
+         * ``check_outputs``: set to ``True`` if the output should be verified
+         * ``channel``: the filetracker path of the binary used as the
+           encoder-decoder channel,
+         * ``checker``: the filetracker path of the binary used as the
+           output checker,
+         * ``save_outputs``: set to ``True`` if and only if each of
+           test results should have its output file attached.
+         * ``sioworkers_extra_args``: dict mapping kinds to additional
+           arguments passed to
+           :func:`oioioi.sioworkers.jobs.run_sioworkers_jobs`
+           (kwargs).
+
+       Produced ``environ`` keys:
+         * ``test_results``: a dictionary, mapping test names into
+           dictionaries with the following keys:
+
+             ``result_code``
+               test status: OK, WA, RE, ...
+             ``result_string``
+               detailed supervisor information (for example, where the
+               required and returned outputs differ)
+             ``time_used``
+               total time used, in milliseconds
+             ``mem_used``
+               memory usage, in KiB
+             ``num_syscalls``
+               number of syscalls performed
+             ``out_file``
+               filetracker path to the output file (only if
+               ``env['save_outputs']`` was set)
+
+           If the dictionary already exists, new test results are appended.
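+
+       For a single test, the job sent to sioworkers is the test env from
+       ``env['tests']`` extended with the keys set below, e.g. (illustrative):
+
+           env['workers_jobs']['1a'] = {
+               # ...all keys from env['tests']['1a'], plus:
+               'job_type': 'encdec-exec',   # prefixed with env['exec_mode']
+               'task_priority': DEFAULT_TEST_TASK_PRIORITY,
+               'exe_file': env['compiled_file'],
+               'exec_info': env['exec_info'],
+               'chn_file': env['channel'],
+               'chk_file': env['checker'],
+               'untrusted_channel': env['untrusted_checker'],
+               'untrusted_checker': env['untrusted_checker'],
+           }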
+    """
+    jobs = dict()
+    not_to_judge = []
+    for test_name, test_env in six.iteritems(env['tests']):
+        if kind and test_env['kind'] != kind:
+            continue
+        if not test_env['to_judge']:
+            not_to_judge.append(test_name)
+            continue
+        job = test_env.copy()
+        job['job_type'] = (env.get('exec_mode', '') + '-encdec-exec').lstrip('-')
+        if kind == 'INITIAL' or kind == 'EXAMPLE':
+            job['task_priority'] = EXAMPLE_TEST_TASK_PRIORITY
+        elif env['submission_kind'] == 'TESTRUN':
+            job['task_priority'] = TESTRUN_TEST_TASK_PRIORITY
+        else:
+            job['task_priority'] = DEFAULT_TEST_TASK_PRIORITY
+        job['exe_file'] = env['compiled_file']
+        job['exec_info'] = env['exec_info']
+        job['chn_file'] = env['channel']
+        job['chk_file'] = env['checker']
+        job['untrusted_channel'] = job['untrusted_checker'] = env['untrusted_checker']
+        jobs[test_name] = job
+    extra_args = env.get('sioworkers_extra_args', {}).get(kind, {})
+    env['workers_jobs'] = jobs
+    env['workers_jobs.extra_args'] = extra_args
+    env['workers_jobs.not_to_judge'] = not_to_judge
+    return transfer_job(env,
+            'oioioi.sioworkers.handlers.transfer_job',
+            'oioioi.sioworkers.handlers.restore_job')
+
+
+@_skip_on_compilation_error
+def run_tests_end(env, **kwargs):
+    not_to_judge = env['workers_jobs.not_to_judge']
+    del env['workers_jobs.not_to_judge']
+    jobs = env['workers_jobs.results']
+    env.setdefault('test_results', {})
+    for test_name, result in six.iteritems(jobs):
+        env['test_results'].setdefault(test_name, {}).update(result)
+    for test_name in not_to_judge:
+        env['test_results'].setdefault(test_name, {}) \
+                .update(env['tests'][test_name])
+    return env
+
+
+def _convert_test_result(test_result):
+    result = {}
+    if 'failed_step' not in test_result:
+        result['exec_time_limit'] = test_result['decoder_time_limit']
+        result['time_used'] = max(
+            test_result['decoder_time_used'],
+            test_result['encoder_time_used']
+            * test_result['decoder_time_limit']
+            / test_result['encoder_time_limit'],
+        )
+        result['result_code'] = 'OK'
+        result['result_percentage'] = test_result['checker_result_percentage']
+    elif test_result['failed_step'] == 'checker':
+        result['exec_time_limit'] = test_result['decoder_time_limit']
+        result['time_used'] = test_result['decoder_time_used']
+        result['result_code'] = test_result['checker_result_code']
+    elif test_result['failed_step'] == 'decoder':
+        result['exec_time_limit'] = test_result['decoder_time_limit']
+        result['time_used'] = test_result['decoder_time_used']
+        result['result_code'] = test_result['decoder_result_code']
+    elif test_result['failed_step'] == 'channel':
+        result['exec_time_limit'] = test_result['encoder_time_limit']
+        result['time_used'] = test_result['encoder_time_used']
+        result['result_code'] = test_result['channel_result_code']
+    elif test_result['failed_step'] == 'encoder':
+        result['exec_time_limit'] = test_result['encoder_time_limit']
+        result['time_used'] = test_result['encoder_time_used']
+        result['result_code'] = test_result['encoder_result_code']
+    return result
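+
+# Illustrative example of the conversion above (made-up numbers): with
+# encoder_time_limit=2000, decoder_time_limit=1000, encoder_time_used=500,
+# decoder_time_used=200 and no 'failed_step', the converted result has
+# result_code='OK', exec_time_limit=1000 and
+# time_used = max(200, 500 * 1000 / 2000) = 250.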
+
+
+@_skip_on_compilation_error
+def grade_tests(env, **kwargs):
+    """Grades tests using a scoring function.
+
+       The ``env['test_scorer']``, which is used by this ``Handler``,
+       should be a path to a function which gets a test definition (e.g. an
+       ``env['tests'][test_name]`` dict) and a test run result (e.g. an
+       ``env['test_results'][test_name]`` dict) and returns a tuple of
+       score, max score (both instances of some subclass of
+       :class:`~oioioi.contests.scores.ScoreValue`, or ``None``) and status.
+
+       Used ``environ`` keys:
+         * ``tests``
+         * ``test_results``
+         * ``test_scorer``
+
+       Produced ``environ`` keys:
+         * `score`, `max_score` and `status` keys in ``env['test_results']``
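+
+       For example, a scorer compatible with this handler (an illustrative
+       all-or-nothing scorer, not the project default) could look like:
+
+           def all_or_nothing_scorer(test, result):
+               max_score = IntegerScore(test['max_score'])
+               if result['result_code'] == 'OK':
+                   return max_score, max_score, 'OK'
+               return IntegerScore(0), max_score, result['result_code']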
+    """
+
+    fun = import_string(env.get('test_scorer')
+            or settings.DEFAULT_TEST_SCORER)
+    tests = env['tests']
+    for test_name, test_result in six.iteritems(env['test_results']):
+        if tests[test_name]['to_judge']:
+            score, max_score, status = fun(tests[test_name], _convert_test_result(test_result))
+            assert isinstance(score, (type(None), ScoreValue))
+            assert isinstance(max_score, (type(None), ScoreValue))
+            test_result['score'] = score and score.serialize()
+            test_result['max_score'] = max_score and max_score.serialize()
+            test_result['status'] = status
+        else:
+            # Carry over the result of a previously judged test from the
+            # active report instead of re-scoring it.
+            report = EncdecTestReport.objects.get(
+                submission_report__submission__id=env['submission_id'],
+                submission_report__status='ACTIVE',
+                test_name=test_name)
+            test_result['score'] = report.score and report.score.serialize()
+            test_result['max_score'] = (
+                report.max_score and report.max_score.serialize()
+            )
+            # EncdecTestReport stores separate encoder/decoder statuses and
+            # times; the decoder values are used as the overall result here.
+            test_result['status'] = report.decoder_status
+            test_result['time_used'] = report.decoder_time_used
+            env['test_results'][test_name] = test_result
+    return env
+
+
+@_skip_on_compilation_error
+def grade_groups(env, **kwargs):
+    """Grades ungraded groups using a aggregating function.
+
+    The ``group_scorer`` key in ``env`` should contain the path to
+    a function which gets a list of test results (wihtout their names) and
+    returns an aggregated score (instance of some subclass of
+    :class:`~oioioi.contests.scores.ScoreValue`).
+
+    Used ``environ`` keys:
+      * ``tests``
+      * ``encoder_results``
+      * ``group_scorer``
+
+    Produced ``environ`` keys:
+      * `score`, `max_score` and `status` keys in ``env['group_results']``
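+
+    For a single group the scorer receives a dictionary like the following
+    (illustrative, with serialized scores as produced by ``grade_tests``):
+
+        {
+            '1a': {'score': IntegerScore(8).serialize(),
+                   'max_score': IntegerScore(10).serialize(),
+                   'order': 0, 'status': 'OK'},
+            '1b': {'score': IntegerScore(10).serialize(),
+                   'max_score': IntegerScore(10).serialize(),
+                   'order': 1, 'status': 'OK'},
+        }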
+    """
+
+    test_results = defaultdict(dict)
+    for test_name, test_result in env['test_results'].items():
+        test = env['tests'][test_name]
+        group_name = test['group']
+        test_results[group_name][test_name] = {
+            'score': test_result['score'],
+            'max_score': test_result['max_score'],
+            'order': test['order'],
+            'status': test_result['status']
+        }
+
+    group_scorer = import_string(env.get('group_scorer', settings.DEFAULT_GROUP_SCORER))
+    env.setdefault('group_results', {})
+    for group_name, results in test_results.items():
+        if group_name in env['group_results']:
+            continue
+        score, max_score, status = group_scorer(results)
+        if not isinstance(score, (type(None), ScoreValue)):
+            raise TypeError(
+                "Group scorer returned %r as score, "
+                "not None or ScoreValue" % (type(score),)
+            )
+        if not isinstance(max_score, (type(None), ScoreValue)):
+            raise TypeError(
+                "Group scorer returned %r as max_score, "
+                "not None or ScoreValue" % (type(max_score),)
+            )
+        group_result = {}
+        group_result['score'] = score and score.serialize()
+        group_result['max_score'] = max_score and max_score.serialize()
+        group_result['status'] = status
+        one_of_tests = env['tests'][next(iter(results.keys()))]
+        if not all(
+            env['tests'][key]['kind'] == one_of_tests['kind']
+            for key in results.keys()
+        ):
+            raise ValueError(
+                "Tests in group '%s' have different kinds. "
+                "This is not supported." % (group_name,)
+            )
+        group_result['kind'] = one_of_tests['kind']
+        env['group_results'][group_name] = group_result
+
+    return env
+
+
+def grade_submission(env, kind='NORMAL', **kwargs):
+    """Grades submission with specified kind of tests on a `Job` layer.
+
+    If ``kind`` is None, all tests will be graded.
+
+    This `Handler` aggregates score from graded groups and gets
+    submission status from tests results.
+
+    Used ``environ`` keys:
+        * ``group_results``
+        * ``score_aggregator``
+
+    Produced ``environ`` keys:
+        * ``status``
+        * ``score``
+        * ``max_score``
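+
+    With ``kind='NORMAL'`` only groups whose ``group_result['kind']`` equals
+    ``'NORMAL'`` reach the aggregator; an illustrative aggregator input:
+
+        {
+            '1': {'score': IntegerScore(40).serialize(),
+                  'max_score': IntegerScore(40).serialize(),
+                  'status': 'OK', 'kind': 'NORMAL'},
+            '2': {'score': IntegerScore(0).serialize(),
+                  'max_score': IntegerScore(60).serialize(),
+                  'status': 'WA', 'kind': 'NORMAL'},
+        }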
+    """
+
+    # TODO: let score_aggregator handle compilation errors
+
+    if env.get('compilation_result', 'OK') != 'OK':
+        env['score'] = None
+        env['max_score'] = None
+        env['status'] = 'CE'
+        return env
+
+    fun = import_string(
+        env.get('score_aggregator') or settings.DEFAULT_SCORE_AGGREGATOR
+    )
+
+    if kind is None:
+        group_results = env['group_results']
+    else:
+        group_results = dict(
+            (name, res)
+            for (name, res) in env['group_results'].items()
+            if res['kind'] == kind
+        )
+
+    score, max_score, status = fun(group_results)
+    assert isinstance(score, (type(None), ScoreValue))
+    assert isinstance(max_score, (type(None), ScoreValue))
+    env['score'] = score and score.serialize()
+    env['max_score'] = max_score and max_score.serialize()
+    env['status'] = status
+
+    return env
+
+
+@_get_submission_or_skip
+def _make_base_report(env, submission, kind):
+    """Helper function making: SubmissionReport, ScoreReport,
+    CompilationReport.
+
+    Used ``environ`` keys:
+        * ``status``
+        * ``score``
+        * ``compilation_result``
+        * ``compilation_message``
+        * ``submission_id``
+        * ``max_score``
+
+    Alters ``environ`` by adding:
+        * ``report_id``: id of the produced
+          :class:`~oioioi.contests.models.SubmissionReport`
+
+    Returns: tuple (submission, submission_report)
+    """
+    submission_report = SubmissionReport(submission=submission)
+    submission_report.kind = kind
+    submission_report.save()
+
+    env['report_id'] = submission_report.id
+
+    status_report = ScoreReport(submission_report=submission_report)
+    status_report.status = env['status']
+    status_report.score = env['score']
+    status_report.max_score = env['max_score']
+    status_report.save()
+
+    compilation_report = CompilationReport(submission_report=submission_report)
+    compilation_report.status = env['compilation_result']
+    compilation_message = env['compilation_message']
+
+    if not isinstance(compilation_message, str):
+        compilation_message = compilation_message.decode('utf8')
+    compilation_report.compiler_output = compilation_message
+    compilation_report.save()
+
+    return submission, submission_report
+
+
+@transaction.atomic
+def make_report(env, kind='NORMAL', save_scores=True, **kwargs):
+    """Builds entities for tests results in a database.
+
+    Used ``environ`` keys:
+        * ``tests``
+        * ``test_results``
+        * ``group_results``
+        * ``status``
+        * ``score``
+        * ``compilation_result``
+        * ``compilation_message``
+        * ``submission_id``
+
+    Produced ``environ`` keys:
+        * ``report_id``: id of the produced
+          :class:`~oioioi.contests.models.SubmissionReport`
+    """
+    submission, submission_report = _make_base_report(env, kind)
+
+    if env['compilation_result'] != 'OK':
+        return env
+    tests = env['tests']
+
+    test_results = env.get('test_results', {})
+    for test_name, test_result in test_results.items():
+        test = tests[test_name]
+        if 'report_id' in test_result:
+            continue
+        failed_step = test_result.get('failed_step', None)
+        test_report = EncdecTestReport(submission_report=submission_report)
+        test_report.test_id = test.get('id')
+        test_report.test_name = test_name
+        test_report.test_group = test['group']
+        test_report.test_encoder_time_limit = test['encoder_time_limit']
+        test_report.test_decoder_time_limit = test['decoder_time_limit']
+        test_report.max_score = test_result['max_score']
+        test_report.score = test_result['score'] if save_scores else None
+        if failed_step == 'encoder':
+            test_report.encoder_status = test_result['encoder_result_code']
+            test_report.decoder_status = 'SKIP'
+            comment = test_result.get('encoder_result_string', '')
+        elif failed_step == 'channel':
+            test_report.encoder_status = test_result['channel_result_code']
+            test_report.decoder_status = 'SKIP'
+            comment = test_result.get('channel_result_string', '')
+        elif failed_step == 'decoder':
+            test_report.encoder_status = test_result['channel_result_code']
+            test_report.decoder_status = test_result['decoder_result_code']
+            comment = test_result.get('decoder_result_string', '')
+        else:
+            test_report.encoder_status = test_result['channel_result_code']
+            test_report.decoder_status = test_result['checker_result_code']
+            comment = test_result.get('checker_result_string', '')
+        test_report.encoder_time_used = test_result['encoder_time_used']
+        test_report.decoder_time_used = test_result.get('decoder_time_used', 0)
+
+        if comment.lower() in ['ok', 'time limit exceeded']:  # Annoying
+            comment = ''
+        test_report.comment = Truncator(comment).chars(
+            EncdecTestReport._meta.get_field('comment').max_length
+        )
+        test_report.save()
+        test_result['report_id'] = test_report.id
+
+    group_results = env.get('group_results', {})
+    for group_name, group_result in group_results.items():
+        if 'report_id' in group_result:
+            continue
+        group_report = GroupReport(submission_report=submission_report)
+        group_report.group = group_name
+        group_report.score = group_result['score'] if save_scores else None
+        group_report.max_score = group_result['max_score'] if save_scores else None
+        group_report.status = group_result['status']
+        group_report.save()
+        group_result['result_id'] = group_report.id
+
+    if kind == 'INITIAL':
+        if submission.user is not None and not env.get('is_rejudge', False):
+            logger.info(
+                "Submission %(submission_id)d by user %(username)s"
+                " for problem %(short_name)s got initial result.",
+                {
+                    'submission_id': submission.pk,
+                    'username': submission.user.username,
+                    'short_name': submission.problem_instance.short_name,
+                },
+                extra={
+                    'notification': 'initial_results',
+                    'user': submission.user,
+                    'submission': submission,
+                },
+            )
+
+    return env
+
+
+@transaction.atomic
+def fill_outfile_in_existing_test_reports(env, **kwargs):
+    """Fill output files into existing test reports that are not directly
+    related to present submission. Also change status of UserOutGenStatus
+    object to finished.
+
+    Used ``environ`` keys:
+        * ``extra_args`` dictionary with a ``submission_report_id`` key
+        * ``decoder_results``
+    """
+    if 'submission_report_id' not in env['extra_args']:
+        logger.info('No submission_report_id given to fill test outputs')
+        return env
+
+    submission_report_id = env['extra_args']['submission_report_id']
+    submission_report = SubmissionReport.objects.get(id=submission_report_id)
+    test_reports = EncdecTestReport.objects.filter(submission_report=submission_report)
+    decoder_results = env.get('decoder_results', {})
+
+    for test_name, result in decoder_results.items():
+        try:
+            testreport = test_reports.get(test_name=test_name)
+        except (EncdecTestReport.DoesNotExist, EncdecTestReport.MultipleObjectsReturned):
+            logger.warning('Test report for test: %s can not be determined', test_name)
+            continue
+
+        if testreport.decoder_output_file:
+            logger.warning(
+                'Output for test report %s exists. Deleting the old one.', testreport.id
+            )
+            get_client().delete_file(testreport.decoder_output_file)
+
+        testreport.decoder_output_file = filetracker_to_django_file(result['out_file'])
+        testreport.save()
+
+        try:
+            download_controller = EncdecUserOutGenStatus.objects.get(
+                testreport=testreport
+            )
+        except EncdecUserOutGenStatus.DoesNotExist:
+            download_controller = EncdecUserOutGenStatus(testreport=testreport)
+
+        download_controller.status = 'OK'
+        download_controller.save()
+
+    return env
diff --git a/oioioi/encdec/migrations/0001_initial.py b/oioioi/encdec/migrations/0001_initial.py
new file mode 100644
index 000000000..a9c0a2f51
--- /dev/null
+++ b/oioioi/encdec/migrations/0001_initial.py
@@ -0,0 +1,120 @@
+# Generated by Django 4.1.6 on 2023-03-12 17:55
+
+from django.db import migrations, models
+import django.db.models.deletion
+import oioioi.base.fields
+import oioioi.contests.fields
+from oioioi.contests.models import submission_statuses
+import oioioi.encdec.models
+import oioioi.filetracker.fields
+import oioioi.problems.models
+import oioioi.programs.models
+
+
+class Migration(migrations.Migration):
+
+    initial = True
+
+    dependencies = [
+        ('contests', '0014_contest_enable_editor'),
+        ('problems', '0031_auto_20220328_1124'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='EncdecTest',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('name', models.CharField(max_length=30, verbose_name='name')),
+                ('input_file', oioioi.filetracker.fields.FileField(blank=True, max_length=255, null=True, upload_to=oioioi.problems.models.make_problem_filename, verbose_name='input')),
+                ('hint_file', oioioi.filetracker.fields.FileField(blank=True, max_length=255, null=True, upload_to=oioioi.problems.models.make_problem_filename, verbose_name='hint')),
+                ('kind', oioioi.base.fields.EnumField(max_length=64, verbose_name='kind')),
+                ('group', models.CharField(max_length=30, verbose_name='group')),
+                ('encoder_time_limit', models.IntegerField(null=True, validators=[oioioi.programs.models.validate_time_limit], verbose_name='encoder time limit (ms)')),
+                ('decoder_time_limit', models.IntegerField(null=True, validators=[oioioi.programs.models.validate_time_limit], verbose_name='decoder time limit (ms)')),
+                ('encoder_memory_limit', models.IntegerField(blank=True, null=True, validators=[oioioi.programs.models.validate_memory_limit], verbose_name='encoder memory limit (KiB)')),
+                ('decoder_memory_limit', models.IntegerField(blank=True, null=True, validators=[oioioi.programs.models.validate_memory_limit], verbose_name='decoder memory limit (KiB)')),
+                ('max_score', models.IntegerField(default=10, verbose_name='score')),
+                ('order', models.IntegerField(default=0)),
+                ('is_active', models.BooleanField(default=True)),
+                ('problem_instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contests.probleminstance')),
+            ],
+            options={
+                'verbose_name': 'test',
+                'verbose_name_plural': 'tests',
+                'ordering': ['order'],
+                'unique_together': {('problem_instance', 'name')},
+            },
+        ),
+        migrations.CreateModel(
+            name='EncdecTestReport',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('encoder_status', oioioi.base.fields.EnumField(submission_statuses)),
+                ('decoder_status', oioioi.base.fields.EnumField(submission_statuses)),
+                ('comment', models.CharField(blank=True, max_length=255)),
+                ('score', oioioi.contests.fields.ScoreField(blank=True, max_length=255, null=True)),
+                ('max_score', oioioi.contests.fields.ScoreField(blank=True, max_length=255, null=True)),
+                ('encoder_time_used', models.IntegerField(blank=True)),
+                ('decoder_time_used', models.IntegerField(blank=True)),
+                ('encoder_output_file', oioioi.filetracker.fields.FileField(blank=True, max_length=255, null=True, upload_to=oioioi.encdec.models.make_encoder_output_filename)),
+                ('decoder_output_file', oioioi.filetracker.fields.FileField(blank=True, max_length=255, null=True, upload_to=oioioi.encdec.models.make_decoder_output_filename)),
+                ('test_name', models.CharField(max_length=30)),
+                ('test_group', models.CharField(max_length=30)),
+                ('test_encoder_time_limit', models.IntegerField(blank=True, null=True)),
+                ('test_decoder_time_limit', models.IntegerField(blank=True, null=True)),
+                ('submission_report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contests.submissionreport')),
+                ('test', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='encdec.encdectest')),
+            ],
+        ),
+        migrations.CreateModel(
+            name='EncdecUserOutGenStatus',
+            fields=[
+                ('testreport', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='userout_status', serialize=False, to='encdec.encdectestreport')),
+                ('status', oioioi.base.fields.EnumField(default='?', max_length=64)),
+                ('visible_for_user', models.BooleanField(default=True)),
+            ],
+        ),
+        migrations.CreateModel(
+            name='EncdecChecker',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('exe_file', oioioi.filetracker.fields.FileField(blank=True, max_length=255, null=True, upload_to=oioioi.problems.models.make_problem_filename, verbose_name='encoder-decoder checker executable file')),
+                ('problem', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='problems.problem')),
+            ],
+            options={
+                'verbose_name': 'encoder-decoder output checker',
+                'verbose_name_plural': 'encoder-decoder output checkers',
+            },
+        ),
+        migrations.CreateModel(
+            name='EncdecChannel',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('exe_file', oioioi.filetracker.fields.FileField(blank=True, max_length=255, null=True, upload_to=oioioi.problems.models.make_problem_filename, verbose_name='encoder-decoder channel executable file')),
+                ('problem', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='problems.problem')),
+            ],
+            options={
+                'verbose_name': 'encoder-decoder channel',
+                'verbose_name_plural': 'encoder-decoder channels',
+            },
+        ),
+        migrations.CreateModel(
+            name='LanguageOverrideForEncdecTest',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('encoder_time_limit', models.IntegerField(null=True, validators=[oioioi.programs.models.validate_time_limit], verbose_name='encoder time limit (ms)')),
+                ('decoder_time_limit', models.IntegerField(null=True, validators=[oioioi.programs.models.validate_time_limit], verbose_name='decoder time limit (ms)')),
+                ('encoder_memory_limit', models.IntegerField(blank=True, null=True, validators=[oioioi.programs.models.validate_memory_limit], verbose_name='encoder memory limit (KiB)')),
+                ('decoder_memory_limit', models.IntegerField(blank=True, null=True, validators=[oioioi.programs.models.validate_memory_limit], verbose_name='decoder memory limit (KiB)')),
+                ('language', models.CharField(max_length=30, verbose_name='language')),
+                ('test', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='encdec.encdectest')),
+            ],
+            options={
+                'verbose_name': 'encoder-decoder test limit override',
+                'verbose_name_plural': 'encoder-decoder tests limit overrides',
+                'ordering': ['test__order'],
+                'unique_together': {('test', 'language')},
+            },
+        ),
+    ]
diff --git a/oioioi/encdec/migrations/__init__.py b/oioioi/encdec/migrations/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/oioioi/encdec/models.py b/oioioi/encdec/models.py
new file mode 100644
index 000000000..c2281b153
--- /dev/null
+++ b/oioioi/encdec/models.py
@@ -0,0 +1,207 @@
+from django.db import models
+
+from django.utils.translation import gettext_lazy as _
+
+from functools import wraps
+
+from oioioi.base.fields import EnumField
+from oioioi.contests.fields import ScoreField
+from oioioi.contests.models import (
+    ProblemInstance,
+    SubmissionReport,
+    submission_statuses,
+)
+from oioioi.filetracker.fields import FileField
+from oioioi.problems.models import Problem, make_problem_filename
+from oioioi.programs.models import (
+    test_kinds,
+    validate_memory_limit,
+    validate_time_limit,
+)
+
+
+submission_statuses.register('SKIP', _('Skipped'))
+
+
+class EncdecTest(models.Model):
+    __test__ = False
+    problem_instance = models.ForeignKey(ProblemInstance, on_delete=models.CASCADE)
+    name = models.CharField(max_length=30, verbose_name=_("name"))
+    input_file = FileField(
+        upload_to=make_problem_filename, verbose_name=_("input"), null=True, blank=True
+    )
+    hint_file = FileField(
+        upload_to=make_problem_filename, verbose_name=_("hint"), null=True, blank=True
+    )
+    kind = EnumField(test_kinds, verbose_name=_("kind"))
+    group = models.CharField(max_length=30, verbose_name=_("group"))
+    encoder_time_limit = models.IntegerField(
+        verbose_name=_("encoder time limit (ms)"),
+        null=True,
+        blank=False,
+        validators=[validate_time_limit],
+    )
+    decoder_time_limit = models.IntegerField(
+        verbose_name=_("decoder time limit (ms)"),
+        null=True,
+        blank=False,
+        validators=[validate_time_limit],
+    )
+    encoder_memory_limit = models.IntegerField(
+        verbose_name=_("encoder memory limit (KiB)"),
+        null=True,
+        blank=True,
+        validators=[validate_memory_limit],
+    )
+    decoder_memory_limit = models.IntegerField(
+        verbose_name=_("decoder_memory limit (KiB)"),
+        null=True,
+        blank=True,
+        validators=[validate_memory_limit],
+    )
+    max_score = models.IntegerField(verbose_name=_("score"), default=10)
+    order = models.IntegerField(default=0)
+    is_active = models.BooleanField(default=True)
+
+    @property
+    def problem(self):
+        return self.problem_instance.problem
+
+    @property
+    def total_time_limit(self):
+        return self.encoder_time_limit + self.decoder_time_limit
+
+    def __str__(self):
+        return str(self.name)
+
+    class Meta(object):
+        ordering = ['order']
+        verbose_name = _("test")
+        verbose_name_plural = _("tests")
+        unique_together = ('problem_instance', 'name')
+
+
+class LanguageOverrideForEncdecTest(models.Model):
+    test = models.ForeignKey(EncdecTest, on_delete=models.CASCADE)
+    encoder_time_limit = models.IntegerField(
+        verbose_name=_("encoder time limit (ms)"),
+        null=True,
+        blank=False,
+        validators=[validate_time_limit],
+    )
+    decoder_time_limit = models.IntegerField(
+        verbose_name=_("decoder time limit (ms)"),
+        null=True,
+        blank=False,
+        validators=[validate_time_limit],
+    )
+    encoder_memory_limit = models.IntegerField(
+        verbose_name=_("encoder memory limit (KiB)"),
+        null=True,
+        blank=True,
+        validators=[validate_memory_limit],
+    )
+    decoder_memory_limit = models.IntegerField(
+        verbose_name=_("decoder memory limit (KiB)"),
+        null=True,
+        blank=True,
+        validators=[validate_memory_limit],
+    )
+    language = models.CharField(max_length=30, verbose_name=_("language"))
+
+    class Meta(object):
+        ordering = ['test__order']
+        verbose_name = _("encoder-decoder test limit override")
+        verbose_name_plural = _("encoder-decoder tests limit overrides")
+        unique_together = ('test', 'language')
+
+
+class EncdecChannel(models.Model):
+    problem = models.OneToOneField(Problem, on_delete=models.CASCADE)
+    exe_file = FileField(
+        upload_to=make_problem_filename,
+        null=True,
+        blank=True,
+        verbose_name=_("encoder-decoder channel executable file"),
+    )
+
+    class Meta(object):
+        verbose_name = _("encoder-decoder channel")
+        verbose_name_plural = _("encoder-decoder channels")
+
+
+class EncdecChecker(models.Model):
+    problem = models.OneToOneField(Problem, on_delete=models.CASCADE)
+    exe_file = FileField(
+        upload_to=make_problem_filename,
+        null=True,
+        blank=True,
+        verbose_name=_("encoder-decoder checker executable file"),
+    )
+
+    class Meta(object):
+        verbose_name = _("encoder-decoder output checker")
+        verbose_name_plural = _("encoder-decoder output checkers")
+
+
+def make_encoder_output_filename(instance, filename):
+    # The result of this function is ignored with the current implementation
+    # of assigning a file from filetracker to a FileField.
+    submission = instance.submission_report.submission
+    return 'userouts/%s/%d/%d-encoder-out' % (
+        submission.problem_instance.contest.id,
+        submission.id,
+        instance.submission_report.id,
+    )
+
+
+def make_decoder_output_filename(instance, filename):
+    # The result of this function is ignored with the current implementation
+    # of assigning a file from filetracker to a FileField.
+    submission = instance.submission_report.submission
+    return 'userouts/%s/%d/%d-decoder-out' % (
+        submission.problem_instance.contest.id,
+        submission.id,
+        instance.submission_report.id,
+    )
+
+
+class EncdecTestReport(models.Model):
+    __test__ = False
+    submission_report = models.ForeignKey(SubmissionReport, on_delete=models.CASCADE)
+    encoder_status = EnumField(submission_statuses)
+    decoder_status = EnumField(submission_statuses)
+    comment = models.CharField(max_length=255, blank=True)
+    score = ScoreField(null=True, blank=True)
+    max_score = ScoreField(null=True, blank=True)
+    encoder_time_used = models.IntegerField(blank=True)
+    decoder_time_used = models.IntegerField(blank=True)
+    encoder_output_file = FileField(upload_to=make_encoder_output_filename, null=True, blank=True)
+    decoder_output_file = FileField(upload_to=make_decoder_output_filename, null=True, blank=True)
+
+    test = models.ForeignKey(EncdecTest, blank=True, null=True, on_delete=models.SET_NULL)
+    test_name = models.CharField(max_length=30)
+    test_group = models.CharField(max_length=30)
+    test_encoder_time_limit = models.IntegerField(null=True, blank=True)
+    test_decoder_time_limit = models.IntegerField(null=True, blank=True)
+
+    @property
+    def has_all_outputs(self):
+        return bool(self.encoder_output_file) and bool(self.decoder_output_file)
+
+
+class EncdecUserOutGenStatus(models.Model):
+    testreport = models.OneToOneField(
+        EncdecTestReport,
+        primary_key=True,
+        related_name='userout_status',
+        on_delete=models.CASCADE,
+    )
+    status = EnumField(submission_statuses, default='?')
+    visible_for_user = models.BooleanField(default=True)
diff --git a/oioioi/encdec/templates/encdec/report-body.html b/oioioi/encdec/templates/encdec/report-body.html
new file mode 100644
index 000000000..bc44c8f58
--- /dev/null
+++ b/oioioi/encdec/templates/encdec/report-body.html
@@ -0,0 +1,98 @@
+{% load i18n %}
+{% load runtimeformat %}
+
+
+<div class="table-responsive-md">
+    <table class="table table-sm table-bordered table-report submission">
+        <thead>
+            <tr>
+                <th class="submission__margin submission--{{ test.status }}"></th>
+                <th>{% trans "Test" %}</th>
+                <th>{% trans "Result" %}</th>
+                <th>{% trans "Time" %}</th>
+                {% if show_scores %}
+                    <th>{% trans "Score" %}</th>
+                {% endif %}
+            </tr>
+        </thead>
+        <tbody>
+            {% for group in groups %}
+                {% for record in group.tests %}
+                {% with test=record.test %}
+                    <tr class="test-row-{{ group.report.id }}">
+                        <td class="submission__margin submission--{{ record.display_type }}" rowspan="2"></td>
+                        <td class="test-actions-toggler {% if is_admin %}fix-float-right{% endif %}" rowspan="2">
+                            {{ test.test_name }}
+                            <span class="test-actions float-right">
+                                {% if test.test %}
+                                    <small>
+                                        {% if is_admin %}
+                                            <a title='{% trans "Download encoder output for this test" %}'
+                                               href="{% url 'download_output_file' test_id=test.test.id %}">
+                                                out
+                                            </a>
+                                            <a title='{% trans "Download encoder input for this test" %}'
+                                               href="{% url 'download_input_file' test_id=test.test.id %}">
+                                                in
+                                            </a>
+                                            <a title='{% trans "Download decoder output for this test" %}'
+                                               href="{% url 'download_output_file' test_id=test.test.id %}">
+                                                out
+                                            </a>
+                                            <a title='{% trans "Download decoder input for this test" %}'
+                                               href="{% url 'download_input_file' test_id=test.test.id %}">
+                                                in
+                                            </a>
+                                        {% endif %}
+                                        {% if allow_download_out and test.generate_status == 'OK' %}
+                                            <a title='{% trans "Download user output for this test" %}'
+                                               href="{% url 'download_user_output' testreport_id=test.id %}">
+                                                usr-out
+                                            </a>
+                                        {% endif %}
+                                    </small>
+                                {% endif %}
+                            </span>
+                        </td>
+                        <td class="submission--{{ record.encoder_display_type }}">
+                            {{ test.get_encoder_status_display }}
+                        </td>
+                        <td>
+                            {% if is_admin or test.status != 'TLE' %}
+                                {{ test.encoder_time_used|runtimeformat }}
+                            {% else %}-.--s
+                            {% endif %}/ {{ test.test_encoder_time_limit|runtimeformat }}
+                        </td>
+                        {% if show_scores and forloop.first %}
+                            {% with test_count=group.tests|length %}
+                            <td class="group-score text-right" rowspan="{% widthratio 2 1 test_count %}">
+                                {% if group.report.score %}
+                                    {{ group.report.score }} / {{ group.report.max_score }}
+                                {% endif %}
+                            </td>
+                            {% endwith %}
+                        {% endif %}
+                    </tr>
+                    <tr class="test-row-{{ group.report.id }}">
+                        <td class="submission--{{ record.decoder_display_type }}">
+                            {{ test.get_decoder_status_display }}
+                        </td>
+                        <td>
+                            {% if is_admin or test.status != 'TLE' %}
+                                {{ test.decoder_time_used|runtimeformat }}
+                            {% else %}-.--s
+                            {% endif %}/ {{ test.test_decoder_time_limit|runtimeformat }}
+                        </td>
+                    </tr>
+                {% endwith %}
+                {% endfor %}
+            {% endfor %}
+        </tbody>
+    </table>
+</div>
diff --git a/oioioi/encdec/templates/encdec/report-comments.html b/oioioi/encdec/templates/encdec/report-comments.html
new file mode 100644
index 000000000..98652330e
--- /dev/null
+++ b/oioioi/encdec/templates/encdec/report-comments.html
@@ -0,0 +1,51 @@
+{% load i18n %}
+{% load runtimeformat %}
+
+<ul class="list-unstyled">
+    {% for group in groups %}
+        {% for record in group.tests %}
+        {% with test=record.test %}
+            {% if test.comment and allow_test_comments or test.get_status_display != 'OK' and allow_download_out and test.test %}
+            <li class="small text-muted">
+                <span>{{ test.test_name }}</span>
+                {% if allow_download_out and test.test %}
+                    {% if test.generate_status == 'OK' %}
+                        <a href="{% url 'download_user_output' testreport_id=test.id %}"
+                           title='{% trans "Download user output for this test" %}'>
+                            {% trans "[download out]" %}
+                        </a>
+                    {% elif test.generate_status == '?' %}
+                        <span>[{% trans "processing" %}]</span>
+                    {% else %}
+                        <a href="#" data-post-url="{% url 'generate_user_output' testreport_id=test.id %}"
+                           title='{% trans "Generate user output for this test" %}'>
+                            {% trans "[generate out]" %}
+                        </a>
+                    {% endif %}
+                {% endif %}
+                {% if allow_test_comments %}
+                    <span>{{ test.comment }}</span>
+                {% endif %}
+            </li>
+            {% endif %}
+        {% endwith %}
+        {% endfor %}
+    {% endfor %}
+</ul>
+{% if allow_download_out %}
+    {% if all_outs_generated %}
+        <a role="button" class="btn btn-outline-secondary btn-sm"
+           title='{% trans "Download all user outputs for this test report" %}'
+           href="{% url 'download_user_output' submission_report_id=report.id %}">
+            <i class="fa-solid fa-download"></i>
+            <span>{% trans "Download all" %}</span>
+        </a>
+    {% else %}
+        <a role="button" class="btn btn-outline-secondary btn-sm"
+           title='{% trans "Generate all user outputs for this test report" %}'
+           data-post-url="{% url 'generate_user_output' submission_report_id=report.id %}">
+            <i class="fa-solid fa-circle-arrow-down"></i>
+            <span>{% trans "Generate all" %}</span>
+        </a>
+    {% endif %}
+{% endif %}
diff --git a/oioioi/encdec/templates/encdec/report.html b/oioioi/encdec/templates/encdec/report.html
new file mode 100644
index 000000000..f96372c1e
--- /dev/null
+++ b/oioioi/encdec/templates/encdec/report.html
@@ -0,0 +1,26 @@
+{% load i18n %}
+{% load runtimeformat %}
+
+<article>
+    {% if report.kind == 'INITIAL' %}
+        <h4>{% trans "Initial testing report" %}</h4>
+    {% elif report.kind == 'NORMAL' %}
+        <h4>{% trans "Final testing report" %}</h4>
+    {% elif report.kind == 'HIDDEN' %}
+        <h4>{% trans "Hidden testing report" %}</h4>
+    {% elif report.kind == 'FULL' %}
+        <h4>{% trans "Full testing report" %}</h4>
+    {% else %}
+        <h4>{% trans "Report:" %} {{ report.kind }}</h4>
+    {% endif %}
+
+    {% if compilation_report.status != 'OK' %}
+        <p>{% trans "Compilation failed." %}</p>
+        <pre>
+            {{ compilation_report.compiler_output }}
+        </pre>
+    {% else %}
+        {% include "encdec/report-body.html" %}
+        {% include "encdec/report-comments.html" with groups=groups %}
+    {% endif %}
+</article>
diff --git a/oioioi/encdec/templates/encdec/submission_header.html b/oioioi/encdec/templates/encdec/submission_header.html
new file mode 100644
index 000000000..2ed8dd8ed
--- /dev/null
+++ b/oioioi/encdec/templates/encdec/submission_header.html
@@ -0,0 +1,50 @@
+{% extends "contests/submission_header.html" %}
+{% load i18n %}
+
+{% block controls_admin_buttons %}
+{{ block.super }}
+{% if saved_diff_id %}
+    <a role="button" class="btn btn-sm btn-outline-secondary" id="diff-button-do"
+        href="{% url 'source_diff' submission1_id=submission.submission.id submission2_id=saved_diff_id %}">
+        <i class="fa-solid fa-down-left-and-up-right-to-center"></i>
+        {% trans "Diff with saved" %}
+    </a>
+{% else %}
+    <button class="btn btn-sm btn-outline-secondary" id="diff-button-save">
+        <i class="fa-solid fa-down-left-and-up-right-to-center"></i>
+        <span id="text">{% trans "Diff with other" %}</span>
+    </button>
+{% endif %}
+{% endblock %}
+
+{% block controls_buttons %}
+    {{ block.super }}
+    <a role="button" class="btn btn-sm btn-outline-secondary"
+        href="{% url 'show_submission_source' submission_id=submission.submission.id %}">
+        <i class="fa-solid fa-eye"></i>
+        {% trans "Show code" %}
+    </a>
+    <a role="button" class="btn btn-sm btn-outline-secondary"
+        href="{% url 'download_submission_source' submission_id=submission.submission.id %}">
+        <i class="fa-solid fa-download"></i>
+        {% trans "Download" %}
+    </a>
+    {% if not saved_diff_id %}
+        <script>
+            $(document).ready(
+                function() {
+                    $('#diff-button-save').click(
+                        function() {
+                            $.get('{% url 'save_diff_id' submission_id=submission.submission.id %}',
+                                function(data) {
+                                    $('#diff-button-save').prop('disabled', true);
+                                    $('#diff-button-save #text').text('{% trans "Saved for diffing" %}');
+                                }
+                            );
+                        }
+                    );
+                }
+            );
+        </script>
+    {% endif %}
+{% endblock %}
diff --git a/oioioi/problems/utils.py b/oioioi/problems/utils.py
index 6459c14af..55fd036c6 100644
--- a/oioioi/problems/utils.py
+++ b/oioioi/problems/utils.py
@@ -17,6 +17,7 @@
     is_contest_admin,
     is_contest_basicadmin,
 )
+from oioioi.encdec.models import LanguageOverrideForEncdecTest
 from oioioi.problems.models import (
     AlgorithmTagProposal,
     DifficultyTagProposal,
@@ -171,8 +172,11 @@ def update_tests_from_main_pi(problem_instance, source_instance=None):
     if problem_instance == source_instance:
         return
 
+    # Tests are deleted one at a time (one query per test); the encdec tests below follow the same pattern.
     for test in problem_instance.test_set.all():
         test.delete()
+    for test in problem_instance.encdectest_set.all():
+        test.delete()
     for test in source_instance.test_set.all():
         test_pk = test.pk
         test.id = None
@@ -187,6 +191,22 @@ def update_tests_from_main_pi(problem_instance, source_instance=None):
                 memory_limit=override.memory_limit,
                 language=override.language,
             )
+    for test in source_instance.encdectest_set.all():
+        test_pk = test.pk
+        test.id = None
+        test.pk = None
+        test.problem_instance = problem_instance
+        test.save()
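+        # Copy the per-language limit overrides attached to the source test.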
+        associated_overrides = LanguageOverrideForEncdecTest.objects.filter(test=test_pk)
+        for override in associated_overrides:
+            LanguageOverrideForEncdecTest.objects.create(
+                test=test,
+                encoder_time_limit=override.encoder_time_limit,
+                decoder_time_limit=override.decoder_time_limit,
+                encoder_memory_limit=override.encoder_memory_limit,
+                decoder_memory_limit=override.decoder_memory_limit,
+                language=override.language,
+            )
 
 
 def get_new_problem_instance(problem, contest=None):
diff --git a/oioioi/programs/controllers.py b/oioioi/programs/controllers.py
index e74d2a6c8..a6ced014a 100644
--- a/oioioi/programs/controllers.py
+++ b/oioioi/programs/controllers.py
@@ -800,21 +800,26 @@ def filter_visible_reports(self, request, submission, queryset):
 
     def _out_generate_status(self, request, testreport):
         problem = testreport.test.problem_instance.problem
+
         try:
+            userout_status = testreport.userout_status
+        except AttributeError:  # RelatedObjectDoesNotExist subclasses AttributeError
+            userout_status = None
+
+        if userout_status:
             if (
                 can_admin_problem(request, problem)
                 or testreport.userout_status.visible_for_user
             ):
                 # making sure, that output really exists or is processing
                 if (
-                    bool(testreport.output_file)
+                    testreport.has_all_outputs
                     or testreport.userout_status.status == '?'
                 ):
                     return testreport.userout_status.status
 
-        except UserOutGenStatus.DoesNotExist:
-            if testreport.output_file:
-                return 'OK'
+        if testreport.has_all_outputs:
+            return 'OK'
 
         return None
 
@@ -1067,20 +1072,24 @@ def render_submission(self, request, submission):
 
     def _out_generate_status(self, request, testreport):
         try:
+            userout_status = testreport.userout_status
+        except AttributeError:  # RelatedObjectDoesNotExist subclasses AttributeError
+            userout_status = None
+
+        if userout_status:
             if (
                 is_contest_basicadmin(request)
                 or testreport.userout_status.visible_for_user
             ):
                 # making sure, that output really exists or is processing
                 if (
-                    bool(testreport.output_file)
+                    testreport.has_all_outputs
                     or testreport.userout_status.status == '?'
                 ):
                     return testreport.userout_status.status
 
-        except UserOutGenStatus.DoesNotExist:
-            if testreport.output_file:
-                return 'OK'
+        if testreport.has_all_outputs:
+            return 'OK'
 
         return None
 
diff --git a/oioioi/programs/models.py b/oioioi/programs/models.py
index b7bddb62f..0397292e9 100644
--- a/oioioi/programs/models.py
+++ b/oioioi/programs/models.py
@@ -106,6 +106,10 @@ class Test(models.Model):
     def problem(self):
         return self.problem_instance.problem
 
+    @property
+    def total_time_limit(self):
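+        """Alias for ``time_limit``; lets callers sum limits uniformly across test types."""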
+        return self.time_limit
+
     def __str__(self):
         return str(self.name)
 
@@ -332,6 +336,10 @@ class TestReport(models.Model):
     test_group = models.CharField(max_length=30)
     test_time_limit = models.IntegerField(null=True, blank=True)
 
+    @property
+    def has_all_outputs(self):
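+        """A plain test report stores a single output file, so only ``output_file`` is checked."""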
+        return bool(self.output_file)
+
 
 class GroupReport(models.Model):
     submission_report = models.ForeignKey(SubmissionReport, on_delete=models.CASCADE)
diff --git a/oioioi/programs/static/common/submission-colors.scss b/oioioi/programs/static/common/submission-colors.scss
index 760341a89..96c61180a 100644
--- a/oioioi/programs/static/common/submission-colors.scss
+++ b/oioioi/programs/static/common/submission-colors.scss
@@ -10,6 +10,8 @@ $submission-yellow: #ffd894 !default;
 $submission-yellow-gradient-to: #fff !default;
 $submission-grey: #e0eee0 !default;
 $submission-grey-gradient-to: #fff !default;
+$submission-pink: #f3a5f3 !default;
+$submission-pink-gradient-to: #fff !default;
 $submission-OK0: #eaffbb !default;
 $submission-OK0-gradient-to: #fff !default;
 $submission-OK25: #ddffbb !default;
@@ -87,4 +89,8 @@ $submission-margin-min-width: 20px !default;
   &--OK100 {
     @include submission($submission-OK100, $submission-OK100-gradient-to);
   }
+
+  &--SKIP {
+    @include submission($submission-pink, $submission-pink-gradient-to);
+  }
 }
diff --git a/oioioi/programs/utils.py b/oioioi/programs/utils.py
index defe5beca..dc78f16a2 100644
--- a/oioioi/programs/utils.py
+++ b/oioioi/programs/utils.py
@@ -105,7 +105,7 @@ def discrete_test_scorer(test, result):
 
 def threshold_linear_test_scorer(test, result):
     """Full score if took less than half of limit and then decreasing to 1"""
-    limit = test.get('exec_time_limit', 0)
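+    # Fall back to the limit reported by the worker when the test entry does not carry one.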
+    limit = test.get('exec_time_limit', result.get('exec_time_limit', 0))
     used = result.get('time_used', 0)
     status = result['result_code']
     percentage = result.get('result_percentage', 100)
diff --git a/oioioi/sinolpack/controllers.py b/oioioi/sinolpack/controllers.py
index c054a3ccb..a7990ddf2 100644
--- a/oioioi/sinolpack/controllers.py
+++ b/oioioi/sinolpack/controllers.py
@@ -1,5 +1,6 @@
 from django.utils.translation import gettext_lazy as _
 
+from oioioi.encdec.controllers import EncdecProblemController
 from oioioi.programs.controllers import ProgrammingProblemController
 from oioioi.sinolpack.admin import SinolpackProblemAdminMixin
 from oioioi.sinolpack.utils import add_extra_files
@@ -18,3 +19,18 @@ def mixins_for_admin(self):
         return super(SinolProblemController, self).mixins_for_admin() + (
             SinolpackProblemAdminMixin,
         )
+
+
+class SinolEncdecProblemController(EncdecProblemController):
+    description = _("Sinol package encoder-decoder problem")
+
+    def fill_evaluation_environ(self, environ, submission, **kwargs):
+        super(SinolEncdecProblemController, self).fill_evaluation_environ(
+            environ, submission, **kwargs
+        )
+        add_extra_files(environ, self.problem)
+
+    def mixins_for_admin(self):
+        return super(SinolEncdecProblemController, self).mixins_for_admin() + (
+            SinolpackProblemAdminMixin,
+        )
diff --git a/oioioi/sinolpack/package.py b/oioioi/sinolpack/package.py
index 69c1bd126..5fc90ce4a 100644
--- a/oioioi/sinolpack/package.py
+++ b/oioioi/sinolpack/package.py
@@ -47,6 +47,18 @@
     OutputChecker,
     Test,
 )
+from oioioi.encdec.models import (
+    EncdecChannel,
+    EncdecChecker,
+    EncdecTest,
+    LanguageOverrideForEncdecTest,
+)
 from oioioi.sinolpack.models import ExtraConfig, ExtraFile, OriginalPackage
 from oioioi.sinolpack.utils import add_extra_files
 from oioioi.sioworkers.jobs import run_sioworkers_job, run_sioworkers_jobs
@@ -111,7 +123,6 @@ def _remove_from_zip(zipfname, *filenames):
 
 
 class SinolPackage(object):
-    controller_name = 'oioioi.sinolpack.controllers.SinolProblemController'
     package_backend_name = 'oioioi.sinolpack.package.SinolPackageBackend'
 
     def __init__(self, path, original_filename=None):
@@ -142,6 +153,7 @@ def __init__(self, path, original_filename=None):
         self.restrict_html = (
             settings.SINOLPACK_RESTRICT_HTML and not settings.USE_SINOLPACK_MAKEFILES
         )
+        self.unusual_task_type = None
 
     def identify(self):
         return self._find_main_dir() is not None
@@ -308,6 +320,7 @@ def unpack(self, env, package):
         self.env = env
         self.package = package
 
+        self._detect_unusual_task_type()
         self._create_problem_or_reuse_if_exists(self.package.problem)
         return self._extract_and_process_package()
 
@@ -346,7 +359,7 @@ def _create_problem_instance(self):
         return Problem.create(
             legacy_name=self.short_name,
             short_name=self.short_name,
-            controller_name=self.controller_name,
+            controller_name=self._get_controller_name(),
             contest=self.package.contest,
             visibility=(
                 Problem.VISIBILITY_PUBLIC
@@ -356,6 +369,14 @@ def _create_problem_instance(self):
             author=author,
         )
 
+    def _get_controller_name(self):
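+        """Returns the dotted path of the problem controller matching the detected task type."""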
+        if self.unusual_task_type:
+            return {
+                'encdec': 'oioioi.sinolpack.controllers.SinolEncdecProblemController'
+            }[self.unusual_task_type]
+        else:
+            return 'oioioi.sinolpack.controllers.SinolProblemController'
+
     def _extract_and_process_package(self):
         tmpdir = tempfile.mkdtemp()
         logger.info("%s: tmpdir is %s", self.filename, tmpdir)
@@ -385,7 +406,6 @@ def wrapper(*args, **kwargs):
             except Exception:
                 # Reraising as a custom exception allows us to attach extra
                 # information about the raising operation to the exception
-
                 error = PackageProcessingError(
                     func.__name__, func.__doc__.split("\n\n")[0]
                 )
@@ -406,7 +426,12 @@ def _process_package(self):
             self._save_prog_dir()
         self._process_statements()
         self._generate_tests()
-        self._process_checkers()
+
+        if self.unusual_task_type == 'encdec':
+            self._process_encdec_checkers()
+        else:
+            self._process_checkers()
+
         self._process_model_solutions()
         self._process_attachments()
         self._save_original_package()
@@ -427,6 +452,16 @@ def _process_config_yml(self):
         instance.save()
         self.config = instance.parsed_config
 
+    @_describe_processing_error
+    def _detect_unusual_task_type(self):
+        """Checks if the package is of an unusual type. Currently only ``encdec``
+        type is supported.
+        """
+
+        # config.yml cannot be consulted here, because parsing it requires an existing
+        # Problem instance, so the task type is detected from the archive contents instead.
+        if any('encdec_task' in name for name in self.archive.filenames()):
+            self.unusual_task_type = 'encdec'
+
     @_describe_processing_error
     def _detect_full_name(self):
         """Sets the problem's full name from the ``config.yml`` (key ``title``)
@@ -679,15 +714,23 @@ def _generate_tests(self, total_score_if_auto=100):
         self.memory_limits = _stringify_keys(self.config.get('memory_limits', {}))
         self.statement_memory_limit = self._detect_statement_memory_limit()
 
-        created_tests, outs_to_make, scored_groups = self._create_instances_for_tests()
+        if self.unusual_task_type == 'encdec':
+            created_tests, outs_to_make, scored_groups = self._create_instances_for_encdec_tests()
+        else:
+            created_tests, outs_to_make, scored_groups = self._create_instances_for_tests()
+
         sum_of_time_limits = 0
         for test in created_tests:
-            sum_of_time_limits += test.time_limit
+            sum_of_time_limits += test.total_time_limit
         self._verify_time_limits(sum_of_time_limits)
-
         self._verify_inputs(created_tests)
-        self._generate_test_outputs(created_tests, outs_to_make)
-        self._validate_tests(created_tests)
+
+        if self.unusual_task_type == 'encdec':
+            self._validate_encdec_tests(created_tests)
+        else:
+            self._generate_test_outputs(created_tests, outs_to_make)
+            self._validate_tests(created_tests)
+
         self._delete_non_existing_tests(created_tests)
 
         self._assign_scores(scored_groups, total_score_if_auto)
@@ -752,6 +795,46 @@ def _create_instances_for_tests(self):
 
         return created_tests, outs_to_make, scored_groups
 
+    def _create_instances_for_encdec_tests(self):
+        """Iterates through the available encdec test inputs.
+        :return: Triple (created test instances,
+                         outs that have to be generated (unused for encdec),
+                         score groups (determined by test names))
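+
+        Each encdec test is described by a ``*.in`` and a ``*.hint`` file;
+        no ``*.out`` files are generated for encdec tests.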
+        """
+        indir = os.path.join(self.rootdir, 'in')
+        outdir = os.path.join(self.rootdir, 'out')
+
+        re_string = r'^(%s(([0-9]+)([a-z]?[a-z0-9]*)))\.(?:in|hint)$' % (
+            re.escape(self.short_name)
+        )
+        names_re = re.compile(re_string)
+
+        collected_ins = self._make_ins(re_string)
+        all_items = list(set(os.listdir(indir)) | set(collected_ins.keys()))
+
+        created_tests = []
+        outs_to_make = []
+        scored_groups = set()
+
+        if self.use_make and not self.config.get('no_outgen', False):
+            self._find_and_compile('', command='outgen')
+
+        for order, test in enumerate(sorted(all_items, key=naturalsort_key)):
+            instance = self._process_encdec_test(
+                test,
+                order,
+                names_re,
+                indir,
+                outdir,
+                collected_ins,
+                scored_groups,
+                outs_to_make,
+            )
+            if instance:
+                created_tests.append(instance)
+
+        return created_tests, outs_to_make, scored_groups
+
     @_describe_processing_error
     def _verify_time_limits(self, time_limit_sum):
         """Checks whether the sum of test time limits does not exceed
@@ -830,6 +913,19 @@ def _validate_tests(self, created_tests):
             except ValidationError as e:
                 raise ProblemPackageError(e.messages[0])
 
+    @_describe_processing_error
+    def _validate_encdec_tests(self, created_tests):
+        """Checks that all encdec tests were created successfully and pass
+        model validation.
+
+        :raises: :class:`~oioioi.problem.package.ProblemPackageError`
+        """
+        for instance in created_tests:
+            try:
+                instance.full_clean()
+            except ValidationError as e:
+                raise ProblemPackageError(e.messages[0])
+
     def _delete_non_existing_tests(self, created_tests):
         for test in Test.objects.filter(
             problem_instance=self.main_problem_instance
@@ -923,6 +1019,89 @@ def _process_test(
         instance.save()
         return instance
 
+    @_describe_processing_error
+    def _process_encdec_test(
+        self,
+        test,
+        order,
+        names_re,
+        indir,
+        outdir,
+        collected_ins,
+        scored_groups,
+        outs_to_make,
+    ):
+        """Responsible for saving test in and out files,
+        setting test limits, assigning test kinds and groups.
+
+        :param test: Test name.
+        :param order: Test number.
+        :param names_re: Compiled regex to match test details from name.
+               Should extract basename, test name,
+               group number and test type.
+        :param indir: Directory with tests inputs.
+        :param outdir: Directory with tests outputs.
+        :param collected_ins: List of inputs that were generated,
+               not taken from archive as a file.
+        :param scored_groups: Accumulator for score groups.
+        :param outs_to_make: Accumulator for name of output files to
+               be generated by model solution.
+        :return: Test instance or None if name couldn't be matched.
+        """
+        match = names_re.match(test)
+        if not match:
+            if test.endswith('.in'):
+                raise ProblemPackageError(_("Unrecognized test: %s") % (test))
+            return None
+
+        # Example values for odl0ocen.in:
+        basename = match.group(1)  # odl0ocen
+        name = match.group(2)  # 0ocen
+        group = match.group(3)  # 0
+        suffix = match.group(4)  # ocen
+
+        instance, created = EncdecTest.objects.get_or_create(
+            problem_instance=self.main_problem_instance, name=name
+        )
+
+        inname_base = basename + '.in'
+        inname = os.path.join(indir, inname_base)
+
+        hintname_base = basename + '.hint'
+        hintname = os.path.join(indir, hintname_base)
+
+        if not os.path.isfile(hintname):
+            raise ProblemPackageError(_("No hint file for test: %s") % (test,))
+
+        if test in collected_ins:
+            self._save_to_field(instance.input_file, collected_ins[test])
+        else:
+            instance.input_file.save(inname_base, File(open(inname, 'rb')))
+        # The hint file always comes from the package, even when the input was generated.
+        instance.hint_file.save(hintname_base, File(open(hintname, 'rb')))
+
+        if group == '0' or 'ocen' in suffix:
+            # Example tests
+            instance.kind = 'EXAMPLE'
+            instance.group = name
+        else:
+            instance.kind = 'NORMAL'
+            instance.group = group
+            scored_groups.add(group)
+
+        time_limit = self._get_time_limit(created, name, group)
+        if time_limit:
+            # TODO allow specifying separately
+            instance.encoder_time_limit = instance.decoder_time_limit = time_limit
+
+        memory_limit = self._get_memory_limit(created, name, group)
+        if memory_limit:
+            # TODO allow specifying separately
+            instance.encoder_memory_limit = instance.decoder_memory_limit = memory_limit
+
+        instance.order = order
+        instance.save()
+        return instance
+
     @_describe_processing_error
     def _get_memory_limit(self, created, name, group):
         """If we find the memory limit specified anywhere in the package:
@@ -1002,6 +1181,7 @@ def _make_outs(self, outs_to_make):
         get_client().delete_file(env['compiled_file'])
         return jobs
 
+
     @_describe_processing_error
     def _check_scores_from_config(self, scored_groups, config_scores):
         """Called if ``config.yml`` specifies scores for any tests.
@@ -1081,15 +1261,31 @@ def _assign_scores(self, scored_groups, total_score_if_auto):
             scores = self._compute_scores_automatically(
                 scored_groups, total_score_if_auto
             )
 
         Test.objects.filter(problem_instance=self.main_problem_instance).update(
             max_score=0
         )
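+        # Reset encdec max scores as well; per-group values are assigned below.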
+        EncdecTest.objects.filter(problem_instance=self.main_problem_instance).update(
+            max_score=0
+        )
 
         for group, score in scores.items():
             Test.objects.filter(
                 problem_instance=self.main_problem_instance, group=group
             ).update(max_score=score)
+            EncdecTest.objects.filter(
+                problem_instance=self.main_problem_instance, group=group
+            ).update(max_score=score)
 
     @_describe_processing_error
     def _process_language_override(self):
@@ -1119,6 +1315,16 @@ def _prepare_overrides(self, lang):
                 test=test,
                 language=lang,
             )
+        encdec_tests = EncdecTest.objects.filter(problem_instance=self.main_problem_instance)
+        for test in encdec_tests:
+            LanguageOverrideForEncdecTest.objects.create(
+                encoder_time_limit=test.encoder_time_limit,
+                decoder_time_limit=test.decoder_time_limit,
+                encoder_memory_limit=test.encoder_memory_limit,
+                decoder_memory_limit=test.decoder_memory_limit,
+                test=test,
+                language=lang,
+            )
 
     @_describe_processing_error
     def _set_memory_limit_overrides(self, lang, rules):
@@ -1178,6 +1384,43 @@ def _process_checkers(self):
             instance.exe_file = self._find_checker_exec()
             instance.save()
 
+    @_describe_processing_error
+    def _process_encdec_checkers(self):
+        """Compiles the encdec output checkers and saves their binaries. Convention:
+        the checker of the encoder's output is called the ``channel``, while the checker
+        of the decoder's output is called, as usual, the ``checker``."""
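+        # The channel is compiled from ``prog/<short_name>chn.*`` and verifies the encoder's output.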
+        channel_name = '%schn.e' % (self.short_name)
+        out_name = _make_filename_in_job_dir(self.env, channel_name)
+        instance = EncdecChannel.objects.get_or_create(problem=self.problem)[0]
+        env = self._find_and_compile(
+            'chn',
+            command=channel_name,
+            cwd=os.path.join(self.rootdir, 'prog'),
+            log_on_failure=False,
+            out_name=out_name,
+        )
+        if not self.use_make and env:
+            self._save_to_field(instance.exe_file, env['compiled_file'])
+        else:
+            instance.exe_file = self._find_channel_exec()
+            instance.save()
+
+        checker_name = '%schk.e' % (self.short_name)
+        out_name = _make_filename_in_job_dir(self.env, checker_name)
+        instance = EncdecChecker.objects.get_or_create(problem=self.problem)[0]
+        env = self._find_and_compile(
+            'chk',
+            command=checker_name,
+            cwd=os.path.join(self.rootdir, 'prog'),
+            log_on_failure=False,
+            out_name=out_name,
+        )
+        if not self.use_make and env:
+            self._save_to_field(instance.exe_file, env['compiled_file'])
+        else:
+            instance.exe_file = self._find_checker_exec()
+            instance.save()
+
     def _find_checker_exec(self):
         checker_prefix = os.path.join(self.rootdir, 'prog', self.short_name + 'chk')
         exe_candidates = [checker_prefix + '.e', checker_prefix + '.sh']
@@ -1187,6 +1430,13 @@ def _find_checker_exec(self):
 
         return None
 
+    def _find_channel_exec(self):
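+        """Looks for a pre-built channel binary (``<short_name>chn.e`` or ``.sh``) in ``prog/``."""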
+        channel_prefix = os.path.join(self.rootdir, 'prog', self.short_name + 'chn')
+        exe_candidates = [channel_prefix + '.e', channel_prefix + '.sh']
+        for exe in exe_candidates:
+            if os.path.isfile(exe):
+                return File(open(exe, 'rb'))
+        return None
+
     def _process_model_solutions(self):
         """Saves model solutions to the database."""
         ModelSolution.objects.filter(problem=self.problem).delete()
@@ -1361,6 +1611,16 @@ def _pack_tests(self):
                 test.output_file,
                 os.path.join(self.short_name, 'out', basename + '.out'),
             )
+        for test in EncdecTest.objects.filter(
+            problem_instance=self.problem.main_problem_instance
+        ):
+            basename = '%s%s' % (self.short_name, test.name)
+            self._pack_django_file(
+                test.input_file, os.path.join(self.short_name, 'in', basename + '.in')
+            )
+            self._pack_django_file(
+                test.hint_file, os.path.join(self.short_name, 'in', basename + '.hint')
+            )
 
     def _pack_model_solutions(self):
         for solution in ModelSolution.objects.filter(problem=self.problem):