diff --git a/internal/kibana/savedobjects.go b/internal/kibana/savedobjects.go index 82bc734e65..af54ee2c0f 100644 --- a/internal/kibana/savedobjects.go +++ b/internal/kibana/savedobjects.go @@ -150,9 +150,10 @@ func (c *Client) SetManagedSavedObject(ctx context.Context, savedObjectType stri } type ExportSavedObjectsRequest struct { + Type string `json:"type,omitempty"` ExcludeExportDetails bool `json:"excludeExportDetails"` IncludeReferencesDeep bool `json:"includeReferencesDeep"` - Objects []ExportSavedObjectsRequestObject `json:"objects"` + Objects []ExportSavedObjectsRequestObject `json:"objects,omitempty"` } type ExportSavedObjectsRequestObject struct { diff --git a/internal/packages/assets.go b/internal/packages/assets.go index 01f027e0d5..f241548bbf 100644 --- a/internal/packages/assets.go +++ b/internal/packages/assets.go @@ -11,6 +11,8 @@ import ( "os" "path/filepath" + "gopkg.in/yaml.v3" + "github.com/elastic/elastic-package/internal/multierror" ) @@ -39,25 +41,37 @@ func newAssetTypeWithFolder(typeName AssetType, folderName string) assetTypeFold var ( AssetTypeElasticsearchIndexTemplate = newAssetType("index_template") AssetTypeElasticsearchIngestPipeline = newAssetType("ingest_pipeline") - - AssetTypeKibanaSavedSearch = newAssetType("search") - AssetTypeKibanaVisualization = newAssetType("visualization") - AssetTypeKibanaDashboard = newAssetType("dashboard") - AssetTypeKibanaMap = newAssetType("map") - AssetTypeKibanaLens = newAssetType("lens") - AssetTypeSecurityRule = newAssetTypeWithFolder("security-rule", "security_rule") + AssetTypeKibanaDashboard = newAssetType("dashboard") + AssetTypeKibanaLens = newAssetType("lens") + AssetTypeKibanaMap = newAssetType("map") + AssetTypeKibanaSavedSearch = newAssetType("search") + AssetTypeKibanaTag = newAssetType("tag") + AssetTypeKibanaVisualization = newAssetType("visualization") + AssetTypeSecurityRule = newAssetTypeWithFolder("security-rule", "security_rule") ) // Asset represents a package asset to be 
loaded into Kibana or Elasticsearch. type Asset struct { ID string `json:"id"` Type AssetType `json:"type"` + Name string DataStream string SourcePath string } +// IDOrName returns the ID if set, or the Name if not. +func (asset Asset) IDOrName() string { + if asset.ID != "" { + return asset.ID + } + return asset.Name +} + // String method returns a string representation of the asset func (asset Asset) String() string { + if asset.ID == "" && asset.Name != "" { + return fmt.Sprintf("%q (type: %s)", asset.Name, asset.Type) + } return fmt.Sprintf("%s (type: %s)", asset.ID, asset.Type) } @@ -68,6 +82,12 @@ func LoadPackageAssets(pkgRootPath string) ([]Asset, error) { return nil, fmt.Errorf("could not load kibana assets: %w", err) } + tags, err := loadKibanaTags(pkgRootPath) + if err != nil { + return nil, fmt.Errorf("could not load kibana tags: %w", err) + } + assets = append(assets, tags...) + a, err := loadElasticsearchAssets(pkgRootPath) if err != nil { return a, fmt.Errorf("could not load elasticsearch assets: %w", err) @@ -85,10 +105,11 @@ func loadKibanaAssets(pkgRootPath string) ([]Asset, error) { assetTypes = []assetTypeFolder{ AssetTypeKibanaDashboard, - AssetTypeKibanaVisualization, - AssetTypeKibanaSavedSearch, - AssetTypeKibanaMap, AssetTypeKibanaLens, + AssetTypeKibanaMap, + AssetTypeKibanaSavedSearch, + AssetTypeKibanaTag, + AssetTypeKibanaVisualization, AssetTypeSecurityRule, } @@ -112,6 +133,34 @@ func loadKibanaAssets(pkgRootPath string) ([]Asset, error) { return assets, nil } +func loadKibanaTags(pkgRootPath string) ([]Asset, error) { + tagsFilePath := filepath.Join(pkgRootPath, "kibana", "tags.yml") + tagsFile, err := os.ReadFile(tagsFilePath) + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("reading tags file failed: %w", err) + } + + type tag struct { + Text string `yaml:"text"` + } + var tags []tag + err = yaml.Unmarshal(tagsFile, &tags) + if err != nil { + return nil, fmt.Errorf("parsing tags 
file failed: %w", err) + } + + assets := make([]Asset, len(tags)) + for i, tag := range tags { + assets[i].Name = tag.Text + assets[i].Type = AssetTypeKibanaTag.typeName + assets[i].SourcePath = tagsFilePath + } + return assets, nil +} + func loadElasticsearchAssets(pkgRootPath string) ([]Asset, error) { packageManifestPath := filepath.Join(pkgRootPath, PackageManifestFile) pkgManifest, err := ReadPackageManifest(packageManifestPath) diff --git a/internal/testrunner/coverage.go b/internal/testrunner/coverage.go index c4badce219..14023047e3 100644 --- a/internal/testrunner/coverage.go +++ b/internal/testrunner/coverage.go @@ -44,6 +44,11 @@ func GenerateBasePackageCoverageReport(pkgName, rootPath, format string) (Covera return nil } + // Exclude validation configuration from coverage reports. + if d.Name() == "validation.yml" && filepath.Dir(match) == filepath.Clean(rootPath) { + return nil + } + fileCoverage, err := generateBaseFileCoverageReport(repoPath, pkgName, match, format, false) if err != nil { return fmt.Errorf("failed to generate base coverage for \"%s\": %w", match, err) diff --git a/internal/testrunner/runners/asset/tester.go b/internal/testrunner/runners/asset/tester.go index d6996e3609..ac7a3024d7 100644 --- a/internal/testrunner/runners/asset/tester.go +++ b/internal/testrunner/runners/asset/tester.go @@ -8,15 +8,21 @@ import ( "context" "errors" "fmt" + "slices" "strings" + "time" + "github.com/elastic/elastic-package/internal/common" "github.com/elastic/elastic-package/internal/kibana" "github.com/elastic/elastic-package/internal/logger" "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/resources" "github.com/elastic/elastic-package/internal/testrunner" + "github.com/elastic/elastic-package/internal/wait" ) +const assetsPresentTimeout = time.Minute + type tester struct { testFolder testrunner.TestFolder packageRootPath string @@ -124,58 +130,73 @@ func (r *tester) run(ctx context.Context) 
([]testrunner.TestResult, error) { if err != nil { return result.WithError(fmt.Errorf("cannot read the package manifest from %s: %w", r.packageRootPath, err)) } - installedPackage, err := r.kibanaClient.GetPackage(ctx, manifest.Name) - if err != nil { - return result.WithError(fmt.Errorf("cannot get installed package %q: %w", manifest.Name, err)) - } - installedAssets := installedPackage.Assets() - - // No Elasticsearch asset is created when an Input package is installed through the API. - // This would require to create a Agent policy and add that input package to the Agent policy. - // As those input packages could have some required fields, it would also require to add - // configuration files as in system tests to fill those fields. - // In these tests, mainly it is required to test Kibana assets, therefore it is not added - // support for Elasticsearch assets in input packages. - // Related issue: https://github.com/elastic/elastic-package/issues/1623 - expectedAssets, err := packages.LoadPackageAssets(r.packageRootPath) - if err != nil { - return result.WithError(fmt.Errorf("could not load expected package assets: %w", err)) - } - results := make([]testrunner.TestResult, 0, len(expectedAssets)) - for _, e := range expectedAssets { - rc := testrunner.NewResultComposer(testrunner.TestResult{ - Name: fmt.Sprintf("%s %s is loaded", e.Type, e.ID), - Package: r.testFolder.Package, - DataStream: e.DataStream, - TestType: TestType, - }) - - var tr []testrunner.TestResult - if !findActualAsset(installedAssets, e) { - tr, _ = rc.WithError(testrunner.ErrTestCaseFailed{ - Reason: "could not find expected asset", - Details: fmt.Sprintf("could not find %s asset \"%s\". 
Assets loaded:\n%s", e.Type, e.ID, formatAssetsAsString(installedAssets)), - }) - } else { - tr, _ = rc.WithSuccess() + var results []testrunner.TestResult + _, err = wait.UntilTrue(ctx, func(ctx context.Context) (bool, error) { + installedPackage, err := r.kibanaClient.GetPackage(ctx, manifest.Name) + if err != nil { + results, err = result.WithError(fmt.Errorf("cannot get installed package %q: %w", manifest.Name, err)) + return false, err + } + installedAssets := installedPackage.Assets() + + installedTags, err := r.kibanaClient.ExportSavedObjects(ctx, kibana.ExportSavedObjectsRequest{Type: "tag"}) + if err != nil { + results, err = result.WithError(fmt.Errorf("cannot get installed tags: %w", err)) + return false, err } - result := tr[0] - if r.withCoverage && e.SourcePath != "" { - result.Coverage, err = testrunner.GenerateBaseFileCoverageReport(rc.CoveragePackageName(), e.SourcePath, r.coverageType, true) - if err != nil { + + // No Elasticsearch asset is created when an Input package is installed through the API. + // This would require to create a Agent policy and add that input package to the Agent policy. + // As those input packages could have some required fields, it would also require to add + // configuration files as in system tests to fill those fields. + // In these tests, mainly it is required to test Kibana assets, therefore it is not added + // support for Elasticsearch assets in input packages. 
+ // Related issue: https://github.com/elastic/elastic-package/issues/1623 + expectedAssets, err := packages.LoadPackageAssets(r.packageRootPath) + if err != nil { + results, err = result.WithError(fmt.Errorf("could not load expected package assets: %w", err)) + return false, err + } + + results = make([]testrunner.TestResult, 0, len(expectedAssets)) + success := true + for _, e := range expectedAssets { + rc := testrunner.NewResultComposer(testrunner.TestResult{ + Name: fmt.Sprintf("%s %s is loaded", e.Type, e.IDOrName()), + Package: r.testFolder.Package, + DataStream: e.DataStream, + TestType: TestType, + }) + + tr, _ := rc.WithSuccess() + if !findActualAsset(installedAssets, installedTags, e) { tr, _ = rc.WithError(testrunner.ErrTestCaseFailed{ - Reason: "could not generate test coverage", - Details: fmt.Sprintf("could not generate test coverage for asset in %s: %v", e.SourcePath, err), + Reason: "could not find expected asset", + Details: fmt.Sprintf("could not find %s asset \"%s\". 
Assets loaded:\n%s", e.Type, e.IDOrName(), formatAssetsAsString(installedAssets, installedTags)), }) - result = tr[0] + success = false + } + result := tr[0] + if r.withCoverage && e.SourcePath != "" { + result.Coverage, err = testrunner.GenerateBaseFileCoverageReport(rc.CoveragePackageName(), e.SourcePath, r.coverageType, true) + if err != nil { + tr, _ = rc.WithError(testrunner.ErrTestCaseFailed{ + Reason: "could not generate test coverage", + Details: fmt.Sprintf("could not generate test coverage for asset in %s: %v", e.SourcePath, err), + }) + result = tr[0] + } + success = false } + + results = append(results, result) } - results = append(results, result) - } + return success, nil + }, time.Second, assetsPresentTimeout) - return results, nil + return results, err } func (r *tester) TearDown(ctx context.Context) error { @@ -191,20 +212,67 @@ func (r *tester) TearDown(ctx context.Context) error { return nil } -func findActualAsset(actualAssets []packages.Asset, expectedAsset packages.Asset) bool { +func findActualAsset(actualAssets []packages.Asset, savedObjects []common.MapStr, expectedAsset packages.Asset) bool { for _, a := range actualAssets { if a.Type == expectedAsset.Type && a.ID == expectedAsset.ID { return true } } + if expectedAsset.Type == "tag" && expectedAsset.ID == "" { + // If we haven't found the asset, and it is a tag, it could be some of the shared + // tags defined in tags.yml, whose id can be unpredictable, so check by name. + if len(actualAssets) == 0 { + // If there are no assets, the tag may not be installed, so assume it would have been. + // TODO: More accurately we should check if any of the listed objects in `tags.yml` is present. 
+ return true + } + for _, so := range savedObjects { + soType, _ := so.GetValue("type") + if soType, ok := soType.(string); !ok || soType != "tag" { + continue + } + + name, _ := so.GetValue("attributes.name") + if name, ok := name.(string); ok && name == expectedAsset.Name { + return true + } + } + } + return false } -func formatAssetsAsString(assets []packages.Asset) string { +func formatAssetsAsString(assets []packages.Asset, savedObjects []common.MapStr) string { var sb strings.Builder for _, asset := range assets { - sb.WriteString(fmt.Sprintf("- %s\n", asset.String())) + fmt.Fprintf(&sb, "- %s\n", asset.String()) + } + for _, so := range savedObjects { + idValue, _ := so.GetValue("id") + id, ok := idValue.(string) + if !ok { + continue + } + soTypeValue, _ := so.GetValue("type") + soType, ok := soTypeValue.(string) + if !ok { + continue + } + + // Avoid repeating. + if slices.ContainsFunc(assets, func(a packages.Asset) bool { + return a.Type == packages.AssetType(soType) && a.ID == id + }) { + continue + } + + name, _ := so.GetValue("attributes.name") + if name, ok := name.(string); ok && name != "" { + fmt.Fprintf(&sb, "- %s (name: %q, type: %s)\n", id, name, soType) + } else { + fmt.Fprintf(&sb, "- %s (type: %s)\n", id, soType) + } } return sb.String() } diff --git a/internal/testrunner/runners/system/tester.go b/internal/testrunner/runners/system/tester.go index cc79e81c85..0a42355469 100644 --- a/internal/testrunner/runners/system/tester.go +++ b/internal/testrunner/runners/system/tester.go @@ -2503,6 +2503,8 @@ func (r *tester) generateCoverageReport(pkgName string) (testrunner.CoverageRepo filepath.Join(r.packageRootPath, "fields", "*.yml"), filepath.Join(r.packageRootPath, "data_stream", dsPattern, "manifest.yml"), filepath.Join(r.packageRootPath, "data_stream", dsPattern, "fields", "*.yml"), + filepath.Join(r.packageRootPath, "elasticsearch", "transform", "*", "*.yml"), + filepath.Join(r.packageRootPath, "elasticsearch", "transform", "*", "fields", 
"*.yml"), } return testrunner.GenerateBaseFileCoverageReportGlob(pkgName, patterns, r.coverageType, true) diff --git a/test/packages/other/tags_without_assets/LICENSE.txt b/test/packages/other/tags_without_assets/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/packages/other/tags_without_assets/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/packages/other/tags_without_assets/changelog.yml b/test/packages/other/tags_without_assets/changelog.yml new file mode 100644 index 0000000000..bb0320a524 --- /dev/null +++ b/test/packages/other/tags_without_assets/changelog.yml @@ -0,0 +1,6 @@ +# newer versions go on top +- version: "0.0.1" + changes: + - description: Initial draft of the package + type: enhancement + link: https://github.com/elastic/integrations/pull/1 # FIXME Replace with the real PR link diff --git a/test/packages/other/tags_without_assets/data_stream/foo/agent/stream/filestream.yml.hbs b/test/packages/other/tags_without_assets/data_stream/foo/agent/stream/filestream.yml.hbs new file mode 100644 index 0000000000..3bede63284 --- /dev/null +++ b/test/packages/other/tags_without_assets/data_stream/foo/agent/stream/filestream.yml.hbs @@ -0,0 +1,44 @@ +paths: +{{#each paths as |path|}} + - {{path}} +{{/each}} +{{#if exclude_files}} +prospector.scanner.exclude_files: +{{#each exclude_files as |pattern f|}} + - {{pattern}} +{{/each}} +{{/if}} +{{#if multiline_json}} +multiline.pattern: '^{' +multiline.negate: true +multiline.match: after +multiline.max_lines: 5000 +multiline.timeout: 10 +{{/if}} +{{#if custom}} +{{custom}} +{{/if}} + +{{#if tags.length}} +tags: +{{#each tags as |tag|}} +- {{tag}} +{{/each}} +{{#if preserve_original_event}} +- preserve_original_event +{{/if}} +{{else}} +{{#if 
preserve_original_event}} +tags: +- preserve_original_event +{{/if}} +{{/if}} + +{{#contains "forwarded" tags}} +publisher_pipeline.disable_host: true +{{/contains}} + +{{#if processors}} +processors: +{{processors}} +{{/if}} \ No newline at end of file diff --git a/test/packages/other/tags_without_assets/data_stream/foo/elasticsearch/ingest_pipeline/default.yml b/test/packages/other/tags_without_assets/data_stream/foo/elasticsearch/ingest_pipeline/default.yml new file mode 100644 index 0000000000..1a308fded0 --- /dev/null +++ b/test/packages/other/tags_without_assets/data_stream/foo/elasticsearch/ingest_pipeline/default.yml @@ -0,0 +1,10 @@ +--- +description: Pipeline for processing sample logs +processors: +- set: + field: sample_field + value: "1" +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/test/packages/other/tags_without_assets/data_stream/foo/fields/base-fields.yml b/test/packages/other/tags_without_assets/data_stream/foo/fields/base-fields.yml new file mode 100644 index 0000000000..7c798f4534 --- /dev/null +++ b/test/packages/other/tags_without_assets/data_stream/foo/fields/base-fields.yml @@ -0,0 +1,12 @@ +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: '@timestamp' + type: date + description: Event timestamp. 
diff --git a/test/packages/other/tags_without_assets/data_stream/foo/manifest.yml b/test/packages/other/tags_without_assets/data_stream/foo/manifest.yml new file mode 100644 index 0000000000..a663948854 --- /dev/null +++ b/test/packages/other/tags_without_assets/data_stream/foo/manifest.yml @@ -0,0 +1,252 @@ +title: "Just an empty data stream" +type: logs +streams: + - input: filestream + title: "logs via filestream" + description: |- + Collect logs with filestream + template_path: filestream.yml.hbs + vars: + - name: paths + type: text + title: "Paths" + multi: true + required: true + show_user: true + default: + - /var/log/*.log + - name: data_stream.dataset + type: text + title: "Dataset name" + description: |- + Dataset to write data to. Changing the dataset will send the data to a different index. You can't use `-` in the name of a dataset and only valid characters for [Elasticsearch index names](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html). + required: true + show_user: true + default: filestream.generic + - name: pipeline + type: text + title: "Ingest Pipeline" + description: |- + The Ingest Node pipeline ID to be used by the integration. + show_user: true + - name: parsers + type: yaml + title: "Parsers" + description: |- + This option expects a list of parsers that the log line has to go through. For more information see [Parsers](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-filestream.html#_parsers) + show_user: true + default: "" + #- ndjson: + # target: "" + # message_key: msg + #- multiline: + # type: count + # count_lines: 3 + - name: exclude_files + type: text + title: "Exclude Files" + description: |- + A list of regular expressions to match the files that you want Elastic Agent to ignore. By default no files are excluded. 
+ multi: true + show_user: true + default: + - \.gz$ + - name: include_files + type: text + title: "Include Files" + description: |- + A list of regular expressions to match the files that you want Elastic Agent to include. If a list of regexes is provided, only the files that are allowed by the patterns are harvested. + multi: true + show_user: true + - name: processors + type: yaml + title: "Processors" + description: |- + Processors are used to reduce the number of fields in the exported event or to enhance the event with metadata. This executes in the agent before the logs are parsed. See [Processors](https://www.elastic.co/guide/en/beats/filebeat/current/filtering-and-enhancing-data.html) for details. + - name: tags + type: text + title: "Tags" + description: |- + Tags to include in the published event + multi: true + show_user: true + - name: encoding + type: text + title: "Encoding" + description: |- + The file encoding to use for reading data that contains international characters. For a full list of valid encodings, see the [Documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-filestream.html#_encoding_2) + - name: recursive_glob + type: bool + title: "Recursive Glob" + description: |- + Enable expanding `**` into recursive glob patterns. With this feature enabled, the rightmost `**` in each path is expanded into a fixed number of glob patterns. For example: `/foo/**` expands to `/foo`, `/foo/*`, `/foo/*/*`, and so on. If enabled it expands a single `**` into a 8-level deep `*` pattern. + This feature is enabled by default. Set prospector.scanner.recursive_glob to false to disable it. + default: true + - name: symlinks + type: bool + title: "Enable symlinks" + description: |- + The symlinks option allows Elastic Agent to harvest symlinks in addition to regular files. When harvesting symlinks, Elastic Agent opens and reads the original file even though it reports the path of the symlink. 
+ **Because this option may lead to data loss, it is disabled by default.** + - name: resend_on_touch + type: bool + title: "Resend on touch" + description: |- + If this option is enabled a file is resent if its size has not changed but its modification time has changed to a later time than before. It is disabled by default to avoid accidentally resending files. + - name: check_interval + type: text + title: "Check Interval" + description: |- + How often Elastic Agent checks for new files in the paths that are specified for harvesting. For example Specify 1s to scan the directory as frequently as possible without causing Elastic Agent to scan too frequently. **We do not recommend to set this value <1s.** + - name: ignore_older + type: text + title: "Ignore Older" + description: |- + If this option is enabled, Elastic Agent ignores any files that were modified before the specified timespan. You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is 0, which disables the setting. + You must set Ignore Older to be greater than On State Change Inactive. + For more information, please see the [Documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-filestream.html#filebeat-input-filestream-ignore-older) + - name: ignore_inactive + type: text + title: "Ignore Inactive" + description: |- + If this option is enabled, Elastic Agent ignores every file that has not been updated since the selected time. Possible options are since_first_start and since_last_start. + - name: close_on_state_changed_inactive + type: text + title: "Close on State Changed Inactive" + description: |- + When this option is enabled, Elastic Agent closes the file handle if a file has not been harvested for the specified duration. The counter for the defined period starts when the last log line was read by the harvester. It is not based on the modification time of the file. 
If the closed file changes again, a new harvester is started and the latest changes will be picked up after Check Interval has elapsed. + - name: close_on_state_changed_renamed + type: bool + title: "Close on State Changed Renamed" + description: |- + **Only use this option if you understand that data loss is a potential side effect.** + When this option is enabled, Elastic Agent closes the file handler when a file is renamed. This happens, for example, when rotating files. By default, the harvester stays open and keeps reading the file because the file handler does not depend on the file name. + - name: close_on_state_changed_removed + type: bool + title: "Close on State Changed Removed" + description: |- + When this option is enabled, Elastic Agent closes the harvester when a file is removed. Normally a file should only be removed after it’s inactive for the duration specified by close.on_state_change.inactive. + - name: close_reader_eof + type: bool + title: "Close Reader EOF" + description: |- + **Only use this option if you understand that data loss is a potential side effect.** + When this option is enabled, Elastic Agent closes a file as soon as the end of a file is reached. This is useful when your files are only written once and not updated from time to time. For example, this happens when you are writing every single log event to a new file. This option is disabled by default. + - name: close_reader_after_interval + type: text + title: "Close Reader After Interval" + description: |- + **Only use this option if you understand that data loss is a potential side effect. Another side effect is that multiline events might not be completely sent before the timeout expires.** + This option is particularly useful in case the output is blocked, which makes Elastic Agent keep open file handlers even for files that were deleted from the disk. 
+ For more information see the [documentation](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-filestream.html#filebeat-input-filestream-close-timeout). + - name: clean_inactive + type: text + title: "Clean Inactive" + description: |- + **Only use this option if you understand that data loss is a potential side effect.** + When this option is enabled, Elastic Agent removes the state of a file after the specified period of inactivity has elapsed. + E.g: "30m", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". By default cleaning inactive states is disabled, -1 is used to disable it. + default: -1 + - name: clean_removed + type: bool + title: "Clean Removed" + description: |- + When this option is enabled, Elastic Agent cleans files from the registry if they cannot be found on disk anymore under the last known name. + **You must disable this option if you also disable Close Removed.** + - name: harvester_limit + type: integer + title: "Harvester Limit" + description: |- + The harvester_limit option limits the number of harvesters + that are started in parallel for one input. This directly + relates to the maximum number of file handlers that are + opened. The default is 0 (no limit). + default: 0 + - name: backoff_init + type: text + title: "Backoff Init" + description: |- + The backoff option defines how long Elastic Agent waits before checking a file again after EOF is reached. The default is 1s. + - name: backoff_max + type: text + title: "Backoff Max" + description: |- + The maximum time for Elastic Agent to wait before checking a file again after EOF is reached. The default is 10s. 
+ **Requirement: Set Backoff Max to be greater than or equal to Backoff Init and less than or equal to Check Interval (Backoff Init <= Backoff Max <= Check Interval).** + - name: fingerprint + type: bool + title: "File identity: Fingerprint" + description: |- + **Changing file_identity methods between runs may result in + duplicated events in the output.** + Uses a fingerprint generated from the first few bytes (1k is + the default, this can be configured via Fingerprint offset + and length) to identify a file instead of inode + device ID. + Refer to https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-filestream.html#_file_identity_2 + for more details. If this option is disabled (and 'Native + file identity' is not enabled), Elastic-Agent < 9.0.0 will + use Native as the file identity, and >= 9.0.0 will use + Fingerprint with the default offset and length. + default: true + - name: fingerprint_offset + type: integer + title: "File identity: Fingerprint offset" + description: |- + Offset from the beginning of the file to start calculating + the fingerprint. The default is 0. Only used when the + fingerprint file identity is selected. + default: 0 + - name: fingerprint_length + type: integer + title: "File identity: Fingerprint length" + description: |- + The number of bytes used to calculate the fingerprint. The + default is 1024. Only used when the fingerprint file + identity is selected. + default: 1024 + - name: file_identity_native + type: bool + title: "File identity: Native" + description: |- + **Changing file_identity methods between runs may result in + duplicated events in the output.** + Uses a native identifier for files, on most Unix-like + file systems this is the inode + device ID. On file systems + that do not support inode, the native equivalent is used. + If you enable this option you **MUST disable Fingerprint + file identity**. 
Refer to + https://www.elastic.co/docs/reference/beats/filebeat/filebeat-input-filestream + for more details. + default: false + - name: rotation_external_strategy_copytruncate + type: yaml + title: "Rotation Strategy" + description: "If the log rotating application copies the contents of the active file and then truncates the original file, use these options to help Elastic Agent to read files correctly.\nSet the option suffix_regex so Elastic Agent can tell active and rotated files apart. \nThere are two supported suffix types in the input: numeric and date." + - name: exclude_lines + type: text + title: "Exclude Lines" + description: |- + A list of regular expressions to match the lines that you want Elastic Agent to exclude. Elastic Agent drops any lines that match a regular expression in the list. By default, no lines are dropped. Empty lines are ignored. + multi: true + - name: include_lines + type: text + title: "Include Lines" + description: |- + A list of regular expressions to match the lines that you want Elastic Agent to include. Elastic Agent exports only the lines that match a regular expression in the list. By default, all lines are exported. Empty lines are ignored. + multi: true + - name: buffer_size + type: text + title: "Buffer Size" + description: |- + The size in bytes of the buffer that each harvester uses when fetching a file. The default is 16384. + - name: message_max_bytes + type: text + title: "Message Max Bytes" + description: |- + The maximum number of bytes that a single log message can have. All bytes after message_max_bytes are discarded and not sent. The default is 10MB (10485760). + - name: condition + type: text + title: "Condition" + description: |- + Condition to filter when to collect this input. See [Dynamic Input Configuration](https://www.elastic.co/guide/en/fleet/current/dynamic-input-configuration.html) for details. 
+ show_user: true diff --git a/test/packages/other/tags_without_assets/docs/README.md b/test/packages/other/tags_without_assets/docs/README.md new file mode 100644 index 0000000000..1456af2020 --- /dev/null +++ b/test/packages/other/tags_without_assets/docs/README.md @@ -0,0 +1,3 @@ +### Test package with tags but without assets + +This package has a tags.yml file, but no asset where the tag can be applied. diff --git a/test/packages/other/tags_without_assets/img/sample-logo.svg b/test/packages/other/tags_without_assets/img/sample-logo.svg new file mode 100644 index 0000000000..6268dd88f3 --- /dev/null +++ b/test/packages/other/tags_without_assets/img/sample-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/test/packages/other/tags_without_assets/img/sample-screenshot.png b/test/packages/other/tags_without_assets/img/sample-screenshot.png new file mode 100644 index 0000000000..d7a56a3ecc Binary files /dev/null and b/test/packages/other/tags_without_assets/img/sample-screenshot.png differ diff --git a/test/packages/other/tags_without_assets/kibana/tags.yml b/test/packages/other/tags_without_assets/kibana/tags.yml new file mode 100644 index 0000000000..c6e5a5bdfe --- /dev/null +++ b/test/packages/other/tags_without_assets/kibana/tags.yml @@ -0,0 +1,13 @@ +- text: Security Solution + asset_types: + - search + asset_ids: + - system-0d3f2380-fa78-11e6-ae9b-81e5311e8cab + - system-71f720f0-ff18-11e9-8405-516218e3d268 + - system-5517a150-f9ce-11e6-8115-a7c18106d86a + - system-277876d0-fa2c-11e6-bbd3-29c986c96e5a + - system-bae11b00-9bfc-11ea-87e4-49f31ec44891 + - system-bb858830-f412-11e9-8405-516218e3d268 + - system-d401ef40-a7d5-11e9-a422-d144027429da + - system-Windows-Dashboard + - system-Logs-syslog-dashboard diff --git a/test/packages/other/tags_without_assets/manifest.yml b/test/packages/other/tags_without_assets/manifest.yml new file mode 100644 index 0000000000..65220da9e6 --- /dev/null +++ b/test/packages/other/tags_without_assets/manifest.yml @@ 
-0,0 +1,36 @@ +format_version: 3.4.1 +name: tags_without_assets +title: "Package with tags and without assets" +version: 0.0.1 +source: + license: "Apache-2.0" +description: "This is a package that has a tags.yml file, but doesn't have assets." +type: integration +categories: + - custom +conditions: + kibana: + version: "^9.1.3" + elastic: + subscription: "basic" +screenshots: + - src: /img/sample-screenshot.png + title: Sample screenshot + size: 600x600 + type: image/png +icons: + - src: /img/sample-logo.svg + title: Sample logo + size: 32x32 + type: image/svg+xml +policy_templates: + - name: sample + title: Sample logs + description: Collect sample logs + inputs: + - type: logfile + title: Collect sample logs from instances + description: Collecting sample logs +owner: + github: elastic/ecosystem + type: elastic diff --git a/test/packages/other/tags_without_assets/sample_event.json b/test/packages/other/tags_without_assets/sample_event.json new file mode 100644 index 0000000000..d668d56022 --- /dev/null +++ b/test/packages/other/tags_without_assets/sample_event.json @@ -0,0 +1,3 @@ +{ + "description": "This is an example sample-event for Package with tags and without assets. Replace it with a real sample event. Hint: If system tests exist, running `elastic-package test system --generate` will generate this file." +}