From 9d143385548bbd909f3dea69bb4907b9b64fff29 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 29 Jan 2026 18:46:01 +0100 Subject: [PATCH] docs(release): documentation for release 0.11.0 Signed-off-by: Marco Nenciarini --- .../version-0.10.0/object_stores.md | 2 +- .../version-0.10.0/troubleshooting.md | 2 +- .../version-0.11.0/compression.md | 43 ++ web/versioned_docs/version-0.11.0/concepts.md | 177 ++++++ web/versioned_docs/version-0.11.0/images.md | 37 ++ .../version-0.11.0/installation.mdx | 109 ++++ web/versioned_docs/version-0.11.0/intro.md | 86 +++ .../version-0.11.0/migration.md | 274 ++++++++ web/versioned_docs/version-0.11.0/misc.md | 97 +++ .../version-0.11.0/object_stores.md | 498 +++++++++++++++ .../version-0.11.0/observability.md | 24 + .../version-0.11.0/parameters.md | 19 + .../version-0.11.0/plugin-barman-cloud.v1.md | 108 ++++ .../version-0.11.0/resource-name-migration.md | 219 +++++++ .../version-0.11.0/retention.md | 38 ++ .../version-0.11.0/troubleshooting.md | 591 ++++++++++++++++++ .../version-0.11.0/upgrades.mdx | 16 + web/versioned_docs/version-0.11.0/usage.md | 283 +++++++++ .../version-0.4.0/object_stores.md | 2 +- .../version-0.4.1/object_stores.md | 2 +- .../version-0.5.0/object_stores.md | 2 +- .../version-0.6.0/object_stores.md | 2 +- .../version-0.7.0/object_stores.md | 2 +- .../version-0.7.0/troubleshooting.md | 2 +- .../version-0.8.0/object_stores.md | 2 +- .../version-0.8.0/troubleshooting.md | 2 +- .../version-0.9.0/object_stores.md | 2 +- .../version-0.9.0/troubleshooting.md | 2 +- .../version-0.11.0-sidebars.json | 8 + web/versions.json | 1 + 30 files changed, 2640 insertions(+), 12 deletions(-) create mode 100644 web/versioned_docs/version-0.11.0/compression.md create mode 100644 web/versioned_docs/version-0.11.0/concepts.md create mode 100644 web/versioned_docs/version-0.11.0/images.md create mode 100644 web/versioned_docs/version-0.11.0/installation.mdx create mode 100644 web/versioned_docs/version-0.11.0/intro.md create mode 100644 web/versioned_docs/version-0.11.0/migration.md create mode 100644 web/versioned_docs/version-0.11.0/misc.md create mode 100644 web/versioned_docs/version-0.11.0/object_stores.md create mode 100644 web/versioned_docs/version-0.11.0/observability.md create mode 100644 web/versioned_docs/version-0.11.0/parameters.md create mode 100644 web/versioned_docs/version-0.11.0/plugin-barman-cloud.v1.md create mode 100644 web/versioned_docs/version-0.11.0/resource-name-migration.md create mode 100644 web/versioned_docs/version-0.11.0/retention.md create mode 100644 web/versioned_docs/version-0.11.0/troubleshooting.md create mode 100644 web/versioned_docs/version-0.11.0/upgrades.mdx create mode 100644 web/versioned_docs/version-0.11.0/usage.md create mode 100644 web/versioned_sidebars/version-0.11.0-sidebars.json diff --git a/web/versioned_docs/version-0.10.0/object_stores.md b/web/versioned_docs/version-0.10.0/object_stores.md index f1714c93..74e0473c 100644 --- a/web/versioned_docs/version-0.10.0/object_stores.md +++ b/web/versioned_docs/version-0.10.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. 
To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.10.0/troubleshooting.md b/web/versioned_docs/version-0.10.0/troubleshooting.md index a062ae84..2e3cb4dc 100644 --- a/web/versioned_docs/version-0.10.0/troubleshooting.md +++ b/web/versioned_docs/version-0.10.0/troubleshooting.md @@ -406,7 +406,7 @@ For detailed PITR configuration and WAL management, see the 3. **Adjust provider-specific settings (endpoint, path style, etc.)** - See [Object Store Configuration](object_stores.md) for provider-specific settings - - Ensure `endpointURL` and `s3UsePathStyle` match your storage type + - Ensure `endpointURL` match your storage type - Verify network policies allow egress to your storage provider ## Diagnostic Commands diff --git a/web/versioned_docs/version-0.11.0/compression.md b/web/versioned_docs/version-0.11.0/compression.md new file mode 100644 index 00000000..2abbeded --- /dev/null +++ b/web/versioned_docs/version-0.11.0/compression.md @@ -0,0 +1,43 @@ +--- +sidebar_position: 80 +--- + +# Compression + + + +By default, backups and WAL files are archived **uncompressed**. However, the +Barman Cloud Plugin supports multiple compression algorithms via +`barman-cloud-backup` and `barman-cloud-wal-archive`, allowing you to optimize +for space, speed, or a balance of both. + +### Supported Compression Algorithms + +- `bzip2` +- `gzip` +- `lz4` (WAL only) +- `snappy` +- `xz` (WAL only) +- `zstd` (WAL only) + +Compression settings for base backups and WAL archives are configured +independently. For implementation details, refer to the corresponding API +definitions: + +- [`DataBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) +- [`WALBackupConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration) + +:::important +Compression impacts both performance and storage efficiency. Choose the right +algorithm based on your recovery time objectives (RTO), storage capacity, and +network throughput. +::: + +## Compression Benchmark (on MinIO) + +| Compression | Backup Time (ms) | Restore Time (ms) | Uncompressed Size (MB) | Compressed Size (MB) | Ratio | +| ----------- | ---------------- | ----------------- | ---------------------- | -------------------- | ----- | +| None | 10,927 | 7,553 | 395 | 395 | 1.0:1 | +| bzip2 | 25,404 | 13,886 | 395 | 67 | 5.9:1 | +| gzip | 116,281 | 3,077 | 395 | 91 | 4.3:1 | +| snappy | 8,134 | 8,341 | 395 | 166 | 2.4:1 | diff --git a/web/versioned_docs/version-0.11.0/concepts.md b/web/versioned_docs/version-0.11.0/concepts.md new file mode 100644 index 00000000..3832df37 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/concepts.md @@ -0,0 +1,177 @@ +--- +sidebar_position: 10 +--- + +# Main Concepts + + + +:::important +Before proceeding, make sure to review the following sections of the +CloudNativePG documentation: + +- [**Backup**](https://cloudnative-pg.io/documentation/current/backup/) +- [**WAL Archiving**](https://cloudnative-pg.io/documentation/current/wal_archiving/) +- [**Recovery**](https://cloudnative-pg.io/documentation/current/recovery/) +::: + +The **Barman Cloud Plugin** enables **hot (online) backups** of PostgreSQL +clusters in CloudNativePG through [`barman-cloud`](https://pgbarman.org), +supporting continuous physical backups and WAL archiving to an **object +store**—without interrupting write operations. 
+ +It also supports both **full recovery** and **Point-in-Time Recovery (PITR)** +of a PostgreSQL cluster. + +## The Object Store + +At the core is the [`ObjectStore` custom resource (CRD)](plugin-barman-cloud.v1.md#objectstorespec), +which acts as the interface between the PostgreSQL cluster and the target +object storage system. It allows you to configure: + +- **Authentication and bucket location** via the `.spec.configuration` section +- **WAL archiving** settings—such as compression type, parallelism, and + server-side encryption—under `.spec.configuration.wal` +- **Base backup options**—with similar settings for compression, concurrency, + and encryption—under `.spec.configuration.data` +- **Retention policies** to manage the life-cycle of archived WALs and backups + via `.spec.configuration.retentionPolicy` + +WAL files are archived in the `wals` directory, while base backups are stored +as **tarballs** in the `base` directory, following the +[Barman Cloud convention](https://docs.pgbarman.org/cloud/latest/usage/#object-store-layout). + +The plugin also offers advanced capabilities, including +[backup tagging](misc.md#backup-object-tagging) and +[extra options for backups and WAL archiving](misc.md#extra-options-for-backup-and-wal-archiving). + +:::tip +For details, refer to the +[API reference for the `ObjectStore` resource](plugin-barman-cloud.v1.md#objectstorespec). +::: + +## Integration with a CloudNativePG Cluster + +CloudNativePG can delegate continuous backup and recovery responsibilities to +the **Barman Cloud Plugin** by configuring the `.spec.plugins` section of a +`Cluster` resource. This setup requires a corresponding `ObjectStore` resource +to be defined. + +:::important +While it is technically possible to reuse the same `ObjectStore` for multiple +`Cluster` resources within the same namespace, it is strongly recommended to +dedicate one object store per PostgreSQL cluster to ensure data isolation and +operational clarity. +::: + +The following example demonstrates how to configure a CloudNativePG cluster +named `cluster-example` to use a previously defined `ObjectStore` (also named +`cluster-example`) in the same namespace. Setting `isWALArchiver: true` enables +WAL archiving through the plugin: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + # Other cluster settings... + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: cluster-example +``` + +## Backup of a Postgres Cluster + +Once the object store is defined and the `Cluster` is configured to use the +Barman Cloud Plugin, **WAL archiving is activated immediately** on the +PostgreSQL primary. + +Physical base backups are seamlessly managed by CloudNativePG using the +`Backup` and `ScheduledBackup` resources, respectively for +[on-demand](https://cloudnative-pg.io/documentation/current/backup/#on-demand-backups) +and +[scheduled](https://cloudnative-pg.io/documentation/current/backup/#scheduled-backups) +backups. + +To use the Barman Cloud Plugin, you must set the `method` to `plugin` and +configure the `pluginConfiguration` section as shown: + +```yaml +[...] +spec: + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io + [...] 
+``` + +With this configuration, CloudNativePG supports: + +- Backups from both **primary** and **standby** instances +- Backups from **designated primaries** in a distributed topology using + [replica clusters](https://cloudnative-pg.io/documentation/current/replica_cluster/) + +:::tip +For details on how to back up from a standby, refer to the official documentation: +[Backup from a standby](https://cloudnative-pg.io/documentation/current/backup/#backup-from-a-standby). +::: + +:::important +Both backup and WAL archiving operations are executed by sidecar containers +running in the same pod as the PostgreSQL `Cluster` primary instance—except +when backups are taken from a standby, in which case the sidecar runs alongside +the standby pod. +The sidecar containers use a [dedicated container image](images.md) that +includes only the supported version of Barman Cloud. +::: + +## Recovery of a Postgres Cluster + +In PostgreSQL, *recovery* refers to the process of starting a database instance +from an existing backup. The Barman Cloud Plugin integrates with CloudNativePG +to support both **full recovery** and **Point-in-Time Recovery (PITR)** from an +object store. + +Recovery in this context is *not in-place*: it bootstraps a brand-new +PostgreSQL cluster from a backup and replays the necessary WAL files to reach +the desired recovery target. + +To perform a recovery, define an *external cluster* that references the +appropriate `ObjectStore`, and use it as the source in the `bootstrap` section +of the target cluster: + +```yaml +[...] +spec: + [...] + bootstrap: + recovery: + source: source + externalClusters: + - name: source + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example + serverName: cluster-example + [...] +``` + +The critical element here is the `externalClusters` section of the `Cluster` +resource, where the `plugin` stanza instructs CloudNativePG to use the Barman +Cloud Plugin to access the object store for recovery. + +This same mechanism can be used for a variety of scenarios enabled by the +CloudNativePG API, including: + +* **Full cluster recovery** from the latest backup +* **Point-in-Time Recovery (PITR)** +* Bootstrapping **replica clusters** in a distributed topology + +:::tip +For complete instructions and advanced use cases, refer to the official +[Recovery documentation](https://cloudnative-pg.io/documentation/current/recovery/). +::: diff --git a/web/versioned_docs/version-0.11.0/images.md b/web/versioned_docs/version-0.11.0/images.md new file mode 100644 index 00000000..f6c32d34 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/images.md @@ -0,0 +1,37 @@ +--- +sidebar_position: 99 +--- + +# Container Images + + + +The Barman Cloud Plugin is distributed using two container images: + +- One for deploying the plugin components +- One for the sidecar that runs alongside each PostgreSQL instance in a + CloudNativePG `Cluster` using the plugin + +## Plugin Container Image + +The plugin image contains the logic required to operate the Barman Cloud Plugin +within your Kubernetes environment with CloudNativePG. It is published on the +GitHub Container Registry at `ghcr.io/cloudnative-pg/plugin-barman-cloud`. + +This image is built from the +[`Dockerfile.plugin`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.plugin) +in the plugin repository. + +## Sidecar Container Image + +The sidecar image is used within each PostgreSQL pod in the cluster. 
It +includes the latest supported version of Barman Cloud and is responsible for +performing WAL archiving and backups on behalf of CloudNativePG. + +It is available at `ghcr.io/cloudnative-pg/plugin-barman-cloud-sidecar` and is +built from the +[`Dockerfile.sidecar`](https://github.com/cloudnative-pg/plugin-barman-cloud/blob/main/containers/Dockerfile.sidecar). + +These sidecar images are designed to work seamlessly with the +[`minimal` PostgreSQL container images](https://github.com/cloudnative-pg/postgres-containers?tab=readme-ov-file#minimal-images) +maintained by the CloudNativePG Community. diff --git a/web/versioned_docs/version-0.11.0/installation.mdx b/web/versioned_docs/version-0.11.0/installation.mdx new file mode 100644 index 00000000..027d1e8f --- /dev/null +++ b/web/versioned_docs/version-0.11.0/installation.mdx @@ -0,0 +1,109 @@ +--- +sidebar_position: 20 +--- + +# Installation + +:::important +1. The plugin **must** be installed in the same namespace as the CloudNativePG + operator (typically `cnpg-system`). + +2. Keep in mind that the operator's **listening namespaces** may differ from its + installation namespace. Double-check this to avoid configuration issues. +::: + +## Verifying the Requirements + +Before installing the plugin, make sure the [requirements](intro.md#requirements) are met. + +### CloudNativePG Version + +Ensure you're running a version of CloudNativePG that is compatible with the +plugin. If installed in the default `cnpg-system` namespace, you can verify the +version with: + +```sh +kubectl get deployment -n cnpg-system cnpg-controller-manager \ + -o jsonpath="{.spec.template.spec.containers[*].image}" +``` + +Example output: + +```output +ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0 +``` + +The version **must be 1.26 or newer**. + +### cert-manager + +Use the [cmctl](https://cert-manager.io/docs/reference/cmctl/#installation) +tool to confirm that `cert-manager` is installed and available: + +```sh +cmctl check api +``` + +Example output: + +```output +The cert-manager API is ready +``` + +Both checks are required before proceeding with the installation. 
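
If you prefer to run both checks in one pass, the snippet below combines them.
It is only a convenience sketch and assumes the default `cnpg-system`
namespace and the deployment name used above.

```sh
# Print the operator image (the tag must report 1.26 or newer)...
kubectl get deployment -n cnpg-system cnpg-controller-manager \
  -o jsonpath="{.spec.template.spec.containers[*].image}"
echo
# ...and confirm that the cert-manager API is reachable.
cmctl check api && echo "cert-manager: OK"
```
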
+ +## Installing the Barman Cloud Plugin + +import { InstallationSnippet } from '@site/src/components/Installation'; + +Install the plugin using `kubectl` by applying the manifest for the latest +release: + + + +Example output: + +```output +customresourcedefinition.apiextensions.k8s.io/objectstores.barmancloud.cnpg.io created +serviceaccount/plugin-barman-cloud created +role.rbac.authorization.k8s.io/leader-election-role created +clusterrole.rbac.authorization.k8s.io/metrics-auth-role created +clusterrole.rbac.authorization.k8s.io/metrics-reader created +clusterrole.rbac.authorization.k8s.io/objectstore-editor-role created +clusterrole.rbac.authorization.k8s.io/objectstore-viewer-role created +clusterrole.rbac.authorization.k8s.io/plugin-barman-cloud created +rolebinding.rbac.authorization.k8s.io/leader-election-rolebinding created +clusterrolebinding.rbac.authorization.k8s.io/metrics-auth-rolebinding created +clusterrolebinding.rbac.authorization.k8s.io/plugin-barman-cloud-binding created +secret/plugin-barman-cloud-8tfddg42gf created +service/barman-cloud created +deployment.apps/barman-cloud configured +certificate.cert-manager.io/barman-cloud-client created +certificate.cert-manager.io/barman-cloud-server created +issuer.cert-manager.io/selfsigned-issuer created +``` + +Finally, check that the deployment is up and running: + +```sh +kubectl rollout status deployment \ + -n cnpg-system barman-cloud +``` + +Example output: + +```output +deployment "barman-cloud" successfully rolled out +``` + +This confirms that the plugin is deployed and ready to use. + +## Testing the latest development snapshot + +You can also test the latest development snapshot of the plugin with the +following command: + +```sh +kubectl apply -f \ + https://raw.githubusercontent.com/cloudnative-pg/plugin-barman-cloud/refs/heads/main/manifest.yaml +``` diff --git a/web/versioned_docs/version-0.11.0/intro.md b/web/versioned_docs/version-0.11.0/intro.md new file mode 100644 index 00000000..9781d0d6 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/intro.md @@ -0,0 +1,86 @@ +--- +sidebar_position: 1 +sidebar_label: "Introduction" +--- + +# Barman Cloud Plugin + + + +The **Barman Cloud Plugin** for [CloudNativePG](https://cloudnative-pg.io/) +enables online continuous physical backups of PostgreSQL clusters to object storage +using the `barman-cloud` suite from the [Barman](https://docs.pgbarman.org/release/latest/) +project. + +:::important +If you plan to migrate your existing CloudNativePG cluster to the new +plugin-based approach using the Barman Cloud Plugin, see +["Migrating from Built-in CloudNativePG Backup"](migration.md) +for detailed instructions. +::: + +## Requirements + +Before using the Barman Cloud Plugin, ensure that the following components are +installed and properly configured: + +- [CloudNativePG](https://cloudnative-pg.io) version 1.26 or later + + - We strongly recommend version 1.27.0 or later, which includes improved + error handling and status reporting for the plugin. + - If you are running an earlier release, refer to the + [upgrade guide](https://cloudnative-pg.io/documentation/current/installation_upgrade). + +- [cert-manager](https://cert-manager.io/) + + - The recommended way to enable secure TLS communication between the plugin + and the operator. + - Alternatively, you can provide your own certificate bundles. See the + [CloudNativePG documentation on TLS configuration](https://cloudnative-pg.io/documentation/current/cnpg_i/#configuring-tls-certificates). 
+ +- [`kubectl-cnpg`](https://cloudnative-pg.io/documentation/current/kubectl-plugin/) + plugin (optional but recommended) + + - Simplifies debugging and monitoring with additional status and inspection + commands. + - Multiple installation options are available in the + [installation guide](https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install). + +## Key Features + +This plugin provides the following capabilities: + +- Physical online backup of the data directory +- Physical restore of the data directory +- Write-Ahead Log (WAL) archiving +- WAL restore +- Full cluster recovery +- Point-in-Time Recovery (PITR) +- Seamless integration with replica clusters for bootstrap and WAL restore from archive + +:::important +The Barman Cloud Plugin is designed to **replace the in-tree object storage support** +previously provided via the `.spec.backup.barmanObjectStore` section in the +`Cluster` resource. +Backups created using the in-tree approach are fully supported and compatible +with this plugin. +::: + +## Supported Object Storage Providers + +The plugin works with all storage backends supported by `barman-cloud`, including: + +- **Amazon S3** +- **Google Cloud Storage** +- **Microsoft Azure Blob Storage** + +In addition, the following S3-compatible and simulator solutions have been +tested and verified: + +- [MinIO](https://min.io/) – An S3-compatible storage solution +- [Azurite](https://github.com/Azure/Azurite) – A simulator for Azure Blob Storage +- [fake-gcs-server](https://github.com/fsouza/fake-gcs-server) – A simulator for Google Cloud Storage + +:::tip +For more details, refer to [Object Store Providers](object_stores.md). +::: diff --git a/web/versioned_docs/version-0.11.0/migration.md b/web/versioned_docs/version-0.11.0/migration.md new file mode 100644 index 00000000..2c99ada6 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/migration.md @@ -0,0 +1,274 @@ +--- +sidebar_position: 40 +--- + +# Migrating from Built-in CloudNativePG Backup + + + +The in-tree support for Barman Cloud in CloudNativePG is **deprecated starting +from version 1.26** and will be removed in a future release. + +If you're currently relying on the built-in Barman Cloud integration, you can +migrate seamlessly to the new **plugin-based architecture** using the Barman +Cloud Plugin, without data loss. Follow these steps: + +- [Install the Barman Cloud Plugin](installation.mdx) +- Create an `ObjectStore` resource by translating the contents of the + `.spec.backup.barmanObjectStore` section from your existing `Cluster` + definition +- Modify the `Cluster` resource in a single atomic change to switch from + in-tree backup to the plugin +- Update any `ScheduledBackup` resources to use the plugin +- Update the `externalClusters` configuration, where applicable + +:::tip +For a working example, refer to [this commit](https://github.com/cloudnative-pg/cnpg-playground/commit/596f30e252896edf8f734991c3538df87630f6f7) +from the [CloudNativePG Playground project](https://github.com/cloudnative-pg/cnpg-playground), +which demonstrates a full migration. +::: + +--- + +## Step 1: Define the `ObjectStore` + +Begin by creating an `ObjectStore` resource in the same namespace as your +PostgreSQL `Cluster`. + +There is a **direct mapping** between the `.spec.backup.barmanObjectStore` +section in CloudNativePG and the `.spec.configuration` field in the +`ObjectStore` CR. 
The conversion is mostly mechanical, with one key difference: + +:::warning +In the plugin architecture, retention policies are defined as part of the `ObjectStore`. +In contrast, the in-tree implementation defined them at the `Cluster` level. +::: + +If your `Cluster` used `.spec.backup.retentionPolicy`, move that configuration +to `.spec.retentionPolicy` in the `ObjectStore`. + +--- + +### Example + +Here’s an excerpt from a traditional in-tree CloudNativePG backup configuration +taken from the CloudNativePG Playground project: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-eu +spec: + # [...] + backup: + barmanObjectStore: + destinationPath: s3://backups/ + endpointURL: http://minio-eu:9000 + s3Credentials: + accessKeyId: + name: minio-eu + key: ACCESS_KEY_ID + secretAccessKey: + name: minio-eu + key: ACCESS_SECRET_KEY + wal: + compression: gzip +``` + +This configuration translates to the following `ObjectStore` resource for the +plugin: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: minio-eu +spec: + configuration: + destinationPath: s3://backups/ + endpointURL: http://minio-eu:9000 + s3Credentials: + accessKeyId: + name: minio-eu + key: ACCESS_KEY_ID + secretAccessKey: + name: minio-eu + key: ACCESS_SECRET_KEY + wal: + compression: gzip +``` + +As you can see, the contents of `barmanObjectStore` have been copied directly +under the `configuration` field of the `ObjectStore` resource, using the same +secret references. + +## Step 2: Update the `Cluster` for plugin WAL archiving + +Once the `ObjectStore` resource is in place, update the `Cluster` resource as +follows in a single atomic change: + +- Remove the `.spec.backup.barmanObjectStore` section +- Remove `.spec.backup.retentionPolicy` if it was defined (as it is now in the + `ObjectStore`) +- Remove the entire `spec.backup` section if it is now empty +- Add `barman-cloud.cloudnative-pg.io` to the `plugins` list, as described in + [Configuring WAL archiving](usage.md#configuring-wal-archiving) + +This will trigger a rolling update of the `Cluster`, switching continuous +backup from the in-tree implementation to the plugin-based approach. + +### Example + +The updated `pg-eu` cluster will have this configuration instead of the +previous `backup` section: + +```yaml + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: minio-eu +``` + +--- + +## Step 3: Update the `ScheduledBackup` + +After switching the `Cluster` to use the plugin, update your `ScheduledBackup` +resources to match. + +Set the backup `method` to `plugin` and reference the plugin name via +`pluginConfiguration`, as shown in ["Performing a base backup"](usage.md#performing-a-base-backup). 
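
If you are unsure which `ScheduledBackup` resources still rely on the in-tree
method, the optional check below lists them together with their configured
backup method: entries showing `<none>` (method unset) or `barmanObjectStore`
have not been migrated yet.

```sh
# List ScheduledBackup resources with their backup method;
# anything not reporting "plugin" still needs to be updated.
kubectl get scheduledbackups.postgresql.cnpg.io -A \
  -o custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,METHOD:.spec.method'
```
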
+ +### Example + +Original in-tree `ScheduledBackup`: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: pg-eu-backup +spec: + cluster: + name: pg-eu + schedule: '0 0 0 * * *' + backupOwnerReference: self +``` + +Updated version using the plugin: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: pg-eu-backup +spec: + cluster: + name: pg-eu + schedule: '0 0 0 * * *' + backupOwnerReference: self + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io +``` + +--- + +## Step 4: Update the `externalClusters` configuration + +If your `Cluster` relies on one or more external clusters that use the in-tree +Barman Cloud integration, you need to update those configurations to use the +plugin-based architecture. + +When a replica cluster fetches WAL files or base backups from an external +source that used the built-in backup method, follow these steps: + +1. Create a corresponding `ObjectStore` resource for the external cluster, as + shown in [Step 1](#step-1-define-the-objectstore) +2. Update the `externalClusters` section of your replica cluster to use the + plugin instead of the in-tree `barmanObjectStore` field + +### Example + +Consider the original configuration using in-tree Barman Cloud: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-us +spec: + # [...] + externalClusters: + - name: pg-eu + barmanObjectStore: + destinationPath: s3://backups/ + endpointURL: http://minio-eu:9000 + serverName: pg-eu + s3Credentials: + accessKeyId: + name: minio-eu + key: ACCESS_KEY_ID + secretAccessKey: + name: minio-eu + key: ACCESS_SECRET_KEY + wal: + compression: gzip +``` + +Create the `ObjectStore` resource for the external cluster: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: minio-eu +spec: + configuration: + destinationPath: s3://backups/ + endpointURL: http://minio-eu:9000 + s3Credentials: + accessKeyId: + name: minio-eu + key: ACCESS_KEY_ID + secretAccessKey: + name: minio-eu + key: ACCESS_SECRET_KEY + wal: + compression: gzip +``` + +Update the external cluster configuration to use the plugin: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-us +spec: + # [...] + externalClusters: + - name: pg-eu + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: minio-eu + serverName: pg-eu +``` + +## Step 5: Verify your metrics + +When migrating from the in-core solution to the plugin-based approach, you need +to monitor a different set of metrics, as described in the +["Observability"](observability.md) section. 
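
In practice, any Prometheus alerting or recording rules (and dashboards) that
reference the old names must be updated as well. The rule file below is only
an illustration using the new backup-age metric; the group name, alert name,
and 24-hour threshold are arbitrary examples, not something shipped by the
plugin.

```yaml
groups:
  - name: barman-cloud-plugin
    rules:
      - alert: BarmanCloudLastBackupTooOld
        # Fires when the newest available backup is older than 24 hours
        expr: time() - barman_cloud_cloudnative_pg_io_last_available_backup_timestamp > 86400
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: "Most recent Barman Cloud backup is older than 24 hours"
```
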
+ +The table below summarizes the name changes between the old in-core metrics and +the new plugin-based ones: + +| Old metric name | New metric name | +| ------------------------------------------------ | ---------------------------------------------------------------- | +| `cnpg_collector_last_failed_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp` | +| `cnpg_collector_last_available_backup_timestamp` | `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp` | +| `cnpg_collector_first_recoverability_point` | `barman_cloud_cloudnative_pg_io_first_recoverability_point` | diff --git a/web/versioned_docs/version-0.11.0/misc.md b/web/versioned_docs/version-0.11.0/misc.md new file mode 100644 index 00000000..0f03b284 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/misc.md @@ -0,0 +1,97 @@ +--- +sidebar_position: 90 +--- + +# Miscellaneous + + + +## Backup Object Tagging + +You can attach key-value metadata tags to backup artifacts—such as base +backups, WAL files, and history files—via the `.spec.configuration` section of +the `ObjectStore` resource. + +- `tags`: applied to base backups and WAL files +- `historyTags`: applied to history files only + +### Example + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: my-store +spec: + configuration: + [...] + tags: + backupRetentionPolicy: "expire" + historyTags: + backupRetentionPolicy: "keep" + [...] +``` + +## Extra Options for Backup and WAL Archiving + +You can pass additional command-line arguments to `barman-cloud-backup` and +`barman-cloud-wal-archive` using the `additionalCommandArgs` field in the +`ObjectStore` configuration. + +- `.spec.configuration.data.additionalCommandArgs`: for `barman-cloud-backup` +- `.spec.configuration.wal.archiveAdditionalCommandArgs`: for `barman-cloud-wal-archive` + +Each field accepts a list of string arguments. If an argument is already +configured elsewhere in the plugin, the duplicate will be ignored. + +### Example: Extra Backup Options + +```yaml +kind: ObjectStore +metadata: + name: my-store +spec: + configuration: + data: + additionalCommandArgs: + - "--min-chunk-size=5MB" + - "--read-timeout=60" +``` + +### Example: Extra WAL Archive Options + +```yaml +kind: ObjectStore +metadata: + name: my-store +spec: + configuration: + wal: + archiveAdditionalCommandArgs: + - "--max-concurrency=1" + - "--read-timeout=60" +``` + +For a complete list of supported options, refer to the +[official Barman Cloud documentation](https://docs.pgbarman.org/release/latest/). + +## Enable the pprof debug server for the sidecar + +You can enable the instance sidecar's pprof debug HTTP server by adding the `--pprof-server=
` flag to the container's +arguments via `.spec.instanceSidecarConfiguration.additionalContainerArgs`. + +Pass a bind address in the form `:` (for example, `0.0.0.0:6061`). +An empty value disables the server (disabled by default). + +### Example + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: my-store +spec: + instanceSidecarConfiguration: + additionalContainerArgs: + - "--pprof-server=0.0.0.0:6061" +``` diff --git a/web/versioned_docs/version-0.11.0/object_stores.md b/web/versioned_docs/version-0.11.0/object_stores.md new file mode 100644 index 00000000..69a83f61 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/object_stores.md @@ -0,0 +1,498 @@ +--- +sidebar_position: 50 +--- + +# Object Store Providers + + + +The Barman Cloud Plugin enables the storage of PostgreSQL cluster backup files +in any object storage service supported by the +[Barman Cloud infrastructure](https://docs.pgbarman.org/release/latest/). + +Currently, Barman Cloud supports the following providers: + +- [Amazon S3](#aws-s3) +- [Microsoft Azure Blob Storage](#azure-blob-storage) +- [Google Cloud Storage](#google-cloud-storage) + +You may also use any S3- or Azure-compatible implementation of the above +services. + +To configure object storage with Barman Cloud, you must define an +[`ObjectStore` object](plugin-barman-cloud.v1.md#objectstore), which +establishes the connection between your PostgreSQL cluster and the object +storage backend. + +Configuration details — particularly around authentication — will vary depending on +the specific object storage provider you are using. + +The following sections detail the setup for each. + +:::note Authentication Methods +The Barman Cloud Plugin does not independently test all authentication methods +supported by `barman-cloud`. The plugin's responsibility is limited to passing +the provided credentials to `barman-cloud`, which then handles authentication +according to its own implementation. Users should refer to the +[Barman Cloud documentation](https://docs.pgbarman.org/release/latest/) to +verify that their chosen authentication method is supported and properly +configured. +::: + +--- + +## AWS S3 + +[AWS Simple Storage Service (S3)](https://aws.amazon.com/s3/) is one of the +most widely adopted object storage solutions. + +The Barman Cloud plugin for CloudNativePG integrates with S3 through two +primary authentication mechanisms: + +- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) — + recommended for clusters running on EKS +- Access keys — using `ACCESS_KEY_ID` and `ACCESS_SECRET_KEY` credentials + +### Access Keys + +To authenticate using access keys, you’ll need: + +- `ACCESS_KEY_ID`: the public key used to authenticate to S3 +- `ACCESS_SECRET_KEY`: the corresponding secret key +- `ACCESS_SESSION_TOKEN`: (optional) a temporary session token, if required + +These credentials must be stored securely in a Kubernetes secret: + +```sh +kubectl create secret generic aws-creds \ + --from-literal=ACCESS_KEY_ID= \ + --from-literal=ACCESS_SECRET_KEY= +# --from-literal=ACCESS_SESSION_TOKEN= # if required +``` + +The credentials will be encrypted at rest if your Kubernetes environment +supports it. 
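
If you want to confirm what was stored, you can list the keys of the secret
without printing their values:

```sh
# Shows the key names and sizes only; the secret values are not displayed
kubectl describe secret aws-creds
```
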
+ +You can then reference the secret in your `ObjectStore` definition: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: aws-store +spec: + configuration: + destinationPath: "s3://BUCKET_NAME/path/to/folder" + s3Credentials: + accessKeyId: + name: aws-creds + key: ACCESS_KEY_ID + secretAccessKey: + name: aws-creds + key: ACCESS_SECRET_KEY + [...] +``` + +### IAM Role for Service Account (IRSA) + +To use IRSA with EKS, configure the service account of the PostgreSQL cluster +with the appropriate annotation: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + [...] +spec: + serviceAccountTemplate: + metadata: + annotations: + eks.amazonaws.com/role-arn: arn:[...] + [...] +``` + +### S3 Lifecycle Policy + +Barman Cloud uploads backup files to S3 but does not modify them afterward. +To enhance data durability and protect against accidental or malicious loss, +it's recommended to implement the following best practices: + +- Enable object versioning +- Enable object locking to prevent objects from being deleted or overwritten + for a defined period or indefinitely (this provides an additional layer of + protection against accidental deletion and ransomware attacks) +- Set lifecycle rules to expire current versions a few days after your Barman + retention window +- Expire non-current versions after a longer period + +These strategies help you safeguard backups without requiring broad delete +permissions, ensuring both security and compliance with minimal operational +overhead. + + +### S3-Compatible Storage Providers + +You can use S3-compatible services like **MinIO**, **Linode (Akamai) Object Storage**, +or **DigitalOcean Spaces** by specifying a custom `endpointURL`. + +Example with Linode (Akamai) Object Storage (`us-east1`): + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: linode-store +spec: + configuration: + destinationPath: "s3://BUCKET_NAME/" + endpointURL: "https://us-east1.linodeobjects.com" + s3Credentials: + [...] + [...] +``` + +Recent changes to the [boto3 implementation](https://github.com/boto/boto3/issues/4392) +of [Amazon S3 Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html) +may lead to the `x-amz-content-sha256` error when using the Barman Cloud +Plugin. + +If you encounter this issue (see [GitHub issue #393](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/393)), +you can apply the following workaround by setting specific environment +variables in the `ObjectStore` resource: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: linode-store +spec: + instanceSidecarConfiguration: + env: + - name: AWS_REQUEST_CHECKSUM_CALCULATION + value: when_required + - name: AWS_RESPONSE_CHECKSUM_VALIDATION + value: when_required + [...] +``` + +These settings ensure that checksum calculations and validations are only +applied when explicitly required, avoiding compatibility issues with certain +S3-compatible storage providers. + +Example with DigitalOcean Spaces (SFO3, path-style): + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: digitalocean-store +spec: + configuration: + destinationPath: "s3://BUCKET_NAME/path/to/folder" + endpointURL: "https://sfo3.digitaloceanspaces.com" + s3Credentials: + [...] + [...] 
+``` + +### Using Object Storage with a Private CA + +For object storage services (e.g., MinIO) that use HTTPS with certificates +signed by a private CA, set the `endpointCA` field in the `ObjectStore` +definition. Unless you already have it, create a Kubernetes `Secret` with the +CA bundle: + +```sh +kubectl create secret generic my-ca-secret --from-file=ca.crt +``` + +Then reference it: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: minio-store +spec: + configuration: + endpointURL: + endpointCA: + name: my-ca-secret + key: ca.crt + [...] +``` + + +:::note +If you want `ConfigMaps` and `Secrets` to be **automatically** reloaded by +instances, you can add a label with the key `cnpg.io/reload` to the +`Secrets`/`ConfigMaps`. Otherwise, you will have to reload the instances using the +`kubectl cnpg reload` subcommand. +::: + +--- + +## Azure Blob Storage + +[Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) +is Microsoft’s cloud-based object storage solution. + +Barman Cloud supports the following authentication methods: + +- [Connection String](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string) +- Storage Account Name + [Storage Account Access Key](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage) +- Storage Account Name + [Storage Account SAS Token](https://learn.microsoft.com/en-us/azure/storage/blobs/sas-service-create) +- [Azure AD Managed Identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) +- [Default Azure Credentials](https://learn.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential?view=azure-dotnet) + +### Azure AD Managed Identity + +This method avoids storing credentials in Kubernetes by enabling the +usage of [Azure Managed Identities](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) authentication mechanism. +This can be enabled by setting the `inheritFromAzureAD` option to `true`. +Managed Identity can be configured for the AKS Cluster by following +the [Azure documentation](https://learn.microsoft.com/en-us/azure/aks/use-managed-identity?pivots=system-assigned). + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: azure-store +spec: + configuration: + destinationPath: "" + azureCredentials: + inheritFromAzureAD: true + [...] +``` + +### Default Azure Credentials + +The `useDefaultAzureCredentials` option enables the default Azure credentials +flow, which uses [`DefaultAzureCredential`](https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential) +to automatically discover and use available credentials in the following order: + +1. **Environment Variables** — `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, and `AZURE_TENANT_ID` for Service Principal authentication +2. **Managed Identity** — Uses the managed identity assigned to the pod +3. **Azure CLI** — Uses credentials from the Azure CLI if available +4. **Azure PowerShell** — Uses credentials from Azure PowerShell if available + +This approach is particularly useful for getting started with development and testing; it allows +the SDK to attempt multiple authentication mechanisms seamlessly across different environments. +However, this is not recommended for production. 
Please refer to the +[official Azure guidance](https://learn.microsoft.com/en-us/dotnet/azure/sdk/authentication/credential-chains?tabs=dac#usage-guidance-for-defaultazurecredential) +for a comprehensive understanding of `DefaultAzureCredential`. + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: azure-store +spec: + configuration: + destinationPath: "" + azureCredentials: + useDefaultAzureCredentials: true + [...] +``` + +### Access Key, SAS Token, or Connection String + +Store credentials in a Kubernetes secret: + +```sh +kubectl create secret generic azure-creds \ + --from-literal=AZURE_STORAGE_ACCOUNT= \ + --from-literal=AZURE_STORAGE_KEY= \ + --from-literal=AZURE_STORAGE_SAS_TOKEN= \ + --from-literal=AZURE_STORAGE_CONNECTION_STRING= +``` + +Then reference the required keys in your `ObjectStore`: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: azure-store +spec: + configuration: + destinationPath: "" + azureCredentials: + connectionString: + name: azure-creds + key: AZURE_CONNECTION_STRING + storageAccount: + name: azure-creds + key: AZURE_STORAGE_ACCOUNT + storageKey: + name: azure-creds + key: AZURE_STORAGE_KEY + storageSasToken: + name: azure-creds + key: AZURE_STORAGE_SAS_TOKEN + [...] +``` + +For Azure Blob, the destination path format is: + +``` +://..core.windows.net// +``` + +### Azure-Compatible Providers + +If you're using a different implementation (e.g., Azurite or emulator): + +``` +://:/// +``` + +--- + +## Google Cloud Storage + +[Google Cloud Storage](https://cloud.google.com/storage/) is supported with two +authentication modes: + +- **GKE Workload Identity** (recommended inside Google Kubernetes Engine) +- **Service Account JSON key** via the `GOOGLE_APPLICATION_CREDENTIALS` environment variable + +### GKE Workload Identity + +Use the [Workload Identity authentication](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +when running in GKE: + +1. Set `googleCredentials.gkeEnvironment` to `true` in the `ObjectStore` + resource +2. Annotate the `serviceAccountTemplate` in the `Cluster` resource with the GCP + service account + +For example, in the `ObjectStore` resource: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: google-store +spec: + configuration: + destinationPath: "gs:///" + googleCredentials: + gkeEnvironment: true +``` + +And in the `Cluster` resource: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +spec: + serviceAccountTemplate: + metadata: + annotations: + iam.gke.io/gcp-service-account: [...].iam.gserviceaccount.com +``` + +### Service Account JSON Key + +Follow Google’s [authentication setup](https://cloud.google.com/docs/authentication/getting-started), +then: + +```sh +kubectl create secret generic backup-creds --from-file=gcsCredentials=gcs_credentials_file.json +``` + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: google-store +spec: + configuration: + destinationPath: "gs:///" + googleCredentials: + applicationCredentials: + name: backup-creds + key: gcsCredentials + [...] +``` + +:::important +This authentication method generates a JSON file within the container +with all the credentials required to access your Google Cloud Storage +bucket. As a result, if someone gains access to the `Pod`, they will also have +write permissions to the bucket. 
+::: + +--- + + +## MinIO Object Store + +In order to use the Tenant resource you first need to deploy the +[MinIO operator](https://docs.min.io/community/minio-object-store/operations/deployments/installation.html). +For the latest documentation of MinIO, please refer to the +[MinIO official documentation](https://docs.min.io/community/minio-object-store/). + +MinIO Object Store's API is compatible with S3, and the default configuration of the Tenant +will create these services: +- `-console` on port 9090 (with autocert) or 9443 (without autocert) +- `-hl` on port 9000 +Where `` is the `metadata.name` you assigned to your Tenant resource. + +:::note +The `-console` service will only be available if you have enabled the +[MinIO Console](https://docs.min.io/community/minio-object-store/administration/minio-console.html). + +For example, the following Tenant: +```yml +apiVersion: minio.min.io/v2 +kind: Tenant +metadata: + name: cnpg-backups +spec: + [...] +``` +would have services called `cnpg-backups-console` and `cnpg-backups-hl` respectively. + +The `console` service is for managing the tenant, while the `hl` service exposes the S3 +compatible API. If your tenant is configured with `requestAutoCert` you will communicate +to these services over HTTPS, if not you will use HTTP. + +For authentication you can use your username and password, or create an access key. +Whichever method you choose, it has to be stored as a secret. + +```sh +kubectl create secret generic minio-creds \ + --from-literal=MINIO_ACCESS_KEY= \ + --from-literal=MINIO_SECRET_KEY= +``` + +Finally, create the Barman ObjectStore: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: minio-store +spec: + configuration: + destinationPath: s3://BUCKET_NAME/ + endpointURL: http://-hl:9000 + s3Credentials: + accessKeyId: + name: minio-creds + key: MINIO_ACCESS_KEY + secretAccessKey: + name: minio-creds + key: MINIO_SECRET_KEY + [...] +``` + +:::important +Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before +proceeding with a backup. +::: + +--- diff --git a/web/versioned_docs/version-0.11.0/observability.md b/web/versioned_docs/version-0.11.0/observability.md new file mode 100644 index 00000000..274b9737 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/observability.md @@ -0,0 +1,24 @@ +--- +sidebar_position: 55 +--- + +# Observability + + + +The Barman Cloud Plugin exposes the following metrics through the native +Prometheus exporter of the instance manager: + +- `barman_cloud_cloudnative_pg_io_last_failed_backup_timestamp`: + the UNIX timestamp of the most recent failed backup. + +- `barman_cloud_cloudnative_pg_io_last_available_backup_timestamp`: + the UNIX timestamp of the most recent successfully available backup. + +- `barman_cloud_cloudnative_pg_io_first_recoverability_point`: + the UNIX timestamp representing the earliest point in time from which the + cluster can be recovered. + +These metrics supersede the previously available in-core metrics that used the +`cnpg_collector` prefix. The new metrics are exposed under the +`barman_cloud_cloudnative_pg_io` prefix instead. 
diff --git a/web/versioned_docs/version-0.11.0/parameters.md b/web/versioned_docs/version-0.11.0/parameters.md new file mode 100644 index 00000000..ca0cd2ba --- /dev/null +++ b/web/versioned_docs/version-0.11.0/parameters.md @@ -0,0 +1,19 @@ +--- +sidebar_position: 100 +--- + +# Parameters + + + +The following parameters are available for the Barman Cloud Plugin: + +- `barmanObjectName`: references the `ObjectStore` resource to be used by the + plugin. +- `serverName`: Specifies the server name in the object store. + +:::important +The `serverName` parameter in the `ObjectStore` resource is retained solely for +API compatibility with the in-tree `barmanObjectStore` and must always be left empty. +When needed, use the `serverName` plugin parameter in the Cluster configuration instead. +::: diff --git a/web/versioned_docs/version-0.11.0/plugin-barman-cloud.v1.md b/web/versioned_docs/version-0.11.0/plugin-barman-cloud.v1.md new file mode 100644 index 00000000..7bd607cd --- /dev/null +++ b/web/versioned_docs/version-0.11.0/plugin-barman-cloud.v1.md @@ -0,0 +1,108 @@ +# API Reference + +## Packages +- [barmancloud.cnpg.io/v1](#barmancloudcnpgiov1) + + +## barmancloud.cnpg.io/v1 + +Package v1 contains API Schema definitions for the barmancloud v1 API group + +### Resource Types +- [ObjectStore](#objectstore) + + + +#### InstanceSidecarConfiguration + + + +InstanceSidecarConfiguration defines the configuration for the sidecar that runs in the instance pods. + + + +_Appears in:_ +- [ObjectStoreSpec](#objectstorespec) + +| Field | Description | Required | Default | Validation | +| --- | --- | --- | --- | --- | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#envvar-v1-core) array_ | The environment to be explicitly passed to the sidecar | | | | +| `retentionPolicyIntervalSeconds` _integer_ | The retentionCheckInterval defines the frequency at which the
system checks and enforces retention policies. | | 1800 | |
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | Resources define cpu/memory requests and limits for the sidecar that runs in the instance pods. | | | |
| `additionalContainerArgs` _string array_ | AdditionalContainerArgs is an optional list of command-line arguments<br />to be passed to the sidecar container when it starts.<br />The provided arguments are appended to the container’s default arguments. | | | |
| `logLevel` _string_ | The log level for PostgreSQL instances. Valid values are: `error`, `warning`, `info` (default), `debug`, `trace` | | info | Enum: [error warning info debug trace] <br /> |


#### ObjectStore



ObjectStore is the Schema for the objectstores API.




| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `apiVersion` _string_ | `barmancloud.cnpg.io/v1` | True | | |
| `kind` _string_ | `ObjectStore` | True | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | True | | |
| `spec` _[ObjectStoreSpec](#objectstorespec)_ | Specification of the desired behavior of the ObjectStore.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | True | | |
| `status` _[ObjectStoreStatus](#objectstorestatus)_ | Most recently observed status of the ObjectStore. This data may not be up to<br />date. Populated by the system. Read-only.<br />More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | |


#### ObjectStoreSpec



ObjectStoreSpec defines the desired state of ObjectStore.



_Appears in:_
- [ObjectStore](#objectstore)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `configuration` _[BarmanObjectStoreConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration)_ | The configuration for the barman-cloud tool suite | True | | |
| `retentionPolicy` _string_ | RetentionPolicy is the retention policy to be used for backups<br />and WALs (i.e. '60d'). The retention policy is expressed in the form<br />of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -<br />days, weeks, months. | | | Pattern: `^[1-9][0-9]*[dwm]$` <br /> |
| `instanceSidecarConfiguration` _[InstanceSidecarConfiguration](#instancesidecarconfiguration)_ | The configuration for the sidecar that runs in the instance pods | | | |


#### ObjectStoreStatus



ObjectStoreStatus defines the observed state of ObjectStore.



_Appears in:_
- [ObjectStore](#objectstore)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `serverRecoveryWindow` _object (keys:string, values:[RecoveryWindow](#recoverywindow))_ | ServerRecoveryWindow maps each server to its recovery window | True | | |


#### RecoveryWindow



RecoveryWindow represents the time span between the first
recoverability point and the last successful backup of a PostgreSQL
server, defining the period during which data can be restored.



_Appears in:_
- [ObjectStoreStatus](#objectstorestatus)

| Field | Description | Required | Default | Validation |
| --- | --- | --- | --- | --- |
| `firstRecoverabilityPoint` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The first recoverability point in a PostgreSQL server refers to<br />the earliest point in time to which the database can be
restored. | True | | | +| `lastSuccessfulBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last successful backup time | True | | | +| `lastFailedBackupTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | The last failed backup time | True | | | + + diff --git a/web/versioned_docs/version-0.11.0/resource-name-migration.md b/web/versioned_docs/version-0.11.0/resource-name-migration.md new file mode 100644 index 00000000..f5c1cc39 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/resource-name-migration.md @@ -0,0 +1,219 @@ +--- +sidebar_position: 90 +--- + +# Resource name migration guide + + + +:::warning +Before proceeding with the migration process, please: +1. **Read this guide in its entirety** to understand what changes will be made +2. **Test in a non-production environment** first if possible +3. **Ensure you have proper backups** of your cluster configuration + +This migration will delete old RBAC resources only after the +`plugin-barman-cloud` upgrade. While the operation is designed to be safe, you +should review and understand the changes before proceeding. The maintainers of +this project are not responsible for any issues that may arise during +migration. + +**Note:** This guide assumes you are using the default `cnpg-system` namespace. +::: + +## Overview + +Starting from version **0.8.0**, the `plugin-barman-cloud` deployment manifests +use more specific, prefixed resource names to avoid conflicts with other +components deployed in the same Kubernetes cluster. + +## What Changed + +The following resources have been renamed to use proper prefixes. + +### Cluster-scoped Resources + +| Old Name | New Name | +|----------------------------|------------------------------------------| +| `metrics-auth-role` | `barman-plugin-metrics-auth-role` | +| `metrics-auth-rolebinding` | `barman-plugin-metrics-auth-rolebinding` | +| `metrics-reader` | `barman-plugin-metrics-reader` | +| `objectstore-viewer-role` | `barman-plugin-objectstore-viewer-role` | +| `objectstore-editor-role` | `barman-plugin-objectstore-editor-role` | + +### Namespace-scoped Resources + +| Old Name | New Name | Namespace | +|-------------------------------|---------------------------------------------|---------------| +| `leader-election-role` | `barman-plugin-leader-election-role` | `cnpg-system` | +| `leader-election-rolebinding` | `barman-plugin-leader-election-rolebinding` | `cnpg-system` | + +## Why This Change? + +Using generic names for cluster-wide resources is discouraged as they may +conflict with other components deployed in the same cluster. The new names make +it clear that these resources belong to the Barman Cloud plugin and help avoid +naming collisions. + +## Migration Instructions + +This three steps migration process is straightforward and can be completed with +a few `kubectl` commands. + +### Step 1: Upgrade plugin-barman-cloud + +Please refer to the [Installation](installation.mdx) section to deploy the new +`plugin-barman-cloud` release. + +### Step 2: Delete Old Cluster-scoped Resources + +:::danger Verify Resources Before Deletion +**IMPORTANT**: The old resource names are generic and could potentially belong +to other components in your cluster. 
+ +**Before deleting each resource, verify it belongs to the Barman Cloud plugin +by checking:** +- For `objectstore-*` roles: Look for `barmancloud.cnpg.io` in the API groups +- For `metrics-*` roles: Check if they reference the `plugin-barman-cloud` + ServiceAccount in `cnpg-system` namespace +- For other roles: Look for labels like `app.kubernetes.io/name: plugin-barman-cloud` + +If a resource doesn't have these indicators, **DO NOT DELETE IT** as it may +belong to another application. + +Carefully review the output of each verification command before proceeding with +the `delete`. +::: + +:::tip Dry Run First +You can add `--dry-run=client` to any `kubectl delete` command to preview what +would be deleted without actually removing anything. +::: + +**Only proceed if you've verified these resources belong to the Barman Cloud +plugin (see warning above).** + +For each resource below, first verify it belongs to Barman Cloud, then delete +it: + +```bash +# 1. Check metrics-auth-rolebinding FIRST (we'll check the role after) +# Look for references to plugin-barman-cloud ServiceAccount +kubectl describe clusterrolebinding metrics-auth-rolebinding +# If it references plugin-barman-cloud ServiceAccount in cnpg-system namespace, +# delete it: +kubectl delete clusterrolebinding metrics-auth-rolebinding + +# 2. Check metrics-auth-role +# Look for references to authentication.k8s.io and authorization.k8s.io +kubectl describe clusterrole metrics-auth-role +# Verify it's not being used by any other rolebindings: +kubectl get clusterrolebinding -o json \ + | jq -r '.items[] | select(.roleRef.name=="metrics-auth-role") \ + | .metadata.name' +# If the above returns nothing (role is not in use) and the role looks like the +# Barman Cloud one, delete it (see warnings section): +kubectl delete clusterrole metrics-auth-role + +# 3. Check objectstore-viewer-role +# Look for barmancloud.cnpg.io API group or +# for `app.kubernetes.io/name: plugin-barman-cloud` label +kubectl describe clusterrole objectstore-viewer-role +# If it shows barmancloud.cnpg.io in API groups, delete it: +kubectl delete clusterrole objectstore-viewer-role + +# 4. Check objectstore-editor-role +# Look for barmancloud.cnpg.io API group or +# for `app.kubernetes.io/name: plugin-barman-cloud` label +kubectl describe clusterrole objectstore-editor-role +# If it shows barmancloud.cnpg.io in API groups, delete it: +kubectl delete clusterrole objectstore-editor-role + +# 5. Check metrics-reader (MOST DANGEROUS - very generic name) +# First, check if it's being used by any rolebindings OTHER than barman's: +kubectl get clusterrolebinding -o json | jq -r '.items[] \ + | select(.roleRef.name=="metrics-reader") \ + | "\(.metadata.name) -> \(.subjects[0].name) in \(.subjects[0].namespace)"' +# If this shows ANY rolebindings, review them carefully. Only proceed if +# they're all Barman-related. Then check the role itself: +kubectl describe clusterrole metrics-reader +# If it ONLY has nonResourceURLs: /metrics and NO other rolebindings use it, +# delete it: +kubectl delete clusterrole metrics-reader +``` + +:::warning +The `metrics-reader` role is particularly dangerous to delete blindly. Many +monitoring systems use this exact name. Only delete it if: + +1. You've verified it ONLY grants access to `/metrics` +2. No other rolebindings reference it (checked with the jq command above) +3. 
You're certain it was created by the Barman Cloud plugin + +If you're unsure, it's safer to leave it and let the new +`barman-plugin-metrics-reader` role coexist with it. +::: + +If any resource is not found during the `describe` command, that's okay - it +means it was never created or already deleted. Simply skip the delete command +for that resource. + +### Step 3: Delete Old Namespace-scoped Resources + +Delete the old namespace-scoped resources in the `cnpg-system` namespace: + +```bash +# Delete the old leader-election resources +kubectl delete role leader-election-role -n cnpg-system +kubectl delete rolebinding leader-election-rolebinding -n cnpg-system +``` + +If any resource is not found, that's okay - it means it was never created or +already deleted. + +## Impact + +- **Permissions:** If you have custom RBAC rules or tools that reference the + old resource names, they will need to be updated. +- **External Users:** If end users have been granted the + `objectstore-viewer-role` or `objectstore-editor-role`, they will need to be + re-granted the new role names (`barman-plugin-objectstore-viewer-role` and + `barman-plugin-objectstore-editor-role`). + +## Verification + +After migration, verify that the new resources are created: + +```bash +# Check cluster-scoped resources +kubectl get clusterrole | grep barman +kubectl get clusterrolebinding | grep barman + +# Check namespace-scoped resources +kubectl get role,rolebinding -n cnpg-system | grep barman +``` + +You should see the new prefixed resource names. + +## Troubleshooting + +### Plugin Not Starting After Migration + +If the plugin fails to start after migration, check: + +1. **ServiceAccount permissions:** Ensure the `plugin-barman-cloud` ServiceAccount is bound to the new roles: + ```bash + kubectl get clusterrolebinding barman-plugin-metrics-auth-rolebinding -o yaml + kubectl get rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system -o yaml + ``` + +2. **Role references:** Verify that the rolebindings reference the correct role names: + ```bash + kubectl describe rolebinding barman-plugin-leader-election-rolebinding -n cnpg-system + kubectl describe clusterrolebinding barman-plugin-metrics-auth-rolebinding + ``` + +## Support + +If you encounter issues during migration, please open an issue on the [GitHub +repository](https://github.com/cloudnative-pg/plugin-barman-cloud/issues). diff --git a/web/versioned_docs/version-0.11.0/retention.md b/web/versioned_docs/version-0.11.0/retention.md new file mode 100644 index 00000000..fefbd085 --- /dev/null +++ b/web/versioned_docs/version-0.11.0/retention.md @@ -0,0 +1,38 @@ +--- +sidebar_position: 60 +--- + +# Retention Policies + + + +The Barman Cloud Plugin supports **automated cleanup of obsolete backups** via +retention policies, configured in the `.spec.retentionPolicy` field of the +`ObjectStore` resource. + +:::note +This feature uses the `barman-cloud-backup-delete` command with the +`--retention-policy "RECOVERY WINDOW OF {{ value }} {{ unit }}"` syntax. +::: + +#### Example: 30-Day Retention Policy + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: my-store +spec: + [...] + retentionPolicy: "30d" +```` + +:::note +A **recovery window retention policy** ensures the cluster can be restored to +any point in time between the calculated *Point of Recoverability* (PoR) and +the latest WAL archive. The PoR is defined as `current time - recovery window`. +The **first valid backup** is the most recent backup completed before the PoR. 
+Backups older than that are marked as *obsolete* and deleted after the next +backup completes. +::: + diff --git a/web/versioned_docs/version-0.11.0/troubleshooting.md b/web/versioned_docs/version-0.11.0/troubleshooting.md new file mode 100644 index 00000000..fe57262f --- /dev/null +++ b/web/versioned_docs/version-0.11.0/troubleshooting.md @@ -0,0 +1,591 @@ +--- +sidebar_position: 90 +--- + +# Troubleshooting + + + +This guide helps you diagnose and resolve common issues with the Barman Cloud +plugin. + +:::important +We are continuously improving the integration between CloudNativePG and the +Barman Cloud plugin as it moves toward greater stability and maturity. For this +reason, we recommend using the latest available version of both components. +See the [*Requirements* section](intro.md#requirements) for details. +::: + +:::note +The following commands assume you installed the CloudNativePG operator in +the default `cnpg-system` namespace. If you installed it in a different +namespace, adjust the commands accordingly. +::: + +## Viewing Logs + +To troubleshoot effectively, you’ll often need to review logs from multiple +sources: + +```sh +# View operator logs (includes plugin interaction logs) +kubectl logs -n cnpg-system deployment/cnpg-controller-manager -f + +# View plugin manager logs +kubectl logs -n cnpg-system deployment/barman-cloud -f + +# View sidecar container logs (Barman Cloud operations) +kubectl logs -n -c plugin-barman-cloud -f + +# View all containers in a pod +kubectl logs -n --all-containers=true + +# View previous container logs (if container restarted) +kubectl logs -n -c plugin-barman-cloud --previous +``` + +## Common Issues + +### Plugin Installation Issues + +#### Plugin pods not starting + +**Symptoms:** + +- Plugin pods stuck in `CrashLoopBackOff` or `Error` +- Plugin deployment not ready + +**Possible causes and solutions:** + +1. **Certificate issues** + + ```sh + # Check if cert-manager is installed and running + kubectl get pods -n cert-manager + + # Check if the plugin certificate is created + kubectl get certificates -n cnpg-system + ``` + + If cert-manager is not installed, install it first: + + ```sh + # Note: other installation methods for cert-manager are available + kubectl apply -f \ + https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml + ``` + + If you are using your own certificates without cert-manager, you will need + to verify the entire certificate chain yourself. + + +2. **Image pull errors** + + ```sh + # Check pod events for image pull errors + kubectl describe pod -n cnpg-system -l app=barman-cloud + ``` + + Verify the image exists and you have proper credentials if using a private + registry. + + +3. **Resource constraints** + + ```sh + # Check node resources + kubectl top nodes + kubectl describe nodes + ``` + + Make sure your cluster has sufficient CPU and memory resources. + +### Backup Failures + +#### Quick Backup Troubleshooting Checklist + +When a backup fails, follow these steps in order: + +1. **Check backup status**: + + ```sh + kubectl get backups.postgresql.cnpg.io -n + ``` +2. **Get error details and target pod**: + + ```sh + kubectl describe backups.postgresql.cnpg.io \ + -n + + kubectl get backups.postgresql.cnpg.io \ + -n \ + -o jsonpath='{.status.instanceID.podName}' + ``` +3. 
**Check the target pod’s sidecar logs**: + + ```sh + TARGET_POD=$(kubectl get backups.postgresql.cnpg.io \ + -n \ + -o jsonpath='{.status.instanceID.podName}') + + kubectl logs \ + -n $TARGET_POD -c plugin-barman-cloud \ + --tail=100 | grep -E "ERROR|FATAL|panic" + ``` +4. **Check cluster events**: + + ```sh + kubectl get events -n \ + --field-selector involvedObject.name= \ + --sort-by='.lastTimestamp' + ``` +5. **Verify plugin is running**: + + ```sh + kubectl get pods \ + -n cnpg-system -l app=barman-cloud + ``` +6. **Check operator logs**: + + ```sh + kubectl logs \ + -n cnpg-system deployment/cnpg-controller-manager \ + --tail=100 | grep -i "backup\|plugin" + ``` +7. **Check plugin manager logs**: + + ```sh + kubectl logs \ + -n cnpg-system deployment/barman-cloud --tail=100 + ``` + +#### Backup job fails immediately + +**Symptoms:** + +- Backup pods terminate with error +- No backup files appear in object storage +- Backup shows `failed` phase with various error messages + +**Common failure modes and solutions:** + +1. **"requested plugin is not available" errors** + + ``` + requested plugin is not available: barman + requested plugin is not available: barman-cloud + requested plugin is not available: barman-cloud.cloudnative-pg.io + ``` + + **Cause:** The plugin name in the Cluster configuration doesn’t match the + deployed plugin, or the plugin isn’t registered. + + **Solution:** + + a. **Check plugin registration:** + + ```sh + # If you have the `cnpg` plugin installed (v1.27.0+) + kubectl cnpg status -n + ``` + + Look for the "Plugins status" section: + ``` + Plugins status + Name Version Status Reported Operator Capabilities + ---- ------- ------ ------------------------------ + barman-cloud.cloudnative-pg.io 0.6.0 N/A Reconciler Hooks, Lifecycle Service + ``` + + b. **Verify plugin name in `Cluster` spec**: + + ```yaml + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + spec: + plugins: + - name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: + ``` + + c. **Check plugin deployment is running**: + + ```sh + kubectl get deployment -n cnpg-system barman-cloud + ``` + +2. **"rpc error: code = Unknown desc = panic caught: assignment to entry in nil map" errors** + + **Cause:** Misconfiguration in the `ObjectStore` (e.g., typo or missing field). + + **Solution:** + + - Review sidecar logs for details + - Verify `ObjectStore` configuration and secrets + - Common issues include: + - Missing or incorrect secret references + - Typos in configuration parameters + - Missing required environment variables in secrets + +#### Backup performance issues + +**Symptoms:** + +- Backups take extremely long +- Backups timeout + +**Plugin-specific considerations:** + +1. **Check `ObjectStore` parallelism settings** + - Adjust `maxParallel` in `ObjectStore` configuration + - Monitor sidecar container resource usage during backups + +2. **Verify plugin resource allocation** + - Check if the sidecar container has sufficient CPU/memory + - Review plugin container logs for resource-related warnings + +:::tip +For Barman-specific features like compression, encryption, and performance +tuning, refer to the [Barman documentation](https://docs.pgbarman.org/latest/). +::: + +### WAL Archiving Issues + +#### WAL archiving stops + +**Symptoms:** + +- WAL files accumulate on the primary +- Cluster shows WAL archiving warnings +- Sidecar logs show WAL errors + +**Debugging steps:** + +1. 
**Check plugin sidecar logs for WAL archiving errors** + ```sh + # Check recent WAL archive operations in sidecar + kubectl logs -n -c plugin-barman-cloud \ + --tail=50 | grep -i wal + ``` + +2. **Check ObjectStore configuration for WAL settings** + - Ensure ObjectStore has proper WAL retention settings + - Verify credentials have permissions for WAL operations + +### Restore Issues + +#### Restore fails during recovery + +**Symptoms:** + +- New cluster stuck in recovery +- Plugin sidecar shows restore errors +- PostgreSQL won’t start + +**Debugging steps:** + +1. **Check plugin sidecar logs during restore** + + ```sh + # Check the sidecar logs on the recovering cluster pods + kubectl logs -n \ + -c plugin-barman-cloud --tail=100 + + # Look for restore-related errors + kubectl logs -n \ + -c plugin-barman-cloud | grep -E "restore|recovery|ERROR" + ``` + +2. **Verify plugin can access backups** + + ```sh + # Check if `ObjectStore` is properly configured for restore + kubectl get objectstores.barmancloud.cnpg.io \ + -n -o yaml + + # Check PostgreSQL recovery logs + kubectl logs -n \ + -c postgres | grep -i recovery + ``` + +:::tip +For detailed Barman restore operations and troubleshooting, refer to the +[Barman documentation](https://docs.pgbarman.org/latest/barman-cloud-restore.html). +::: + +#### Point-in-time recovery (PITR) configuration issues + +**Symptoms:** + +- PITR doesn’t reach target time +- WAL access errors +- Recovery halts early + +**Debugging steps:** + +1. **Verify PITR configuration in the `Cluster` spec** + + ```yaml + apiVersion: postgresql.cnpg.io/v1 + kind: Cluster + metadata: + name: + spec: + storage: + size: 1Gi + + bootstrap: + recovery: + source: origin + recoveryTarget: + targetTime: "2024-01-15T10:30:00Z" + + externalClusters: + - name: origin + plugin: + enabled: true + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: + serverName: + ``` + +2. **Check sidecar logs for WAL-related errors** + + ```sh + kubectl logs -n \ + -c plugin-barman-cloud | grep -i wal + ``` + +:::note +Timestamps without an explicit timezone suffix +(e.g., `2024-01-15 10:30:00`) are interpreted as UTC. +::: + +:::warning +Always specify an explicit timezone in your timestamp to avoid ambiguity. +For example, use `2024-01-15T10:30:00Z` or `2024-01-15T10:30:00+02:00` +instead of `2024-01-15 10:30:00`. +::: + +:::note +For detailed PITR configuration and WAL management, see the +[Barman PITR documentation](https://docs.pgbarman.org/latest/). +::: + +### Plugin Configuration Issues + +#### Plugin cannot connect to object storage + +**Symptoms:** + +- Sidecar logs show connection errors +- Backups fail with authentication or network errors +- `ObjectStore` resource reports errors + +**Solution:** + +1. **Verify `ObjectStore` CRD configuration and secrets** + + ```sh + # Check ObjectStore resource status + kubectl get objectstores.barmancloud.cnpg.io \ + -n -o yaml + + # Verify the secret exists and has correct keys for your provider + kubectl get secret -n \ + -o jsonpath='{.data}' | jq 'keys' + ``` + +2. **Check sidecar logs for connectivity issues** + ```sh + kubectl logs -n \ + -c plugin-barman-cloud | grep -E "connect|timeout|SSL|cert" + ``` + +3. 
**Adjust provider-specific settings (endpoint, path style, etc.)** + - See [Object Store Configuration](object_stores.md) for provider-specific settings + - Ensure `endpointURL` is set correctly for your storage provider + - Verify network policies allow egress to your storage provider + +## Diagnostic Commands + +### Using the `cnpg` plugin for `kubectl` + +The `cnpg` plugin for `kubectl` provides extended debugging capabilities. +Keep it updated: + +```sh +# Install or update the `cnpg` plugin +kubectl krew install cnpg +# Or using an alternative method: https://cloudnative-pg.io/documentation/current/kubectl-plugin/#install + +# Check plugin status (requires CNPG 1.27.0+) +kubectl cnpg status -n + +# View cluster status in detail +kubectl cnpg status -n --verbose +``` + +## Getting Help + +If problems persist: + +1. **Check the documentation** + + - [Installation Guide](installation.mdx) + - [Object Store Configuration](object_stores.md) (for provider-specific settings) + - [Usage Examples](usage.md) + + +2. **Gather diagnostic information** + + ```sh + # Create a diagnostic bundle (⚠️ sanitize these before sharing!) + kubectl get objectstores.barmancloud.cnpg.io -A -o yaml > /tmp/objectstores.yaml + kubectl get clusters.postgresql.cnpg.io -A -o yaml > /tmp/clusters.yaml + kubectl logs -n cnpg-system deployment/barman-cloud --tail=1000 > /tmp/plugin.log + ``` + + +3. **Community support** + + - CloudNativePG Slack: [#cloudnativepg-users](https://cloud-native.slack.com/messages/cloudnativepg-users) + - GitHub Issues: [plugin-barman-cloud](https://github.com/cloudnative-pg/plugin-barman-cloud/issues) + + +4. **Include when reporting** + + - CloudNativePG version + - Plugin version + - Kubernetes version + - Cloud provider and region + - Relevant configuration (⚠️ sanitize/redact sensitive information) + - Error messages and logs + - Steps to reproduce + +## Known Issues and Limitations + +### Current Known Issues + +1. **Migration compatibility**: After migrating from in-tree backup to the + plugin, the `kubectl cnpg backup` command syntax has changed + ([#353](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/353)): + + ```sh + # Old command (in-tree, no longer works after migration) + kubectl cnpg backup -n \ + --method=barmanObjectStore + + # New command (plugin-based) + kubectl cnpg backup -n \ + --method=plugin --plugin-name=barman-cloud.cloudnative-pg.io + ``` + +### Plugin Limitations + +1. **Installation method**: Currently only supports manifest and Kustomize + installation ([#351](https://github.com/cloudnative-pg/plugin-barman-cloud/issues/351) - + Helm chart requested) + +2. **Sidecar resource sharing**: The plugin sidecar container shares pod + resources with PostgreSQL + +3. 
**Plugin restart behavior**: Restarting the sidecar container requires
   restarting the entire PostgreSQL pod

## Recap of General Debugging Steps

### Check Backup Status and Identify the Target Instance

```sh
# List all backups and their status
kubectl get backups.postgresql.cnpg.io -n <namespace>

# Get detailed backup information including error messages and target instance
kubectl describe backups.postgresql.cnpg.io <backup-name> \
  -n <namespace>

# Extract the target pod name from a failed backup
kubectl get backups.postgresql.cnpg.io <backup-name> \
  -n <namespace> \
  -o jsonpath='{.status.instanceID.podName}'

# Get more details including the target pod, method, phase, and error
kubectl get backups.postgresql.cnpg.io <backup-name> \
  -n <namespace> \
  -o jsonpath='Pod: {.status.instanceID.podName}{"\n"}Method: {.status.method}{"\n"}Phase: {.status.phase}{"\n"}Error: {.status.error}{"\n"}'

# Check the cluster status for backup-related information
kubectl cnpg status <cluster-name> -n <namespace> --verbose
```

### Check Sidecar Logs on the Backup Target Pod

```sh
# Identify which pod was the backup target (from the previous step)
TARGET_POD=$(kubectl get backups.postgresql.cnpg.io <backup-name> \
  -n <namespace> \
  -o jsonpath='{.status.instanceID.podName}')
echo "Backup target pod: $TARGET_POD"

# Check the sidecar logs on the specific target pod
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud --tail=100

# Follow the logs in real time
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud -f

# Check for specific errors in the target pod around the backup time
kubectl logs -n <namespace> $TARGET_POD \
  -c plugin-barman-cloud --since=10m | grep -E "ERROR|FATAL|panic|failed"

# Alternative: List all cluster pods and their roles
kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> \
  -o custom-columns=NAME:.metadata.name,ROLE:.metadata.labels.cnpg\\.io/instanceRole,INSTANCE:.metadata.labels.cnpg\\.io/instanceName

# Check sidecar logs on ALL cluster pods (if the target is unclear)
for pod in $(kubectl get pods -n <namespace> -l cnpg.io/cluster=<cluster-name> -o name); do
  echo "=== Checking $pod ==="
  kubectl logs -n <namespace> $pod -c plugin-barman-cloud \
    --tail=20 | grep -i error || echo "No errors found"
done
```

### Check Events for Backup-Related Issues

```sh
# Check events for the cluster
kubectl get events -n <namespace> \
  --field-selector involvedObject.name=<cluster-name>

# Check events for failed backups
kubectl get events -n <namespace> \
  --field-selector involvedObject.kind=Backup

# Get all recent events in the namespace
kubectl get events -n <namespace> --sort-by='.lastTimestamp' | tail -20
```

### Verify `ObjectStore` Configuration

```sh
# Check the ObjectStore resource
kubectl get objectstores.barmancloud.cnpg.io <objectstore-name> \
  -n <namespace> -o yaml

# Verify the secret exists and has the correct keys
kubectl get secret <secret-name> -n <namespace> -o yaml
# Alternatively
kubectl get secret <secret-name> -n <namespace> -o jsonpath='{.data}' | jq 'keys'
```

### Common Error Messages and Solutions

* **"AccessDenied" or "403 Forbidden"** — Check cloud credentials and bucket permissions.
* **"NoSuchBucket"** — Verify the bucket exists and the endpoint URL is correct.
* **"Connection timeout"** — Check network connectivity and firewall rules.
* **"SSL certificate problem"** — For self-signed certificates, verify the CA bundle configuration.
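
For the last error, a CA bundle for a self-signed or private endpoint
certificate can be supplied through the object store configuration. The
following is a minimal sketch, assuming the `endpointCA` field of the shared
Barman Cloud object store schema; the Secret name `minio-ca` and key `ca.crt`
are illustrative:

```yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: minio-store
spec:
  configuration:
    destinationPath: s3://backups/
    endpointURL: https://minio:9000
    # Illustrative: CA bundle used to verify the endpoint's TLS certificate,
    # read from the `ca.crt` key of the `minio-ca` Secret
    endpointCA:
      name: minio-ca
      key: ca.crt
    s3Credentials:
      accessKeyId:
        name: minio
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio
        key: ACCESS_SECRET_KEY
```

The Secret can be created from the certificate file, for example with
`kubectl create secret generic minio-ca --from-file=ca.crt=./ca.crt -n <namespace>`.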
+ diff --git a/web/versioned_docs/version-0.11.0/upgrades.mdx b/web/versioned_docs/version-0.11.0/upgrades.mdx new file mode 100644 index 00000000..0ab9ddbe --- /dev/null +++ b/web/versioned_docs/version-0.11.0/upgrades.mdx @@ -0,0 +1,16 @@ +--- +sidebar_position: 25 +--- + +# Upgrades + + + +You can upgrade the plugin simply by installing the new version. Unless +explicitly stated below or in the release notes, no special steps are required. + +## Upgrading to version 0.8.x from previous versions + +Version **0.8.0** introduces breaking changes to resource naming. +To complete the upgrade successfully, follow the instructions in the +["Resource name migration guide"](resource-name-migration.md). diff --git a/web/versioned_docs/version-0.11.0/usage.md b/web/versioned_docs/version-0.11.0/usage.md new file mode 100644 index 00000000..bce6ae3b --- /dev/null +++ b/web/versioned_docs/version-0.11.0/usage.md @@ -0,0 +1,283 @@ +--- +sidebar_position: 30 +--- + +# Using the Barman Cloud Plugin + + + +After [installing the plugin](installation.mdx) in the same namespace as the +CloudNativePG operator, enabling your PostgreSQL cluster to use the Barman +Cloud Plugin involves just a few steps: + +- Defining the object store containing your WAL archive and base backups, using + your preferred [provider](object_stores.md) +- Instructing the Postgres cluster to use the Barman Cloud Plugin + +From that moment, you’ll be able to issue on-demand backups or define a backup +schedule, as well as rely on the object store for recovery operations. + +The rest of this page details each step, using MinIO as object store provider. + +## Defining the `ObjectStore` + +An `ObjectStore` resource must be created for each object store used in your +PostgreSQL architecture. Here's an example configuration using MinIO: + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: minio-store +spec: + configuration: + destinationPath: s3://backups/ + endpointURL: http://minio:9000 + s3Credentials: + accessKeyId: + name: minio + key: ACCESS_KEY_ID + secretAccessKey: + name: minio + key: ACCESS_SECRET_KEY + wal: + compression: gzip +``` + +The `.spec.configuration` schema follows the same format as the +[in-tree barman-cloud support](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration). +Refer to [the CloudNativePG documentation](https://cloudnative-pg.io/documentation/preview/backup_barmanobjectstore/) +for additional details. + +:::important +The `serverName` parameter in the `ObjectStore` resource is retained solely for +API compatibility with the in-tree `barmanObjectStore` and must always be left empty. +When needed, use the `serverName` plugin parameter in the Cluster configuration instead. +::: + +## Configuring WAL Archiving + +Once the `ObjectStore` is defined, you can configure your PostgreSQL cluster +to archive WALs by referencing the store in the `.spec.plugins` section: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + imagePullPolicy: Always + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: minio-store + storage: + size: 1Gi +``` + +This configuration enables both WAL archiving and data directory backups. + +## Performing a Base Backup + +Once WAL archiving is enabled, the cluster is ready for backups. Backups can be +created either declaratively (with YAML manifests) or imperatively (with the +`cnpg` plugin). 
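
Both approaches are shown below. For recurring backups, the same plugin-based
method can also be driven by a `ScheduledBackup` resource. The following is a
minimal sketch, assuming the standard CloudNativePG `ScheduledBackup` API with
plugin support; the schedule and resource names are illustrative:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: scheduled-backup-example
spec:
  # Six-field cron expression (seconds first): every day at midnight
  schedule: "0 0 0 * * *"
  backupOwnerReference: self
  cluster:
    name: cluster-example
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
```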
+ +### Declarative approach (YAML manifest) + +Create a backup resource by applying a YAML manifest: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: backup-example +spec: + cluster: + name: cluster-example + method: plugin + pluginConfiguration: + name: barman-cloud.cloudnative-pg.io +``` + +### Imperative approach (using the `cnpg` plugin) + +The quickest way to trigger an on-demand backup is with the `cnpg` plugin: + +```bash +kubectl cnpg backup -n \ + --method=plugin \ + --plugin-name=barman-cloud.cloudnative-pg.io +``` + +:::note Migration from in-tree backups +If you are migrating from the in-tree backup system, note the change in syntax: + +```bash +# Old command (in-tree backup) +kubectl cnpg backup -n --method=barmanObjectStore + +# New command (plugin-based backup) +kubectl cnpg backup -n \ + --method=plugin \ + --plugin-name=barman-cloud.cloudnative-pg.io +``` +::: + +## Restoring a Cluster + +To restore a cluster from an object store, create a new `Cluster` resource that +references the store containing the backup. Below is an example configuration: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + instances: 3 + imagePullPolicy: IfNotPresent + bootstrap: + recovery: + source: source + externalClusters: + - name: source + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: minio-store + serverName: cluster-example + storage: + size: 1Gi +``` + +:::important +The above configuration does **not** enable WAL archiving for the restored cluster. +::: + +To enable WAL archiving for the restored cluster, include the `.spec.plugins` +section alongside the `externalClusters.plugin` section, as shown below: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + instances: 3 + imagePullPolicy: IfNotPresent + bootstrap: + recovery: + source: source + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + # Backup Object Store (push, read-write) + barmanObjectName: minio-store-bis + externalClusters: + - name: source + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + # Recovery Object Store (pull, read-only) + barmanObjectName: minio-store + serverName: cluster-example + storage: + size: 1Gi +``` + +The same object store may be used for both transaction log archiving and +restoring a cluster, or you can configure separate stores for these purposes. + +## Configuring Replica Clusters + +You can set up a distributed topology by combining the previously defined +configurations with the `.spec.replica` section. Below is an example of how to +define a replica cluster: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-dc-a +spec: + instances: 3 + primaryUpdateStrategy: unsupervised + + storage: + storageClass: csi-hostpath-sc + size: 1Gi + + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: minio-store-a + + replica: + self: cluster-dc-a + primary: cluster-dc-a + source: cluster-dc-b + + externalClusters: + - name: cluster-dc-a + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: minio-store-a + + - name: cluster-dc-b + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: minio-store-b +``` + +## Configuring the plugin instance sidecar + +The Barman Cloud Plugin runs as a sidecar container next to each PostgreSQL +instance pod. 
It manages backup, WAL archiving, and restore processes. + +Configuration comes from multiple `ObjectStore` resources: + +1. The one referenced in the + `.spec.plugins` section of the `Cluster`. This is the + object store used for WAL archiving and base backups. +2. The one referenced in the external cluster + used in the `.spec.replica.source` section of the `Cluster`. This is + used by the log-shipping designated primary to get the WAL files. +3. The one referenced in the + `.spec.bootstrap.recovery.source` section of the `Cluster`. Used by + the initial recovery job to create the cluster from an existing backup. + +You can fine-tune sidecar behavior in the `.spec.instanceSidecarConfiguration` +of your ObjectStore. These settings apply to all PostgreSQL instances that use +this object store. Any updates take effect at the next `Cluster` reconciliation, +and could generate a rollout of the `Cluster`. + +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: minio-store +spec: + configuration: + # [...] + instanceSidecarConfiguration: + retentionPolicyIntervalSeconds: 1800 + resources: + requests: + memory: "XXX" + cpu: "YYY" + limits: + memory: "XXX" + cpu: "YYY" +``` + +:::note +If more than one `ObjectStore` applies, the `instanceSidecarConfiguration` of +the one set in `.spec.plugins` has priority. +::: diff --git a/web/versioned_docs/version-0.4.0/object_stores.md b/web/versioned_docs/version-0.4.0/object_stores.md index 9ca5a2a9..29e7b350 100644 --- a/web/versioned_docs/version-0.4.0/object_stores.md +++ b/web/versioned_docs/version-0.4.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.4.1/object_stores.md b/web/versioned_docs/version-0.4.1/object_stores.md index 9ca5a2a9..29e7b350 100644 --- a/web/versioned_docs/version-0.4.1/object_stores.md +++ b/web/versioned_docs/version-0.4.1/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.5.0/object_stores.md b/web/versioned_docs/version-0.5.0/object_stores.md index 9ca5a2a9..29e7b350 100644 --- a/web/versioned_docs/version-0.5.0/object_stores.md +++ b/web/versioned_docs/version-0.5.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. 
To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.6.0/object_stores.md b/web/versioned_docs/version-0.6.0/object_stores.md index c3179ad7..b9d48248 100644 --- a/web/versioned_docs/version-0.6.0/object_stores.md +++ b/web/versioned_docs/version-0.6.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.7.0/object_stores.md b/web/versioned_docs/version-0.7.0/object_stores.md index f1714c93..74e0473c 100644 --- a/web/versioned_docs/version-0.7.0/object_stores.md +++ b/web/versioned_docs/version-0.7.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.7.0/troubleshooting.md b/web/versioned_docs/version-0.7.0/troubleshooting.md index 6ab9f371..0d9852cb 100644 --- a/web/versioned_docs/version-0.7.0/troubleshooting.md +++ b/web/versioned_docs/version-0.7.0/troubleshooting.md @@ -395,7 +395,7 @@ For detailed PITR configuration and WAL management, see the 3. **Adjust provider-specific settings (endpoint, path style, etc.)** - See [Object Store Configuration](object_stores.md) for provider-specific settings - - Ensure `endpointURL` and `s3UsePathStyle` match your storage type + - Ensure `endpointURL` match your storage type - Verify network policies allow egress to your storage provider ## Diagnostic Commands diff --git a/web/versioned_docs/version-0.8.0/object_stores.md b/web/versioned_docs/version-0.8.0/object_stores.md index f1714c93..74e0473c 100644 --- a/web/versioned_docs/version-0.8.0/object_stores.md +++ b/web/versioned_docs/version-0.8.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.8.0/troubleshooting.md b/web/versioned_docs/version-0.8.0/troubleshooting.md index 6ab9f371..0d9852cb 100644 --- a/web/versioned_docs/version-0.8.0/troubleshooting.md +++ b/web/versioned_docs/version-0.8.0/troubleshooting.md @@ -395,7 +395,7 @@ For detailed PITR configuration and WAL management, see the 3. 
**Adjust provider-specific settings (endpoint, path style, etc.)** - See [Object Store Configuration](object_stores.md) for provider-specific settings - - Ensure `endpointURL` and `s3UsePathStyle` match your storage type + - Ensure `endpointURL` match your storage type - Verify network policies allow egress to your storage provider ## Diagnostic Commands diff --git a/web/versioned_docs/version-0.9.0/object_stores.md b/web/versioned_docs/version-0.9.0/object_stores.md index f1714c93..74e0473c 100644 --- a/web/versioned_docs/version-0.9.0/object_stores.md +++ b/web/versioned_docs/version-0.9.0/object_stores.md @@ -103,7 +103,7 @@ spec: ### S3 Lifecycle Policy -Barman Cloud uploads backup files to S3 but does not modify or delete them afterward. +Barman Cloud uploads backup files to S3 but does not modify them afterward. To enhance data durability and protect against accidental or malicious loss, it's recommended to implement the following best practices: diff --git a/web/versioned_docs/version-0.9.0/troubleshooting.md b/web/versioned_docs/version-0.9.0/troubleshooting.md index 6ab9f371..0d9852cb 100644 --- a/web/versioned_docs/version-0.9.0/troubleshooting.md +++ b/web/versioned_docs/version-0.9.0/troubleshooting.md @@ -395,7 +395,7 @@ For detailed PITR configuration and WAL management, see the 3. **Adjust provider-specific settings (endpoint, path style, etc.)** - See [Object Store Configuration](object_stores.md) for provider-specific settings - - Ensure `endpointURL` and `s3UsePathStyle` match your storage type + - Ensure `endpointURL` match your storage type - Verify network policies allow egress to your storage provider ## Diagnostic Commands diff --git a/web/versioned_sidebars/version-0.11.0-sidebars.json b/web/versioned_sidebars/version-0.11.0-sidebars.json new file mode 100644 index 00000000..1fd014a2 --- /dev/null +++ b/web/versioned_sidebars/version-0.11.0-sidebars.json @@ -0,0 +1,8 @@ +{ + "docs": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/web/versions.json b/web/versions.json index a2a04dd4..3f627675 100644 --- a/web/versions.json +++ b/web/versions.json @@ -1,4 +1,5 @@ [ + "0.11.0", "0.10.0", "0.9.0", "0.8.0",