Forward cluster logs using OpenShift Logging
Steps required to stream OpenShift logs to external log-management tools.
Documentation used as reference:
Prerequisites
- OpenShift Logging Operator
- Create a service account
- Cluster roles
---
# OLM Subscription installing the Red Hat OpenShift Logging operator.
# apiVersion was missing in the original — OLM rejects a Subscription without it.
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  labels:
    operators.coreos.com/cluster-logging.openshift-logging: ""
  name: cluster-logging
  namespace: openshift-logging
spec:
  channel: stable-6.2
  installPlanApproval: Automatic
  name: cluster-logging
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  startingCSV: cluster-logging.v6.2.0
---
# Service account the log collector pods run as; bound to the
# collection/writing ClusterRoles below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: logging-collector
  namespace: openshift-logging
---
# Allows the collector service account to write log records to the LokiStack.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: logging-collector:write-logs
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: logging-collector-logs-writer
subjects:
  - kind: ServiceAccount
    name: logging-collector
    namespace: openshift-logging
---
# Allows the collector service account to read application container logs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: logging-collector:collect-application
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collect-application-logs
subjects:
  - kind: ServiceAccount
    name: logging-collector
    namespace: openshift-logging
---
# Allows the collector service account to read infrastructure (node/system) logs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: logging-collector:collect-infrastructure
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: collect-infrastructure-logs
subjects:
  - kind: ServiceAccount
    name: logging-collector
    namespace: openshift-logging
Option 1: Forward cluster logs to Loki using MinIO S3 storage
- Loki prerequisites
# Deploys a new MinIO pod into the metadata.namespace Kubernetes namespace
#
# The `spec.containers[0].args` contains the command run in the pod.
# The `/data` directory corresponds to `spec.containers[0].volumeMounts[0].mountPath`.
# That mount path corresponds to a Kubernetes hostPath which binds `/data` to a
# local drive or volume on the worker node where the pod runs.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: openshift-logging
  name: minio
  labels:
    app: minio
spec:
  replicas: 1
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
        - name: minio
          image: quay.io/minio/minio:latest
          command:
            - /bin/bash
            - -c
          args:
            - minio server /data --console-address :9090
          volumeMounts:
            # Must match the data path passed to `minio server` above.
            # The original mounted /mnt, leaving /data on ephemeral container
            # storage — all objects would be lost on pod restart.
            - mountPath: /data
              name: localvolume  # Corresponds to the `spec.volumes` entry below
      nodeSelector:
        # TODO: set to the worker node that owns the hostPath volume —
        # a hostPath only exists on one node, so the pod must be pinned there.
        kubernetes.io/hostname: ""
      volumes:
        - name: localvolume
          hostPath:  # MinIO generally recommends using locally-attached volumes
            path: /tmp  # Path to a local drive or volume on the worker node
            type: DirectoryOrCreate  # The path to the last directory must exist
---
# ClusterIP service exposing the MinIO S3 API (9000) and web console (9090)
# inside the cluster; Loki reaches object storage through this service.
apiVersion: v1
kind: Service
metadata:
  namespace: openshift-logging
  name: minio
  labels:
    app: minio
spec:
  type: ClusterIP  # Or NodePort/LoadBalancer if external access is needed
  ports:
    - name: minio
      port: 9000
      targetPort: 9000
    - name: console
      port: 9090
      targetPort: 9090
  selector:
    app: minio
---
# OLM Subscription installing the Loki Operator.
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  # labels:
  #   operators.coreos.com/loki-operator.openshift-operators-redhat: ""
  name: loki-operator
  # Red Hat docs install this into openshift-operators-redhat; this setup
  # deliberately uses the generic openshift-operators namespace instead.
  # namespace: openshift-operators-redhat
  namespace: openshift-operators
spec:
  channel: stable-6.2
  installPlanApproval: Automatic
  name: loki-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  startingCSV: loki-operator.v6.2.0
---
# OLM Subscription installing the Cluster Observability Operator
# (provides the UIPlugin API used for the console logging view below).
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  # labels:
  #   operators.coreos.com/cluster-observability-operator.openshift-cluster-observability: ""
  name: cluster-observability-operator
  # Default namespace per docs is commented out; generic namespace used instead.
  # namespace: openshift-cluster-observability-operator
  namespace: openshift-operators
spec:
  channel: stable
  installPlanApproval: Automatic
  name: cluster-observability-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  startingCSV: cluster-observability-operator.v1.0.0
- Loki forwarder
---
# Enables the "Logs" view in the OpenShift web console, backed by the
# LokiStack named logging-loki.
apiVersion: observability.openshift.io/v1alpha1
kind: UIPlugin
metadata:
  name: logging
spec:
  logging:
    lokiStack:
      name: logging-loki
  type: Logging
---
# Forwards application logs to the in-cluster LokiStack, dropping records
# whose namespace does not match "test" (the drop filter removes a record
# when every condition inside a test entry holds).
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: forwarder
  namespace: openshift-logging
spec:
  serviceAccount:
    name: logging-collector
  filters:
    - name: test
      type: drop
      drop:
        - test:
            - field: .kubernetes.namespace_name
              notMatches: "test"
  outputs:
    - name: default-lokistack
      type: lokiStack
      lokiStack:
        authentication:
          token:
            from: serviceAccount
        target:
          name: logging-loki
          namespace: openshift-logging
      tls:
        ca:
          key: service-ca.crt
          configMapName: openshift-service-ca.crt
  pipelines:
    - name: default-logstore
      inputRefs:
        - application
        # - infrastructure
      outputRefs:
        - default-lokistack
      filterRefs:
        - test
Option 2: Forward cluster logs to AWS CloudWatch
- Create CloudWatch AWS access keys
#!/usr/bin/env bash
# Based on: https://access.redhat.com/articles/7030468
# Creates the RosaCloudWatch IAM policy (if missing), a per-cluster IAM user
# with an access key, attaches the policy, then renders and applies the
# CloudWatch ClusterLogForwarder template.
# NOTE: uses [[ ]], a bash-ism — the original #!/usr/bin/sh shebang would fail
# on systems where sh is not bash.

export ROSA_CLUSTER_NAME=$(oc get infrastructure cluster -o jsonpath='{.status.infrastructureName}' | sed 's/-[a-z0-9]\{5\}$//')
# export ROSA_CLUSTER_ID=$(rosa describe cluster -c ${ROSA_CLUSTER_NAME} --output json | jq -r .id)
export ROSA_CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}{"\n"}')
# export REGION=$(rosa describe cluster -c ${ROSA_CLUSTER_NAME} --output json | jq -r .region.id)
export REGION=$(oc get infrastructure cluster -o jsonpath='{.status.platformStatus.aws.region}{"\n"}')
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
export AWS_PAGER=""
export SCRATCH="/tmp/${ROSA_CLUSTER_NAME}/clf-cloudwatch-vector"
mkdir -p "${SCRATCH}"
echo "Cluster ID: ${ROSA_CLUSTER_ID}, Region: ${REGION}, AWS Account ID: ${AWS_ACCOUNT_ID}"

# Reuse the RosaCloudWatch policy when it already exists; create it otherwise.
POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='RosaCloudWatch'].{ARN:Arn}" --output text)
if [[ -z "${POLICY_ARN}" ]]; then
  cat << EOF > "${SCRATCH}/policy.json"
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:DescribeLogGroups",
        "logs:DescribeLogStreams",
        "logs:PutLogEvents",
        "logs:PutRetentionPolicy"
      ],
      "Resource": "arn:aws:logs:*:*:*"
    }
  ]
}
EOF
  # file:// takes a plain absolute path; the original file:///${SCRATCH}
  # produced four slashes (file:////tmp/...).
  POLICY_ARN=$(aws iam create-policy --policy-name "RosaCloudWatch" \
    --policy-document "file://${SCRATCH}/policy.json" --query Policy.Arn --output text)
fi
echo "${POLICY_ARN}"

aws iam create-user \
  --user-name "${ROSA_CLUSTER_NAME}-cloud-watch" \
  > "${SCRATCH}/aws-user.json"
aws iam create-access-key \
  --user-name "${ROSA_CLUSTER_NAME}-cloud-watch" \
  > "${SCRATCH}/aws-access-key.json"
aws iam attach-user-policy \
  --user-name "${ROSA_CLUSTER_NAME}-cloud-watch" \
  --policy-arn "${POLICY_ARN}"

# WARNING: prints the secret access key to stdout — avoid running in shared logs.
AWS_ID=$(jq -r '.AccessKey.AccessKeyId' "${SCRATCH}/aws-access-key.json")
AWS_KEY=$(jq -r '.AccessKey.SecretAccessKey' "${SCRATCH}/aws-access-key.json")
echo "AccessKeyId: ${AWS_ID}"
echo "SecretAccessKey: ${AWS_KEY}"
# minijinja-cli --env reads from the process environment, so these must be
# exported (the original assigned them without export, leaving the template
# unable to see them). TODO confirm the variable names used in cw-forwarder.yaml.
export AWS_ID AWS_KEY
minijinja-cli --env cw-forwarder.yaml | oc apply -f -
- CW Forwarder
---
# Credentials consumed by the CloudWatch outputs of the ClusterLogForwarder.
apiVersion: v1
kind: Secret
metadata:
  name: cloudwatch-credentials
  namespace: openshift-logging
stringData:
  # NOTE(review): values were bare (null) in the original, which is invalid —
  # stringData requires string values. Presumably filled in by the minijinja
  # rendering step in the access-key script; confirm the template syntax used
  # in cw-forwarder.yaml before applying directly.
  aws_access_key_id: ""
  aws_secret_access_key: ""
---
# Forwards application logs to two CloudWatch log groups, one pipeline per
# target, each with a namespace-based drop filter (a record is dropped when
# every condition inside a test entry holds — i.e. when its namespace matches
# none of the listed names).
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
  name: logging
  namespace: openshift-logging
spec:
  managementState: Managed
  serviceAccount:
    name: logging-collector
  outputs:
    - name: output-two
      type: cloudwatch
      cloudwatch:
        authentication:
          type: awsAccessKey
          awsAccessKey:
            keyId:
              key: aws_access_key_id
              secretName: cloudwatch-credentials
            keySecret:
              key: aws_secret_access_key
              secretName: cloudwatch-credentials
        groupName: two-preprod
        groupPrefix: rosa-
        region: eu-central-1
    - name: output-one
      type: cloudwatch
      cloudwatch:
        authentication:
          type: awsAccessKey
          awsAccessKey:
            keyId:
              key: aws_access_key_id
              secretName: cloudwatch-credentials
            keySecret:
              key: aws_secret_access_key
              secretName: cloudwatch-credentials
        groupName: one-preprod
        groupPrefix: rosa-
        region: eu-central-1
  pipelines:
    - name: integration-layer
      inputRefs:
        - application
        # - infrastructure
      outputRefs:
        - output-one
      filterRefs:
        - filter-one
    - name: mas
      inputRefs:
        - application
        # - infrastructure
      filterRefs:
        - filter-two
      outputRefs:
        - output-two
  filters:
    - name: filter-two
      type: drop
      drop:
        - test:
            - field: .kubernetes.namespace_name
              notMatches: "test1"
            - field: .kubernetes.namespace_name
              notMatches: "test2"
            - field: .kubernetes.namespace_name
              notMatches: "test3"
            - field: .kubernetes.namespace_name
              notMatches: "test4"
    - name: filter-one
      type: drop
      drop:
        - test:
            - field: .kubernetes.namespace_name
              notMatches: "test5"