Note_Tech

All technological notes.


Project maintained by simonangel-fong Hosted on GitHub Pages — Theme by mattgraham

Kubernetes - Network Policy

Back


Network Policy



Declarative Manifest



spec:
  egress:
    # "to" is left empty (null), so this rule does not restrict destinations —
    # only the destination ports.
    - to:
      # allow the TCP port range 32000-32768 (port..endPort, inclusive)
      ports:
        - protocol: TCP
          port: 32000
          endPort: 32768

spec:
  egress:
    - to:
        - namespaceSelector:
            matchExpressions:
              # kubernetes.io/metadata.name is set automatically on every
              # namespace (immutable, since Kubernetes 1.21). A bare
              # "namespace" key would only match if someone had hand-labeled
              # the namespaces that way.
              - key: kubernetes.io/metadata.name
                operator: In
                values: ["frontend", "backend"] # multiple ns


# Three separate peer entries under "from": traffic matching ANY one of them
# is allowed (the entries are OR'ed).
spec:
  ingress:
    - from:
        - ipBlock:
        - namespaceSelector:
        - podSelector:
# A single peer entry with several fields: the conditions are AND'ed.
# NOTE(review): per the NetworkPolicyPeer API, ipBlock cannot be combined with
# the selector fields in the same entry — shown here only to illustrate how
# the indentation changes the meaning.
spec:
  ingress:
    - from:
        - ipBlock:
          namespaceSelector:
          podSelector:

# limit mysql access
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: db-policy
  namespace: default
spec:
  # apply this policy to pods labeled role=db
  podSelector:
    matchLabels:
      role: db
  policyTypes:
    - Ingress
  # ingress rule
  ingress:
    # from/to
    - from:
        - podSelector:
            matchLabels:
              name: api-pod
      # port
      ports:
        # "protocol" was misspelled "protocal" — an unknown field is rejected
        # by the API server
        - protocol: TCP
          port: 3306

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: db-policy
spec:
  # associate with selected pods
  podSelector:
    matchLabels:
      role: db
  # define type of policy
  # (policyTypes/ingress/egress are direct children of spec — the original
  # had them nested under podSelector, which is invalid)
  policyTypes:
    - Ingress
    - Egress
  # rules
  ingress:
    - from:
        # one peer entry: only allow requests from api-pod AND a specific ns
        # (fields in a single entry are AND'ed; "matchLabel" was not a valid
        # field name — it is matchLabels)
        - podSelector:
            matchLabels:
              name: api-pod
          namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: prod
        # second peer entry (OR'ed with the one above): allow a specific IP;
        # cidr must be nested under ipBlock
        - ipBlock:
            cidr: 192.168.5.10/32
      ports:
        - protocol: TCP
          port: 3306
  egress:
    - to:
        # allow request from db to a specific ip pod
        - ipBlock:
            cidr: 192.168.5.10/32
      ports:
        - protocol: TCP
          port: 80

Note: the ingress rule above allows traffic from 2 peers (the entries are OR'ed):

  1. api-pod within the prod ns (podSelector and namespaceSelector in one entry are AND'ed)
  2. the pod with a given IP

Imperative Commands

Command Description
kubectl explain networkpolicy Display documentation for the NetworkPolicy API.
kubectl get networkpolicy (short form: kubectl get netpol) List all NetworkPolicies in the current namespace.
kubectl get networkpolicy -A List all NetworkPolicies across all namespaces.
kubectl describe networkpolicy <name> Show details of a specific NetworkPolicy.
kubectl apply -f <file> Create or update a NetworkPolicy from a manifest. (kubectl has no imperative `create networkpolicy` subcommand — NetworkPolicies must be created declaratively.)
kubectl delete networkpolicy <name> Delete a specific NetworkPolicy.
kubectl edit networkpolicy <name> Edit an existing NetworkPolicy in-place.

Default policies

Default deny all ingress traffic

# Selects every pod and declares Ingress with no rules,
# so all inbound traffic to all pods in the namespace is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {} # all pods
  policyTypes:
    - Ingress

Default deny all egress traffic

# Selects every pod and declares Egress with no rules,
# so all outbound traffic from all pods in the namespace is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-egress
spec:
  podSelector: {} # all pods
  policyTypes:
    - Egress

Allow all ingress traffic

# An empty ingress rule ({}) matches everything, explicitly allowing
# all inbound traffic even if a deny policy also selects these pods.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-all-ingress
spec:
  podSelector: {} # all pods
  policyTypes:
    - Ingress
  ingress:
    - {} # allow all sources and all ports

Allow all egress traffic

# An empty egress rule ({}) matches everything, explicitly allowing
# all outbound traffic even if a deny policy also selects these pods.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-all-egress
spec:
  podSelector: {} # all pods
  policyTypes:
    - Egress
  egress:
    - {} # allow all destinations and all ports

Default deny all ingress and all egress traffic

# Declares both policy types with no rules: all pods in the namespace
# are isolated for both inbound and outbound traffic.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress

Lab: Control DB connection

Create App

# Lab setup: two namespaces (db, backend), a mongo Deployment and a
# ClusterIP Service in db, then connectivity checks from both namespaces.

# create db ns
kubectl create ns db
# namespace/db created

# create backend ns
kubectl create ns backend
# namespace/backend created

# confirm
kubectl get ns
# NAME                   STATUS   AGE
# backend                Active   3m39s
# db                     Active   3m48s

# create db
kubectl create deploy mongo -n db --image=mongo --port=27017
# deployment.apps/mongo created

kubectl get deploy mongo -n db
# NAME    READY   UP-TO-DATE   AVAILABLE   AGE
# mongo   1/1     1            1           61s

kubectl get pod -n db -L app
# NAME                     READY   STATUS    RESTARTS   AGE   APP
# mongo-689485f9f7-2fhw7   1/1     Running   0          78s   mongo

kubectl expose deploy mongo -n db --port=27017 --target-port=27017
# service/mongo exposed

kubectl get svc -n db -o wide
# NAME    TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)     AGE   SELECTOR
# mongo   ClusterIP   10.108.202.161   <none>        27017/TCP   6s    app=mongo

kubectl describe svc mongo -n db
# Name:                     mongo
# Namespace:                db
# Labels:                   app=mongo
# Annotations:              <none>
# Selector:                 app=mongo
# Type:                     ClusterIP
# IP Family Policy:         SingleStack
# IP Families:              IPv4
# IP:                       10.108.202.161
# IPs:                      10.108.202.161
# Port:                     <unset>  27017/TCP
# TargetPort:               27017/TCP
# Endpoints:                10.244.2.70:27017
# Session Affinity:         None
# Internal Traffic Policy:  Cluster
# Events:                   <none>

# test from backend (mongo.db = <service>.<namespace> DNS name)
kubectl run test-backend -n backend --image=mongo --labels=role=backend -it --rm -- mongosh --host mongo.db --port 27017 --eval "db.runCommand({ ping: 1 })"
# { ok: 1 }

# test from default
kubectl run test-default -n default --image=mongo --labels=role=random -it --rm -- mongosh --host mongo.db --port 27017 --eval "db.runCommand({ ping: 1 })"
# { ok: 1 }
# { ok: 1 }

Create Default Network Policy

# db-deny-all-ingress.yaml
# Baseline policy: isolate the mongo pods for ingress
# (Ingress declared with no rules => deny all inbound traffic).
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: db-deny-all-ingress
  namespace: db
spec:
  podSelector:
    matchLabels:
      app: mongo
  policyTypes:
    - Ingress
kubectl apply -f db-deny-all-ingress.yaml
# networkpolicy.networking.k8s.io/db-deny-all-ingress created

kubectl get networkpolicy -n db
# NAME                  POD-SELECTOR   AGE
# db-deny-all-ingress   app=mongo      3m10s

kubectl describe networkpolicy db-deny-all-ingress -n db
# Name:         db-deny-all-ingress
# Namespace:    db
# Created on:   2026-01-07 16:16:54 -0500 EST
# Labels:       <none>
# Annotations:  <none>
# Spec:
#   PodSelector:     app=mongo
#   Allowing ingress traffic:
#     <none> (Selected pods are isolated for ingress connectivity)
#   Not affecting egress traffic
#   Policy Types: Ingress
# test backend
kubectl run test-backend -n backend --image=mongo -it --rm -- mongosh --host mongo.db --port 27017 --eval "db.runCommand({ ping: 1 })"
# { ok: 1 }
# NOTE(review): the connection still succeeds despite the deny-all ingress
# policy — NetworkPolicy is only enforced by a CNI plugin that supports it,
# and this cluster's CNI apparently does not (Calico is installed further
# below). With an enforcing CNI this ping would time out.

Create the allow-backend Policy

# allow-backend.yaml
# Allow TCP 27017 to the mongo pods, but only from pods in the "backend"
# namespace (namespaceSelector AND podSelector in a single peer entry).
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-backend
  namespace: db
spec:
  podSelector:
    matchLabels:
      app: mongo
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: backend
          podSelector: {}
      ports:
        - protocol: TCP
          port: 27017
kubectl apply -f allow-backend.yaml
# networkpolicy.networking.k8s.io/allow-backend created

# test backend (expected: allowed)
kubectl run test-backend -n backend --image=mongo -it --rm -- mongosh --host mongo.db --port 27017 --eval "db.runCommand({ ping: 1 })"
# { ok: 1 }

# test default
kubectl run test-default -n default --image=mongo -it --rm -- mongosh --host mongo.db --port 27017 --eval "db.runCommand({ ping: 1 })"
# { ok: 1 }
# NOTE(review): the default-namespace test should be blocked by the policies
# above; it succeeds here because the CNI does not yet enforce NetworkPolicy
# (Calico is installed next).

# NetworkPolicy enforcement requires a CNI plugin that implements it.
# Install Calico so the policies defined above actually take effect.

# Install the Tigera Operator and custom resource definitions.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/tigera-operator.yaml
# sercontent.com/projectcalico/calico/v3.31.3/manifests/tigera-operator.yaml
# namespace/tigera-operator created
# serviceaccount/tigera-operator created
# clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created
# clusterrole.rbac.authorization.k8s.io/tigera-operator created
# clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created
# rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created
# deployment.apps/tigera-operator created

# Install Calico by creating the necessary custom resources.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/custom-resources.yaml
# sercontent.com/projectcalico/calico/v3.31.3/manifests/custom-resources.yaml
# installation.operator.tigera.io/default created
# apiserver.operator.tigera.io/default created
# goldmane.operator.tigera.io/default created
# whisker.operator.tigera.io/default created


# confirm — wait until all components report Available
watch kubectl get tigerastatus



# Clean up: force-delete the db namespace. If it hangs in Terminating,
# strip its finalizers via the raw finalize API (last-resort workaround —
# may leave orphaned resources behind).
kubectl delete ns db --force --grace-period=0
# Source - https://stackoverflow.com/a
# Posted by teoincontatto
# Retrieved 2026-01-07, License - CC BY-SA 4.0

# Remove the "kubernetes" finalizer from the namespace object so the API
# server can complete the deletion.
kubectl get namespace "db" -o json \
  | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" \
  | kubectl replace --raw /api/v1/namespaces/db/finalize -f -