Compare commits

...

15 Commits

Author SHA1 Message Date
Kirill Sobakin
315d118c17 Fix: protobuf version specify 2026-03-17 00:04:20 +03:00
Kirill Sobakin
fee4746b57 Fix: fix from copilot 2026-03-16 23:54:56 +03:00
Kirill Sobakin
689a9ea755 Fix: strict and sys 1 2026-03-16 23:40:46 +03:00
Kirill Sobakin
1a6e1cf718 Feat: add links 2026-03-16 23:11:45 +03:00
Kirill Sobakin
98fb1ca2df Ref: no docker 2026-03-16 23:01:36 +03:00
Kirill Sobakin
2868f4576e Ref: no docker 2026-03-16 22:55:17 +03:00
Kirill Sobakin
76f8f79434 Ref: add protobuf 2026-03-16 21:29:55 +03:00
Kirill Sobakin
623b97ff1b Ref: deduplicate convert funcs, generate DAT with python protobuf 2026-03-16 21:29:15 +03:00
Kirill Sobakin
6d441b4cbf Ref: some fixes 2026-03-16 20:48:16 +03:00
Kirill Sobakin
8ca8769a77 Ref: to API stat.ripe.net. Some fixes 2026-03-16 19:32:14 +03:00
GitHub Action
a06c1739a4 Update subnet 2026-03-16 15:18:38 +00:00
Kirill Sobakin
b8fe629d16 Feat: Add ASN Telegram. Fix meet hardcode #110 2026-03-16 18:12:46 +03:00
GitHub Action
ccedd6cfac Update lists 2026-03-16 15:03:51 +00:00
Kirill Sobakin
96e00053c4 Merge pull request #110 from DrRamm/google_meet_list
Feat: Добавлен список Google Meet
2026-03-16 18:03:29 +03:00
DrRamm
b7ff4adda5 Добавлен список Google Meet
На основе
https://support.google.com/a/answer/1279090?hl=en
2026-01-31 17:06:39 +03:00
25 changed files with 430 additions and 507 deletions

View File

@@ -1 +1,8 @@
blank_issues_enabled: false
blank_issues_enabled: false
contact_links:
- name: Предложить домен или сервис
url: https://github.com/itdoginfo/allow-domains/discussions/categories/general
about: Добавление доменов и сервисов производится через Discussions, а не Issues.
- name: Вопрос по использованию
url: https://t.me/itdogchat
about: С вопросами в Telegram-чат.

View File

@@ -19,19 +19,26 @@ jobs:
generate-lists:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4.3.0
- name: Compile ruleset srs
- uses: actions/checkout@v6.0.2
- name: Cache sing-box
uses: actions/cache@v5.0.3
id: cache-singbox
with:
path: /usr/local/bin/sing-box
key: sing-box-1.12.25
- name: Install sing-box
if: steps.cache-singbox.outputs.cache-hit != 'true'
run: |
docker run --rm \
-v ${{ github.workspace }}/src:/app/src \
-v ${{ github.workspace }}/Subnets:/app/Subnets \
-v ${{ github.workspace }}/Russia:/app/Russia \
-v ${{ github.workspace }}/Ukraine:/app/Ukraine \
-v ${{ github.workspace }}/Categories:/app/Categories \
-v ${{ github.workspace }}/Services:/app/Services \
-v ${{ github.workspace }}/SRS:/app/SRS \
-v ${{ github.workspace }}/DAT:/app/DAT \
itdoginfo/compilesrs:0.1.22
wget -qO- https://github.com/SagerNet/sing-box/releases/download/v1.12.25/sing-box-1.12.25-linux-amd64.tar.gz | tar xz
sudo mv sing-box-*/sing-box /usr/local/bin/
- name: Install Python dependencies
run: pip install -r requirements.txt
- name: Generate lists
run: python3 convert.py
- name: Check Russia/inside-dnsmasq-ipset
uses: itdoginfo/dnsmasq-action@0.1

View File

@@ -14,13 +14,13 @@ permissions:
jobs:
generate-subnet:
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4.1.7
- uses: actions/checkout@v6.0.2
- name: Generate subnets
uses: actions/setup-python@v5.1.0
uses: actions/setup-python@v6.2.0
with:
python-version: '3.10'
python-version: '3.13'
- run: |
python get-subnets.py
- name: Push subnets

3
.gitignore vendored
View File

@@ -4,4 +4,5 @@ zaboronahelp-domains.lst
SRS
JSON
DAT
geosite_data
geosite_data
proto/__pycache__

View File

@@ -1,22 +0,0 @@
FROM ghcr.io/sagernet/sing-box:v1.11.15 AS sing-box
FROM golang:1.25.5-alpine3.23 AS go-builder
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w" \
github.com/v2fly/domain-list-community@20251222003838
FROM python:3.12.12-alpine3.23
COPY --from=sing-box /usr/local/bin/sing-box /bin/sing-box
COPY --from=go-builder /go/bin/domain-list-community /bin/domain-list-community
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt
WORKDIR /app
COPY convert.py /app/convert.py
CMD ["python3", "convert.py"]

View File

@@ -34,6 +34,7 @@
- Tik-Tok
- Twitter
- YouTube
- Google Meet
## Страны
### Россия
@@ -232,6 +233,16 @@
</details>
<details>
<summary>Google Meet</summary>
- [Subnets](https://raw.githubusercontent.com/itdoginfo/allow-domains/refs/heads/main/Subnets/IPv4/google_meet.lst)
- [SRS](https://github.com/itdoginfo/allow-domains/releases/latest/download/google_meet.srs)
</details>
# Как найти все-все домены ресурса?
https://itdog.info/analiziruem-trafik-i-opredelyaem-domeny-kotorye-ispolzuyut-sajty-i-prilozheniya/

View File

@@ -380,6 +380,7 @@ DOMAIN-SUFFIX,habr.com
DOMAIN-SUFFIX,hackernoon.com
DOMAIN-SUFFIX,hackmd.io
DOMAIN-SUFFIX,halooglasi.com
DOMAIN-SUFFIX,hangouts.googleapis.com
DOMAIN-SUFFIX,hashicorp.com
DOMAIN-SUFFIX,haydaygame.com
DOMAIN-SUFFIX,hbomax.com
@@ -549,6 +550,9 @@ DOMAIN-SUFFIX,mediazona.ca
DOMAIN-SUFFIX,medicalnewstoday.com
DOMAIN-SUFFIX,medium.com
DOMAIN-SUFFIX,meduza.io
DOMAIN-SUFFIX,meet.google.com
DOMAIN-SUFFIX,meetings.clients6.google.com
DOMAIN-SUFFIX,meetings.googleapis.com
DOMAIN-SUFFIX,mega.nz
DOMAIN-SUFFIX,megapeer.ru
DOMAIN-SUFFIX,megapeer.vip
@@ -905,6 +909,7 @@ DOMAIN-SUFFIX,steamstat.info
DOMAIN-SUFFIX,strana.news
DOMAIN-SUFFIX,strana.today
DOMAIN-SUFFIX,strava.com
DOMAIN-SUFFIX,stream.meet.google.com
DOMAIN-SUFFIX,suggestqueries.google.com
DOMAIN-SUFFIX,supercell.com
DOMAIN-SUFFIX,supersliv.biz

View File

@@ -380,6 +380,7 @@ ipset=/habr.com/vpn_domains
ipset=/hackernoon.com/vpn_domains
ipset=/hackmd.io/vpn_domains
ipset=/halooglasi.com/vpn_domains
ipset=/hangouts.googleapis.com/vpn_domains
ipset=/hashicorp.com/vpn_domains
ipset=/haydaygame.com/vpn_domains
ipset=/hbomax.com/vpn_domains
@@ -549,6 +550,9 @@ ipset=/mediazona.ca/vpn_domains
ipset=/medicalnewstoday.com/vpn_domains
ipset=/medium.com/vpn_domains
ipset=/meduza.io/vpn_domains
ipset=/meet.google.com/vpn_domains
ipset=/meetings.clients6.google.com/vpn_domains
ipset=/meetings.googleapis.com/vpn_domains
ipset=/mega.nz/vpn_domains
ipset=/megapeer.ru/vpn_domains
ipset=/megapeer.vip/vpn_domains
@@ -905,6 +909,7 @@ ipset=/steamstat.info/vpn_domains
ipset=/strana.news/vpn_domains
ipset=/strana.today/vpn_domains
ipset=/strava.com/vpn_domains
ipset=/stream.meet.google.com/vpn_domains
ipset=/suggestqueries.google.com/vpn_domains
ipset=/supercell.com/vpn_domains
ipset=/supersliv.biz/vpn_domains

View File

@@ -380,6 +380,7 @@ nftset=/habr.com/4#inet#fw4#vpn_domains
nftset=/hackernoon.com/4#inet#fw4#vpn_domains
nftset=/hackmd.io/4#inet#fw4#vpn_domains
nftset=/halooglasi.com/4#inet#fw4#vpn_domains
nftset=/hangouts.googleapis.com/4#inet#fw4#vpn_domains
nftset=/hashicorp.com/4#inet#fw4#vpn_domains
nftset=/haydaygame.com/4#inet#fw4#vpn_domains
nftset=/hbomax.com/4#inet#fw4#vpn_domains
@@ -549,6 +550,9 @@ nftset=/mediazona.ca/4#inet#fw4#vpn_domains
nftset=/medicalnewstoday.com/4#inet#fw4#vpn_domains
nftset=/medium.com/4#inet#fw4#vpn_domains
nftset=/meduza.io/4#inet#fw4#vpn_domains
nftset=/meet.google.com/4#inet#fw4#vpn_domains
nftset=/meetings.clients6.google.com/4#inet#fw4#vpn_domains
nftset=/meetings.googleapis.com/4#inet#fw4#vpn_domains
nftset=/mega.nz/4#inet#fw4#vpn_domains
nftset=/megapeer.ru/4#inet#fw4#vpn_domains
nftset=/megapeer.vip/4#inet#fw4#vpn_domains
@@ -905,6 +909,7 @@ nftset=/steamstat.info/4#inet#fw4#vpn_domains
nftset=/strana.news/4#inet#fw4#vpn_domains
nftset=/strana.today/4#inet#fw4#vpn_domains
nftset=/strava.com/4#inet#fw4#vpn_domains
nftset=/stream.meet.google.com/4#inet#fw4#vpn_domains
nftset=/suggestqueries.google.com/4#inet#fw4#vpn_domains
nftset=/supercell.com/4#inet#fw4#vpn_domains
nftset=/supersliv.biz/4#inet#fw4#vpn_domains

View File

@@ -379,6 +379,7 @@ habr.com
hackernoon.com
hackmd.io
halooglasi.com
hangouts.googleapis.com
hashicorp.com
haydaygame.com
hbomax.com
@@ -548,6 +549,9 @@ mediazona.ca
medicalnewstoday.com
medium.com
meduza.io
meet.google.com
meetings.clients6.google.com
meetings.googleapis.com
mega.nz
megapeer.ru
megapeer.vip
@@ -904,6 +908,7 @@ steamstat.info
strana.news
strana.today
strava.com
stream.meet.google.com
suggestqueries.google.com
supercell.com
supersliv.biz

View File

@@ -380,6 +380,7 @@
/ip dns static add name=hackernoon.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=hackmd.io type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=halooglasi.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=hangouts.googleapis.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=hashicorp.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=haydaygame.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=hbomax.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
@@ -549,6 +550,9 @@
/ip dns static add name=medicalnewstoday.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=medium.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=meduza.io type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=meet.google.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=meetings.clients6.google.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=meetings.googleapis.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=mega.nz type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=megapeer.ru type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=megapeer.vip type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
@@ -904,6 +908,7 @@
/ip dns static add name=strana.news type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=strana.today type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=strava.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=stream.meet.google.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=suggestqueries.google.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=supercell.com type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost
/ip dns static add name=supersliv.biz type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost

View File

@@ -380,6 +380,7 @@ habr.com
hackernoon.com
hackmd.io
halooglasi.com
hangouts.googleapis.com
hashicorp.com
haydaygame.com
hbomax.com
@@ -549,6 +550,9 @@ mediazona.ca
medicalnewstoday.com
medium.com
meduza.io
meet.google.com
meetings.clients6.google.com
meetings.googleapis.com
mega.nz
megapeer.ru
megapeer.vip
@@ -905,6 +909,7 @@ steamstat.info
strana.news
strana.today
strava.com
stream.meet.google.com
suggestqueries.google.com
supercell.com
supersliv.biz

5
Services/google_meet.lst Normal file
View File

@@ -0,0 +1,5 @@
meetings.clients6.google.com
meetings.googleapis.com
hangouts.googleapis.com
meet.google.com
stream.meet.google.com

View File

@@ -0,0 +1,3 @@
74.125.247.128/32
74.125.250.0/24
142.250.82.0/24

View File

@@ -111,9 +111,7 @@
64.83.69.0/24
64.94.92.0/23
64.95.150.0/23
64.205.192.0/24
64.225.244.0/23
65.86.32.0/24
66.70.128.0/17
66.92.11.0/24
66.92.25.0/24
@@ -518,7 +516,6 @@
213.218.214.0/24
213.251.128.0/18
216.24.220.0/23
216.132.95.0/24
216.183.120.0/24
216.203.15.0/24
216.211.218.0/24

View File

@@ -1,9 +1,8 @@
91.108.56.0/22
91.108.4.0/22
91.108.8.0/22
91.108.16.0/22
91.108.12.0/22
149.154.160.0/20
91.105.192.0/23
91.108.20.0/22
91.108.4.0/22
91.108.8.0/21
91.108.16.0/21
91.108.56.0/22
95.161.64.0/20
149.154.160.0/20
185.76.151.0/24

View File

@@ -0,0 +1,3 @@
2001:4860:4864:4:8000::/128
2001:4860:4864:5::/64
2001:4860:4864:6::/64

View File

@@ -1,5 +1,4 @@
2001:b28:f23d::/48
2001:b28:f23f::/48
2001:67c:4e8::/48
2001:b28:f23c::/48
2001:b28:f23c::/47
2001:b28:f23f::/48
2a0a:f280::/32

View File

@@ -6,7 +6,9 @@ import re
from pathlib import Path
import json
import os
import shutil
import subprocess
import sys
rusDomainsInsideOut='Russia/inside'
rusDomainsInsideSrcSingle='src/Russia-domains-inside-single.lst'
@@ -16,70 +18,49 @@ rusDomainsOutsideSrc='src/Russia-domains-outside.lst'
rusDomainsOutsideOut='Russia/outside'
uaDomainsSrc='src/Ukraine-domains-inside.lst'
uaDomainsOut='Ukraine/inside'
DiscordSubnets = 'Subnets/IPv4/discord.lst'
MetaSubnets = 'Subnets/IPv4/meta.lst'
TwitterSubnets = 'Subnets/IPv4/twitter.lst'
TelegramSubnets = 'Subnets/IPv4/telegram.lst'
CloudflareSubnets = 'Subnets/IPv4/cloudflare.lst'
HetznerSubnets = 'Subnets/IPv4/hetzner.lst'
OVHSubnets = 'Subnets/IPv4/ovh.lst'
DigitalOceanSubnets = 'Subnets/IPv4/digitalocean.lst'
CloudfrontSubnets = 'Subnets/IPv4/cloudfront.lst'
RobloxSubnets = 'Subnets/IPv4/roblox.lst'
ExcludeServices = {"telegram.lst", "cloudflare.lst", "google_ai.lst", "google_play.lst", 'hetzner.lst', 'ovh.lst', 'digitalocean.lst', 'cloudfront.lst', 'hodca.lst', 'roblox.lst'}
SUBNET_SERVICES = [
'discord', 'meta', 'twitter', 'telegram',
'cloudflare', 'hetzner', 'ovh', 'digitalocean',
'cloudfront', 'roblox', 'google_meet',
]
ExcludeServices = {"telegram.lst", "cloudflare.lst", "google_ai.lst", "google_play.lst", 'hetzner.lst', 'ovh.lst', 'digitalocean.lst', 'cloudfront.lst', 'hodca.lst', 'roblox.lst', 'google_meet.lst'}
def collect_files(src):
files = []
for dir_path in src:
path = Path(dir_path)
if path.is_dir():
files.extend(f for f in path.glob('*') if f.name not in ExcludeServices)
elif path.is_file() and path.name not in ExcludeServices:
files.append(path)
return files
def collect_domains(src, dot_prefix=True):
domains = set()
for f in collect_files(src):
if not f.is_file():
continue
with open(f) as infile:
for line in infile:
ext = tldextract.extract(line.rstrip())
if not ext.suffix:
continue
if re.search(r'[^а\-]', ext.domain):
domains.add(ext.fqdn)
elif not ext.domain:
prefix = '.' if dot_prefix else ''
domains.add(prefix + ext.suffix)
return domains
def raw(src, out):
domains = set()
files = []
if isinstance(src, list):
for dir_path in src:
path = Path(dir_path)
if path.is_dir():
files.extend(f for f in path.glob('*') if f.name not in ExcludeServices)
elif path.is_file() and path.name not in ExcludeServices:
files.append(path)
for f in files:
if f.is_file():
with open(f) as infile:
for line in infile:
if tldextract.extract(line).suffix:
if re.search(r'[^а\-]', tldextract.extract(line).domain):
domains.add(tldextract.extract(line.rstrip()).fqdn)
if not tldextract.extract(line).domain and tldextract.extract(line).suffix:
domains.add("." + tldextract.extract(line.rstrip()).suffix)
domains = sorted(domains)
domains = sorted(collect_domains(src))
with open(f'{out}-raw.lst', 'w') as file:
for name in domains:
file.write(f'{name}\n')
def dnsmasq(src, out, remove={'google.com'}):
domains = set()
files = []
if isinstance(src, list):
for dir_path in src:
path = Path(dir_path)
if path.is_dir():
files.extend(f for f in path.glob('*') if f.name not in ExcludeServices)
elif path.is_file() and path.name not in ExcludeServices:
files.append(path)
for f in files:
if f.is_file():
with open(f) as infile:
for line in infile:
if tldextract.extract(line).suffix:
if re.search(r'[^а\-]', tldextract.extract(line).domain):
domains.add(tldextract.extract(line.rstrip()).fqdn)
if not tldextract.extract(line).domain and tldextract.extract(line).suffix:
domains.add("." + tldextract.extract(line.rstrip()).suffix)
domains = domains - remove
domains = sorted(domains)
domains = sorted(collect_domains(src) - remove)
with open(f'{out}-dnsmasq-nfset.lst', 'w') as file:
for name in domains:
@@ -90,84 +71,21 @@ def dnsmasq(src, out, remove={'google.com'}):
file.write(f'ipset=/{name}/vpn_domains\n')
def clashx(src, out, remove={'google.com'}):
domains = set()
files = []
if isinstance(src, list):
for dir_path in src:
path = Path(dir_path)
if path.is_dir():
files.extend(f for f in path.glob('*') if f.name not in ExcludeServices)
elif path.is_file() and path.name not in ExcludeServices:
files.append(path)
for f in files:
with open(f) as infile:
for line in infile:
if tldextract.extract(line).suffix:
if re.search(r'[^а\-]', tldextract.extract(line).domain):
domains.add(tldextract.extract(line.rstrip()).fqdn)
if not tldextract.extract(line).domain and tldextract.extract(line).suffix:
domains.add("." + tldextract.extract(line.rstrip()).suffix)
domains = domains - remove
domains = sorted(domains)
domains = sorted(collect_domains(src) - remove)
with open(f'{out}-clashx.lst', 'w') as file:
for name in domains:
file.write(f'DOMAIN-SUFFIX,{name}\n')
def kvas(src, out, remove={'google.com'}):
domains = set()
files = []
if isinstance(src, list):
for dir_path in src:
path = Path(dir_path)
if path.is_dir():
files.extend(f for f in path.glob('*') if f.name not in ExcludeServices)
elif path.is_file() and path.name not in ExcludeServices:
files.append(path)
for f in files:
with open(f) as infile:
for line in infile:
if tldextract.extract(line).suffix:
if re.search(r'[^а\-]', tldextract.extract(line).domain):
domains.add(tldextract.extract(line.rstrip()).fqdn)
if not tldextract.extract(line).domain and tldextract.extract(line).suffix:
domains.add(tldextract.extract(line.rstrip()).suffix)
domains = domains - remove
domains = sorted(domains)
domains = sorted(collect_domains(src, dot_prefix=False) - remove)
with open(f'{out}-kvas.lst', 'w') as file:
for name in domains:
file.write(f'{name}\n')
def mikrotik_fwd(src, out, remove={'google.com'}):
domains = set()
files = []
if isinstance(src, list):
for dir_path in src:
path = Path(dir_path)
if path.is_dir():
files.extend(f for f in path.glob('*') if f.name not in ExcludeServices)
elif path.is_file() and path.name not in ExcludeServices:
files.append(path)
for f in files:
with open(f) as infile:
for line in infile:
if tldextract.extract(line).suffix:
if re.search(r'[^а\-]', tldextract.extract(line).domain):
domains.add(tldextract.extract(line.rstrip()).fqdn)
if not tldextract.extract(line).domain and tldextract.extract(line).suffix:
domains.add("." + tldextract.extract(line.rstrip()).suffix)
domains = domains - remove
domains = sorted(domains)
domains = sorted(collect_domains(src) - remove)
with open(f'{out}-mikrotik-fwd.lst', 'w') as file:
for name in domains:
@@ -176,213 +94,56 @@ def mikrotik_fwd(src, out, remove={'google.com'}):
else:
file.write(f'/ip dns static add name={name} type=FWD address-list=allow-domains match-subdomain=yes forward-to=localhost\n')
def domains_from_file(filepath):
domains = []
try:
with open(filepath, 'r', encoding='utf-8') as file:
for line in file:
domain = line.strip()
if domain:
domains.append(domain)
except FileNotFoundError:
print(f"File not found: {filepath}")
return domains
def lines_from_file(filepath):
if not os.path.exists(filepath):
print(f"Warning: input file not found: {filepath}", file=sys.stderr)
return []
with open(filepath, 'r', encoding='utf-8') as f:
return [line.strip() for line in f if line.strip()]
def generate_srs_domains(domains, output_name):
output_directory = 'JSON'
compiled_output_directory = 'SRS'
def compile_srs(data, name, json_dir='JSON', srs_dir='SRS'):
os.makedirs(json_dir, exist_ok=True)
os.makedirs(srs_dir, exist_ok=True)
os.makedirs(output_directory, exist_ok=True)
os.makedirs(compiled_output_directory, exist_ok=True)
json_path = os.path.join(json_dir, f"{name}.json")
srs_path = os.path.join(srs_dir, f"{name}.srs")
data = {
"version": 3,
"rules": [
{"domain_suffix": domains}
]
}
json_file_path = os.path.join(output_directory, f"{output_name}.json")
srs_file_path = os.path.join(compiled_output_directory, f"{output_name}.srs")
with open(json_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=4)
try:
with open(json_file_path, 'w', encoding='utf-8') as json_file:
json.dump(data, json_file, indent=4)
print(f"JSON file generated: {json_file_path}")
subprocess.run(
["sing-box", "rule-set", "compile", json_file_path, "-o", srs_file_path], check=True
["sing-box", "rule-set", "compile", json_path, "-o", srs_path], check=True
)
print(f"Compiled .srs file: {srs_file_path}")
print(f"Compiled: {srs_path}")
except subprocess.CalledProcessError as e:
print(f"Compile error {json_file_path}: {e}")
except Exception as e:
print(f"Error while processing {output_name}: {e}")
print(f"Compile error {json_path}: {e}")
sys.exit(1)
def generate_srs_for_categories(directories, output_json_directory='JSON', compiled_output_directory='SRS'):
os.makedirs(output_json_directory, exist_ok=True)
os.makedirs(compiled_output_directory, exist_ok=True)
def srs_rule(name, rules):
compile_srs({"version": 3, "rules": rules}, name)
exclude = {"meta", "twitter", "discord", "telegram", "hetzner", "ovh", "digitalocean", "cloudfront", "roblox"}
def generate_srs_for_categories(directories):
exclude = {"meta", "twitter", "discord", "telegram", "hetzner", "ovh", "digitalocean", "cloudfront", "roblox", "google_meet"}
for directory in directories:
for filename in os.listdir(directory):
if any(keyword in filename for keyword in exclude):
continue
file_path = os.path.join(directory, filename)
if os.path.isfile(file_path):
domains = []
with open(file_path, 'r', encoding='utf-8') as file:
for line in file:
domain = line.strip()
if domain:
domains.append(domain)
data = {
"version": 3,
"rules": [
{
"domain_suffix": domains
}
]
}
output_file_path = os.path.join(output_json_directory, f"{os.path.splitext(filename)[0]}.json")
with open(output_file_path, 'w', encoding='utf-8') as output_file:
json.dump(data, output_file, indent=4)
print(f"JSON file generated: {output_file_path}")
print("\nCompile JSON files to .srs files...")
for filename in os.listdir(output_json_directory):
if filename.endswith('.json'):
json_file_path = os.path.join(output_json_directory, filename)
srs_file_path = os.path.join(compiled_output_directory, f"{os.path.splitext(filename)[0]}.srs")
try:
subprocess.run(
["sing-box", "rule-set", "compile", json_file_path, "-o", srs_file_path], check=True
)
print(f"Compiled .srs file: {srs_file_path}")
except subprocess.CalledProcessError as e:
print(f"Compile error {json_file_path}: {e}")
def generate_srs_subnets(input_file, output_json_directory='JSON', compiled_output_directory='SRS'):
os.makedirs(output_json_directory, exist_ok=True)
os.makedirs(compiled_output_directory, exist_ok=True)
subnets = []
with open(input_file, 'r', encoding='utf-8') as file:
for line in file:
subnet = line.strip()
if subnet:
subnets.append(subnet)
data = {
"version": 3,
"rules": [
{
"ip_cidr": subnets
}
]
}
filename = os.path.splitext(os.path.basename(input_file))[0]
output_file_path = os.path.join(output_json_directory, f"{filename}.json")
with open(output_file_path, 'w', encoding='utf-8') as output_file:
json.dump(data, output_file, indent=4)
print(f"JSON file generated: {output_file_path}")
srs_file_path = os.path.join(compiled_output_directory, f"{filename}.srs")
try:
subprocess.run(
["sing-box", "rule-set", "compile", output_file_path, "-o", srs_file_path], check=True
)
print(f"Compiled .srs file: {srs_file_path}")
except subprocess.CalledProcessError as e:
print(f"Compile error {output_file_path}: {e}")
def generate_srs_combined(input_subnets_file, input_domains_file, output_json_directory='JSON', compiled_output_directory='SRS'):
os.makedirs(output_json_directory, exist_ok=True)
os.makedirs(compiled_output_directory, exist_ok=True)
domains = []
if os.path.exists(input_domains_file):
with open(input_domains_file, 'r', encoding='utf-8') as file:
domains = [line.strip() for line in file if line.strip()]
subnets = []
if os.path.exists(input_subnets_file):
with open(input_subnets_file, 'r', encoding='utf-8') as file:
subnets = [line.strip() for line in file if line.strip()]
if input_subnets_file == "Subnets/IPv4/discord.lst":
data = {
"version": 3,
"rules": [
{
"domain_suffix": domains
},
{
"network": ["udp"],
"ip_cidr": subnets,
"port_range": ["50000:65535"]
}
]
}
elif input_subnets_file == "Subnets/IPv4/telegram.lst" and input_domains_file == "voice_messengers":
data = {
"version": 3,
"rules": [
{
"network": ["udp"],
"ip_cidr": subnets,
"port": [1400],
"port_range": ["596:599"]
}
]
}
else:
data = {
"version": 3,
"rules": [
{
"domain_suffix": domains,
"ip_cidr": subnets
}
]
}
if input_domains_file == "voice_messengers":
filename = "voice_messengers"
else:
filename = os.path.splitext(os.path.basename(input_subnets_file))[0]
output_file_path = os.path.join(output_json_directory, f"{filename}.json")
with open(output_file_path, 'w', encoding='utf-8') as output_file:
json.dump(data, output_file, indent=4)
print(f"JSON file generated: {output_file_path}")
srs_file_path = os.path.join(compiled_output_directory, f"{filename}.srs")
try:
subprocess.run(
["sing-box", "rule-set", "compile", output_file_path, "-o", srs_file_path], check=True
)
print(f"Compiled .srs file: {srs_file_path}")
except subprocess.CalledProcessError as e:
print(f"Compile error {output_file_path}: {e}")
domains = lines_from_file(file_path)
name = os.path.splitext(filename)[0]
srs_rule(name, [{"domain_suffix": domains}])
def prepare_dat_domains(domains, output_name, dirs=[]):
def prepare_dat_domains(domains, output_name, dirs=None):
output_lists_directory = 'geosite_data'
os.makedirs(output_lists_directory, exist_ok=True)
domain_attrs = {domain: [] for domain in domains}
for directory in dirs:
for directory in (dirs or []):
if not os.path.isdir(directory):
continue
for filename in os.listdir(directory):
@@ -407,8 +168,6 @@ def prepare_dat_domains(domains, output_name, dirs=[]):
out_f.write(f"{line}\n")
def prepare_dat_combined(dirs):
import shutil
output_lists_directory = 'geosite_data'
os.makedirs(output_lists_directory, exist_ok=True)
@@ -426,18 +185,65 @@ def prepare_dat_combined(dirs):
shutil.copyfile(source_path, destination_path)
def parse_geosite_line(line):
from proto import geosite_pb2
parts = line.split()
raw_domain = parts[0]
attrs = [p.lstrip('@') for p in parts[1:] if p.startswith('@')]
if raw_domain.startswith('full:'):
domain_type = geosite_pb2.Domain.Full
value = raw_domain[5:]
elif raw_domain.startswith('regexp:'):
domain_type = geosite_pb2.Domain.Regex
value = raw_domain[7:]
elif raw_domain.startswith('keyword:'):
domain_type = geosite_pb2.Domain.Plain
value = raw_domain[8:]
else:
domain_type = geosite_pb2.Domain.RootDomain
value = raw_domain.lstrip('.')
return domain_type, value, attrs
def generate_dat_domains(data_path='geosite_data', output_name='geosite.dat', output_directory='DAT'):
from proto import geosite_pb2
os.makedirs(output_directory, exist_ok=True)
try:
subprocess.run(
["domain-list-community", f"-datapath={data_path}", f"-outputname={output_name}", f"-outputdir={output_directory}"],
check=True,
stdout=subprocess.DEVNULL
)
print(f"Compiled .dat file: {output_directory}/{output_name}")
except subprocess.CalledProcessError as e:
print(f"Compile error {data_path}: {e}")
geo_site_list = geosite_pb2.GeoSiteList()
for filename in sorted(os.listdir(data_path)):
file_path = os.path.join(data_path, filename)
if not os.path.isfile(file_path):
continue
geo_site = geo_site_list.entry.add()
geo_site.country_code = filename.upper()
with open(file_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
domain_type, value, attrs = parse_geosite_line(line)
domain = geo_site.domain.add()
domain.type = domain_type
domain.value = value
for attr in attrs:
attribute = domain.attribute.add()
attribute.key = attr
attribute.bool_value = True
output_path = os.path.join(output_directory, output_name)
with open(output_path, 'wb') as f:
f.write(geo_site_list.SerializeToString())
print(f"Compiled .dat file: {output_path}")
if __name__ == '__main__':
# Russia inside
@@ -482,31 +288,32 @@ if __name__ == '__main__':
Path(temp_file).unlink()
# Sing-box ruleset main
russia_inside = domains_from_file('Russia/inside-raw.lst')
russia_outside = domains_from_file('Russia/outside-raw.lst')
ukraine_inside = domains_from_file('Ukraine/inside-raw.lst')
generate_srs_domains(russia_inside, 'russia_inside')
generate_srs_domains(russia_outside, 'russia_outside')
generate_srs_domains(ukraine_inside, 'ukraine_inside')
russia_inside = lines_from_file('Russia/inside-raw.lst')
russia_outside = lines_from_file('Russia/outside-raw.lst')
ukraine_inside = lines_from_file('Ukraine/inside-raw.lst')
srs_rule('russia_inside', [{"domain_suffix": russia_inside}])
srs_rule('russia_outside', [{"domain_suffix": russia_outside}])
srs_rule('ukraine_inside', [{"domain_suffix": ukraine_inside}])
# Sing-box categories
directories = ['Categories', 'Services']
generate_srs_for_categories(directories)
# Sing-box subnets + domains
generate_srs_combined(DiscordSubnets, "Services/discord.lst")
generate_srs_combined(TwitterSubnets, "Services/twitter.lst")
generate_srs_combined(MetaSubnets, "Services/meta.lst")
generate_srs_combined(TelegramSubnets, "Services/telegram.lst")
generate_srs_combined(CloudflareSubnets, "Services/cloudflare.lst")
generate_srs_combined(HetznerSubnets, "Services/hetzner.lst")
generate_srs_combined(OVHSubnets, "Services/ovh.lst")
generate_srs_combined(DigitalOceanSubnets, "Services/digitalocean.lst")
generate_srs_combined(CloudfrontSubnets, "Services/cloudfront.lst")
generate_srs_combined(RobloxSubnets, "Services/roblox.lst")
for service in SUBNET_SERVICES:
if service == 'discord':
continue
subnets = lines_from_file(f'Subnets/IPv4/{service}.lst')
domains = lines_from_file(f'Services/{service}.lst')
srs_rule(service, [{"domain_suffix": domains, "ip_cidr": subnets}])
# Sing-box voice for messengers
generate_srs_combined(TelegramSubnets, "voice_messengers")
# Discord (domains + UDP subnets on high ports)
discord_subnets = lines_from_file('Subnets/IPv4/discord.lst')
discord_domains = lines_from_file('Services/discord.lst')
srs_rule('discord', [
{"domain_suffix": discord_domains},
{"network": ["udp"], "ip_cidr": discord_subnets, "port_range": ["50000:65535"]},
])
# Xray domains
prepare_dat_domains(russia_inside, 'russia-inside', directories)

View File

@@ -1,30 +1,28 @@
#!/usr/bin/python3.10
#!/usr/bin/env python3
import ipaddress
import urllib.request
import os
import shutil
import json
import sys
BGP_TOOLS_URL = 'https://bgp.tools/table.txt'
HEADERS = { 'User-Agent': 'itdog.info - hi@itdog.info' }
AS_FILE = 'AS.lst'
RIPE_STAT_URL = 'https://stat.ripe.net/data/announced-prefixes/data.json?resource=AS{}'
USER_AGENT = 'allow-domains/1.0'
IPv4_DIR = 'Subnets/IPv4'
IPv6_DIR = 'Subnets/IPv6'
AS_META = ['32934','63293','54115','149642']
AS_TWITTER = ['13414']
AS_HETZNER = ['24940']
AS_OVH = ['16276']
AS_DIGITALOCEAN = ['14061']
ASN_SERVICES = {
'meta.lst': ['32934', '63293', '54115', '149642'],
'twitter.lst': ['13414'],
'hetzner.lst': ['24940'],
'ovh.lst': ['16276'],
'digitalocean.lst': ['14061'],
}
META = 'meta.lst'
TWITTER = 'twitter.lst'
ASN_TELEGRAM = ['44907', '59930', '62014', '62041', '211157']
TELEGRAM = 'telegram.lst'
CLOUDFLARE = 'cloudflare.lst'
HETZNER = 'hetzner.lst'
OVH = 'ovh.lst'
DIGITALOCEAN = 'digitalocean.lst'
CLOUDFRONT = 'cloudfront.lst'
# From https://iplist.opencck.org/
@@ -38,93 +36,103 @@ TELEGRAM_CIDR_URL = 'https://core.telegram.org/resources/cidr.txt'
CLOUDFLARE_V4='https://www.cloudflare.com/ips-v4'
CLOUDFLARE_V6='https://www.cloudflare.com/ips-v6'
AWS_IP_RANGES_URL='https://ip-ranges.amazonaws.com/ip-ranges.json'
# https://support.google.com/a/answer/1279090
GOOGLE_MEET = 'google_meet.lst'
GOOGLE_MEET_V4 = [
'74.125.247.128/32',
'74.125.250.0/24',
'142.250.82.0/24',
]
GOOGLE_MEET_V6 = [
'2001:4860:4864:4:8000::/128',
'2001:4860:4864:5::/64',
'2001:4860:4864:6::/64',
]
subnet_list = []
AWS_CIDR_URL='https://ip-ranges.amazonaws.com/ip-ranges.json'
def make_request(url):
    """Build a urllib Request for `url` carrying the project User-Agent header."""
    # Supplying the header via the constructor is equivalent to calling
    # add_header() afterwards: Request.__init__ feeds the dict through add_header.
    return urllib.request.Request(url, headers={'User-Agent': USER_AGENT})
def subnet_summarization(subnet_list):
subnets = [ipaddress.ip_network(subnet) for subnet in subnet_list]
subnets = [ipaddress.ip_network(subnet, strict=False) for subnet in subnet_list]
return list(ipaddress.collapse_addresses(subnets))
def process_subnets(subnet_list, target_as):
def fetch_asn_prefixes(asn_list):
ipv4_subnets = []
ipv6_subnets = []
for subnet_str, as_number in subnet_list:
for asn in asn_list:
url = RIPE_STAT_URL.format(asn)
req = make_request(url)
try:
subnet = ipaddress.ip_network(subnet_str)
if as_number in target_as:
if subnet.version == 4:
ipv4_subnets.append(subnet_str)
elif subnet.version == 6:
ipv6_subnets.append(subnet_str)
except ValueError:
print(f"Invalid subnet: {subnet_str}")
sys.exit(1)
ipv4_merged = subnet_summarization(ipv4_subnets)
ipv6_merged = subnet_summarization(ipv6_subnets)
return ipv4_merged, ipv6_merged
def download_ready_subnets(url_v4, url_v6):
ipv4_subnets = []
ipv6_subnets = []
urls = [(url_v4, 4), (url_v6, 6)]
for url, version in urls:
req = urllib.request.Request(url, headers=HEADERS)
try:
with urllib.request.urlopen(req) as response:
if response.status == 200:
subnets = response.read().decode('utf-8').splitlines()
for subnet_str in subnets:
try:
subnet = ipaddress.ip_network(subnet_str)
if subnet.version == 4:
ipv4_subnets.append(subnet_str)
elif subnet.version == 6:
ipv6_subnets.append(subnet_str)
except ValueError:
print(f"Invalid subnet: {subnet_str}")
sys.exit(1)
with urllib.request.urlopen(req, timeout=30) as response:
data = json.loads(response.read().decode('utf-8'))
for entry in data['data']['prefixes']:
prefix = entry['prefix']
try:
network = ipaddress.ip_network(prefix)
if network.version == 4:
ipv4_subnets.append(prefix)
else:
ipv6_subnets.append(prefix)
except ValueError:
print(f"Invalid subnet: {prefix}")
sys.exit(1)
except Exception as e:
print(f"Query error: {e}")
print(f"Error fetching AS{asn}: {e}")
sys.exit(1)
return ipv4_subnets, ipv6_subnets
def download_ready_split_subnets(url):
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response:
subnets = response.read().decode('utf-8').splitlines()
def download_subnets(*urls):
ipv4_subnets = []
ipv6_subnets = []
for url in urls:
req = make_request(url)
try:
with urllib.request.urlopen(req, timeout=30) as response:
subnets = response.read().decode('utf-8').splitlines()
for subnet_str in subnets:
try:
network = ipaddress.ip_network(subnet_str, strict=False)
if network.version == 4:
ipv4_subnets.append(subnet_str)
else:
ipv6_subnets.append(subnet_str)
except ValueError:
print(f"Invalid subnet: {subnet_str}")
sys.exit(1)
except Exception as e:
print(f"Query error {url}: {e}")
sys.exit(1)
ipv4_subnets = [cidr for cidr in subnets if isinstance(ipaddress.ip_network(cidr, strict=False), ipaddress.IPv4Network)]
ipv6_subnets = [cidr for cidr in subnets if isinstance(ipaddress.ip_network(cidr, strict=False), ipaddress.IPv6Network)]
return ipv4_subnets, ipv6_subnets
def download_aws_cloudfront_subnets():
ipv4_subnets = []
ipv6_subnets = []
req = urllib.request.Request(AWS_IP_RANGES_URL, headers=HEADERS)
req = make_request(AWS_CIDR_URL)
try:
with urllib.request.urlopen(req) as response:
if response.status == 200:
data = json.loads(response.read().decode('utf-8'))
for prefix in data.get('prefixes', []):
if prefix.get('service') == 'CLOUDFRONT':
ipv4_subnets.append(prefix['ip_prefix'])
for prefix in data.get('ipv6_prefixes', []):
if prefix.get('service') == 'CLOUDFRONT':
ipv6_subnets.append(prefix['ipv6_prefix'])
with urllib.request.urlopen(req, timeout=30) as response:
data = json.loads(response.read().decode('utf-8'))
for prefix in data.get('prefixes', []):
if prefix.get('service') == 'CLOUDFRONT':
ipv4_subnets.append(prefix['ip_prefix'])
for prefix in data.get('ipv6_prefixes', []):
if prefix.get('service') == 'CLOUDFRONT':
ipv6_subnets.append(prefix['ipv6_prefix'])
except Exception as e:
print(f"Error downloading AWS CloudFront ranges: {e}")
sys.exit(1)
return ipv4_subnets, ipv6_subnets
def write_subnets_to_file(subnets, filename):
@@ -138,63 +146,47 @@ def copy_file_legacy(src_filename):
shutil.copy(src_filename, os.path.join(os.path.dirname(src_filename), new_filename))
if __name__ == '__main__':
request = urllib.request.Request(BGP_TOOLS_URL, headers=HEADERS)
with urllib.request.urlopen(request) as response:
for line in response:
decoded_line = line.decode('utf-8').strip()
subnet, as_number = decoded_line.split()
subnet_list.append((subnet, as_number))
# Meta
ipv4_merged_meta, ipv6_merged_meta = process_subnets(subnet_list, AS_META)
write_subnets_to_file(ipv4_merged_meta, f'{IPv4_DIR}/{META}')
write_subnets_to_file(ipv6_merged_meta, f'{IPv6_DIR}/{META}')
# Twitter
ipv4_merged_twitter, ipv6_merged_twitter = process_subnets(subnet_list, AS_TWITTER)
write_subnets_to_file(ipv4_merged_twitter, f'{IPv4_DIR}/{TWITTER}')
write_subnets_to_file(ipv6_merged_twitter, f'{IPv6_DIR}/{TWITTER}')
# Hetzner
ipv4_merged_hetzner, ipv6_merged_hetzner = process_subnets(subnet_list, AS_HETZNER)
write_subnets_to_file(ipv4_merged_hetzner, f'{IPv4_DIR}/{HETZNER}')
write_subnets_to_file(ipv6_merged_hetzner, f'{IPv6_DIR}/{HETZNER}')
# OVH
ipv4_merged_ovh, ipv6_merged_ovh = process_subnets(subnet_list, AS_OVH)
write_subnets_to_file(ipv4_merged_ovh, f'{IPv4_DIR}/{OVH}')
write_subnets_to_file(ipv6_merged_ovh, f'{IPv6_DIR}/{OVH}')
# Digital Ocean
ipv4_merged_digitalocean, ipv6_merged_digitalocean = process_subnets(subnet_list, AS_DIGITALOCEAN)
write_subnets_to_file(ipv4_merged_digitalocean, f'{IPv4_DIR}/{DIGITALOCEAN}')
write_subnets_to_file(ipv6_merged_digitalocean, f'{IPv6_DIR}/{DIGITALOCEAN}')
# Services from ASN: fetch announced prefixes for each service's ASN list
# from RIPEstat and write collapsed IPv4/IPv6 files (meta, twitter, hetzner,
# ovh, digitalocean). Fix: the f-string fields here had been mangled into the
# literal text "(unknown)", which would write every list to a file literally
# named "(unknown)" instead of the per-service filename from ASN_SERVICES.
for filename, asn_list in ASN_SERVICES.items():
    print(f'Fetching {filename}...')
    ipv4, ipv6 = fetch_asn_prefixes(asn_list)
    # Collapse adjacent/overlapping prefixes before writing each list file.
    write_subnets_to_file(subnet_summarization(ipv4), f'{IPv4_DIR}/{filename}')
    write_subnets_to_file(subnet_summarization(ipv6), f'{IPv6_DIR}/{filename}')
# Discord voice
ipv4_discord, ipv6_discord = download_ready_subnets(DISCORD_VOICE_V4, DISCORD_VOICE_V6)
print(f'Fetching {DISCORD}...')
ipv4_discord, ipv6_discord = download_subnets(DISCORD_VOICE_V4, DISCORD_VOICE_V6)
write_subnets_to_file(ipv4_discord, f'{IPv4_DIR}/{DISCORD}')
write_subnets_to_file(ipv6_discord, f'{IPv6_DIR}/{DISCORD}')
# Telegram
ipv4_telegram, ipv6_telegram = download_ready_split_subnets(TELEGRAM_CIDR_URL)
print(f'Fetching {TELEGRAM}...')
ipv4_telegram_file, ipv6_telegram_file = download_subnets(TELEGRAM_CIDR_URL)
ipv4_telegram_asn, ipv6_telegram_asn = fetch_asn_prefixes(ASN_TELEGRAM)
ipv4_telegram = subnet_summarization(ipv4_telegram_file + ipv4_telegram_asn)
ipv6_telegram = subnet_summarization(ipv6_telegram_file + ipv6_telegram_asn)
write_subnets_to_file(ipv4_telegram, f'{IPv4_DIR}/{TELEGRAM}')
write_subnets_to_file(ipv6_telegram, f'{IPv6_DIR}/{TELEGRAM}')
# Cloudflare
ipv4_cloudflare, ipv6_cloudflare = download_ready_subnets(CLOUDFLARE_V4, CLOUDFLARE_V6)
print(f'Fetching {CLOUDFLARE}...')
ipv4_cloudflare, ipv6_cloudflare = download_subnets(CLOUDFLARE_V4, CLOUDFLARE_V6)
write_subnets_to_file(ipv4_cloudflare, f'{IPv4_DIR}/{CLOUDFLARE}')
write_subnets_to_file(ipv6_cloudflare, f'{IPv6_DIR}/{CLOUDFLARE}')
# Google Meet
print(f'Writing {GOOGLE_MEET}...')
write_subnets_to_file(GOOGLE_MEET_V4, f'{IPv4_DIR}/{GOOGLE_MEET}')
write_subnets_to_file(GOOGLE_MEET_V6, f'{IPv6_DIR}/{GOOGLE_MEET}')
# AWS CloudFront
print(f'Fetching {CLOUDFRONT}...')
ipv4_cloudfront, ipv6_cloudfront = download_aws_cloudfront_subnets()
write_subnets_to_file(ipv4_cloudfront, f'{IPv4_DIR}/{CLOUDFRONT}')
write_subnets_to_file(ipv6_cloudfront, f'{IPv6_DIR}/{CLOUDFRONT}')
# Legacy name
copy_file_legacy(f'{IPv4_DIR}/{META}')
copy_file_legacy(f'{IPv6_DIR}/{META}')
copy_file_legacy(f'{IPv4_DIR}/{TWITTER}')
copy_file_legacy(f'{IPv6_DIR}/{TWITTER}')
copy_file_legacy(f'{IPv4_DIR}/{DISCORD}')
copy_file_legacy(f'{IPv6_DIR}/{DISCORD}')
# Legacy copies with capitalized names (e.g. meta.lst -> Meta.lst)
LEGACY_FILES = ['meta.lst', 'twitter.lst', 'discord.lst']
for legacy_file in LEGACY_FILES:
copy_file_legacy(f'{IPv4_DIR}/{legacy_file}')
copy_file_legacy(f'{IPv6_DIR}/{legacy_file}')

0
proto/__init__.py Normal file
View File

37
proto/geosite.proto Normal file
View File

@@ -0,0 +1,37 @@
// Source: github.com/v2fly/v2ray-core/app/router/routercommon/common.proto
// Schema for the geosite DAT payload generated by this repo's Python tooling.
// NOTE(review): the wire format must stay byte-compatible with the upstream
// v2ray-core definition cited above — do not renumber or retype fields.
syntax = "proto3";
package geosite;
option go_package = "geosite";
// A single domain-matching rule.
message Domain {
// How `value` is interpreted (semantics per the upstream v2ray-core schema).
enum Type {
Plain = 0;
Regex = 1;
RootDomain = 2;
Full = 3;
}
Type type = 1;
// The pattern/domain text; meaning depends on `type`.
string value = 2;
// Optional key/value tag on a rule; the value is either a bool or an int64.
message Attribute {
string key = 1;
oneof typed_value {
bool bool_value = 2;
int64 int_value = 3;
}
}
repeated Attribute attribute = 3;
}
// A named group of rules; `country_code` is the category label (country or
// service name) under which the domains are looked up.
message GeoSite {
string country_code = 1;
repeated Domain domain = 2;
}
// Top-level container: a serialized DAT file is one GeoSiteList.
message GeoSiteList {
repeated GeoSite entry = 1;
}

45
proto/geosite_pb2.py Normal file
View File

@@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# source: proto/geosite.proto
# Protobuf Python Version: 6.32.1
# NOTE(review): machine-generated module — any hand edits (including these
# comments) are lost when protoc regenerates it from proto/geosite.proto.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# Fails fast at import time if the installed protobuf runtime is older than
# the gencode version below (hence the protobuf>=6.32.1 pin in requirements).
_runtime_version.ValidateProtobufRuntimeVersion(
_runtime_version.Domain.PUBLIC,
6,
32,
1,
'',
'proto/geosite.proto'
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Serialized FileDescriptorProto for proto/geosite.proto; all message/enum
# classes below are built from this blob at import time.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13proto/geosite.proto\x12\x07geosite\"\xf5\x01\n\x06\x44omain\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.geosite.Domain.Type\x12\r\n\x05value\x18\x02 \x01(\t\x12,\n\tattribute\x18\x03 \x03(\x0b\x32\x19.geosite.Domain.Attribute\x1aR\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x14\n\nbool_value\x18\x02 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x03 \x01(\x03H\x00\x42\r\n\x0btyped_value\"6\n\x04Type\x12\t\n\x05Plain\x10\x00\x12\t\n\x05Regex\x10\x01\x12\x0e\n\nRootDomain\x10\x02\x12\x08\n\x04\x46ull\x10\x03\"@\n\x07GeoSite\x12\x14\n\x0c\x63ountry_code\x18\x01 \x01(\t\x12\x1f\n\x06\x64omain\x18\x02 \x03(\x0b\x32\x0f.geosite.Domain\".\n\x0bGeoSiteList\x12\x1f\n\x05\x65ntry\x18\x01 \x03(\x0b\x32\x10.geosite.GeoSiteB\tZ\x07geositeb\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'proto.geosite_pb2', _globals)
# When the C descriptor implementation is not in use, attach the serialized
# options and byte offsets of each message/enum within the descriptor blob.
if not _descriptor._USE_C_DESCRIPTORS:
_globals['DESCRIPTOR']._loaded_options = None
_globals['DESCRIPTOR']._serialized_options = b'Z\007geosite'
_globals['_DOMAIN']._serialized_start=33
_globals['_DOMAIN']._serialized_end=278
_globals['_DOMAIN_ATTRIBUTE']._serialized_start=140
_globals['_DOMAIN_ATTRIBUTE']._serialized_end=222
_globals['_DOMAIN_TYPE']._serialized_start=224
_globals['_DOMAIN_TYPE']._serialized_end=278
_globals['_GEOSITE']._serialized_start=280
_globals['_GEOSITE']._serialized_end=344
_globals['_GEOSITELIST']._serialized_start=346
_globals['_GEOSITELIST']._serialized_end=392
# @@protoc_insertion_point(module_scope)

View File

@@ -1 +1,2 @@
tldextract
protobuf>=6.32.1

View File

@@ -4,6 +4,7 @@ pkgs.mkShell {
buildInputs = with pkgs; [
python312
python312Packages.tldextract
python312Packages.protobuf
sing-box
];