Initial commit: Tamigo CLI with Gitea Actions and global installation support
2  .env  Normal file
@@ -0,0 +1,2 @@
TAMIGO_EMAIL=daniel.dybing@me.com
TAMIGO_PASSWORD=Rarlinkiso966!
59  .gitea/workflows/build.yml  Normal file
@@ -0,0 +1,59 @@
name: Build Tamigo CLI

on:
  push:
    branches: [ "main", "master" ]
  pull_request:
    branches: [ "main", "master" ]
  workflow_dispatch:

jobs:
  build-linux:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Build with PyInstaller
        run: |
          pyinstaller --onefile --name tamigo-cli tamigo.py

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: tamigo-cli-linux
          path: dist/tamigo-cli

  build-windows:
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Build with PyInstaller
        run: |
          pyinstaller --onefile --name tamigo-cli tamigo.py

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: tamigo-cli-windows
          path: dist/tamigo-cli.exe
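The same PyInstaller step can be reproduced locally before pushing; a minimal sketch, assuming the virtual environment from the README is active and relying on PyInstaller's default `dist/` output location:

```bash
# Reproduce the CI build step locally (sketch; requirements.txt already pins pyinstaller)
pip install -r requirements.txt
pyinstaller --onefile --name tamigo-cli tamigo.py
./dist/tamigo-cli   # PyInstaller writes the single-file binary to dist/ by default
```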
BIN  Application Services - Web Services (API) tamigo Help Center.pdf  Normal file
Binary file not shown.
31  README.md  Normal file
@@ -0,0 +1,31 @@
# Tamigo CLI

A small CLI application to interface with Tamigo.

## Features
- Login with your Tamigo credentials.
- Calculate work days (check-ins) for the current month, year, and last 365 days.
- View recent work shifts.
- View your Tamigo profile info.

## Installation

1. Create a virtual environment:
```bash
python3 -m venv venv
source venv/bin/activate
```

2. Install dependencies:
```bash
pip install -r requirements.txt
```

## Usage

Run the application:
```bash
python tamigo.py
```

Follow the prompts to log in and select actions from the menu.
Binary file not shown.
19  pyproject.toml  Normal file
@@ -0,0 +1,19 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "tamigo-cli"
version = "0.1.0"
description = "A CLI tool for Tamigo"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "requests",
    "questionary",
    "rich",
    "python-dotenv",
]

[project.scripts]
tamigo-cli = "tamigo:main"
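Because `pyproject.toml` declares a `tamigo-cli` console script pointing at `tamigo:main`, the tool can also be installed globally instead of being run from the source tree. A sketch of the "global installation support" mentioned in the commit title; the use of pipx is an assumption, any PEP 517-capable installer works:

```bash
# Isolated global install (assumes pipx is available), run from the project root
pipx install .

# Or a plain pip install into the current environment
pip install .

tamigo-cli   # entry point defined under [project.scripts]
```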
5  requirements.txt  Normal file
@@ -0,0 +1,5 @@
requests
questionary
rich
python-dotenv
pyinstaller
346  tamigo.py  Normal file
@@ -0,0 +1,346 @@
import requests
import questionary
from rich.console import Console
from rich.table import Table
import json
import os
import re
from datetime import datetime, timedelta
from dotenv import load_dotenv

# Load environment variables from .env if it exists
load_dotenv()

console = Console()

BASE_URL = "https://api.tamigo.com"

def parse_tamigo_date(date_str):
    """
    Parses Tamigo date formats:
    - /Date(1600898400000+0200)/
    - 2023-10-27T08:00:00
    """
    if not date_str:
        return None

    # Handle /Date(1600898400000+0200)/
    match = re.search(r"/Date\((\d+)([+-]\d+)?\)/", date_str)
    if match:
        ms = int(match.group(1))
        # Convert ms to seconds
        return datetime.fromtimestamp(ms / 1000.0)

    # Handle ISO format
    try:
        # datetime.fromisoformat handles T and optional Z/+offset in Python 3.7+
        return datetime.fromisoformat(date_str.replace("Z", "+00:00"))
    except:
        return None

class TamigoClient:
    def __init__(self):
        self.session_token = None
        self.user_info = None
        self.employee_id = None

    def login(self, email, password):
        # Try different URL patterns based on docs and common patterns
        urls = [
            f"{BASE_URL}/login/application",
            f"{BASE_URL}/Login",
            f"{BASE_URL}/Login/",
            f"{BASE_URL}/login",
            f"{BASE_URL}/login/"
        ]

        last_error = ""
        for url in urls:
            payload = {
                "Email": email,
                "Password": password,
                "Name": "TamigoCLI",  # For /login/application
                "Key": password  # For /login/application
            }
            try:
                headers = {
                    "Content-Type": "application/json",
                    "Accept": "application/json"
                }
                response = requests.post(url, json=payload, headers=headers, timeout=15)

                if response.status_code == 200:
                    try:
                        data = response.json()
                        # Token can be in different fields
                        self.session_token = data.get("SessionToken") or data.get("securitytoken") or data.get("Token")
                        if self.session_token:
                            self.user_info = data
                            self.employee_id = data.get("EmployeeId")
                            return True
                    except json.JSONDecodeError:
                        text = response.text.strip().strip('"')
                        if text and len(text) > 20:
                            self.session_token = text
                            self.user_info = {"Email": email}
                            return True

                last_error = f"URL: {url}, Status: {response.status_code}"
            except Exception as e:
                last_error = f"URL: {url}, Error: {str(e)}"

        console.print(f"[red]Login failed.[/red]")
        console.print(f"[dim]Debug: {last_error}[/dim]")
        return False

    def get_employee_id(self):
        if self.employee_id:
            return self.employee_id

        headers = {
            "x-tamigo-token": self.session_token,
            "securitytoken": self.session_token,
            "Accept": "application/json"
        }

        # Method A: User Info from Login
        if self.user_info and self.user_info.get("EmployeeId"):
            self.employee_id = self.user_info.get("EmployeeId")
            return self.employee_id

        # Method B: My Overview
        url = f"{BASE_URL}/shifts/myoverview"
        try:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                shifts = response.json()
                if shifts and len(shifts) > 0:
                    self.employee_id = shifts[0].get("EmployeeId")
                    return self.employee_id
        except:
            pass

        return None

    def get_employee_actual_shifts(self, start_date_dt, end_date_dt):
        """
        Fetches actual worked shifts using employee-accessible endpoints.
        Tamigo's 'past' endpoint often limits to 60 days, so we fetch in chunks.
        """
        if not self.session_token:
            return None

        headers = {
            "x-tamigo-token": self.session_token,
            "securitytoken": self.session_token,
            "Accept": "application/json"
        }

        all_shifts = []
        days_diff = (end_date_dt - start_date_dt).days

        # Fetch in 60-day chunks moving backwards from end_date
        for i in range(0, days_diff + 1, 60):
            target_date = (end_date_dt - timedelta(days=i)).strftime("%Y-%m-%d")

            # Stop if we've moved past the start date
            current_dt = end_date_dt - timedelta(days=i)
            if current_dt < start_date_dt - timedelta(days=60):
                break

            url = f"{BASE_URL}/actualshifts/past/{target_date}"
            try:
                response = requests.get(url, headers=headers)
                if response.status_code == 200:
                    data = response.json()
                    if isinstance(data, list):
                        all_shifts.extend(data)
                elif response.status_code == 401:
                    response = requests.get(url, params={"securitytoken": self.session_token})
                    if response.status_code == 200:
                        all_shifts.extend(response.json())
            except Exception as e:
                console.print(f"[dim]Failed to fetch chunk at {target_date}: {e}[/dim]")

        # Supplement with /shifts/period/
        start_str = start_date_dt.strftime("%Y-%m-%d")
        end_str = end_date_dt.strftime("%Y-%m-%d")
        url_period = f"{BASE_URL}/shifts/period/{start_str}/{end_str}"
        try:
            response = requests.get(url_period, headers=headers)
            if response.status_code == 200:
                data = response.json()
                if isinstance(data, list):
                    all_shifts.extend(data)
        except:
            pass

        return all_shifts

def calculate_checkins(client):
    range_choice = questionary.select(
        "Select period:",
        choices=[
            "Last 365 days",
            "Last 30 days",
            "This Month",
            "This Year",
            "Custom range..."
        ]
    ).ask()

    end_date_dt = datetime.now()

    if range_choice == "Last 365 days":
        start_date_dt = end_date_dt - timedelta(days=365)
    elif range_choice == "Last 30 days":
        start_date_dt = end_date_dt - timedelta(days=30)
    elif range_choice == "This Month":
        start_date_dt = end_date_dt.replace(day=1)
    elif range_choice == "This Year":
        start_date_dt = end_date_dt.replace(month=1, day=1)
    else:
        # Custom range
        while True:
            start_str = questionary.text("Start date (YYYY-MM-DD):", default=(datetime.now() - timedelta(days=30)).strftime("%Y-%m-%d")).ask()
            end_str = questionary.text("End date (YYYY-MM-DD):", default=datetime.now().strftime("%Y-%m-%d")).ask()
            try:
                start_date_dt = datetime.strptime(start_str, "%Y-%m-%d")
                end_date_dt = datetime.strptime(end_str, "%Y-%m-%d")
                if start_date_dt > end_date_dt:
                    console.print("[red]Start date must be before end date![/red]")
                    continue
                break
            except ValueError:
                console.print("[red]Invalid format. Please use YYYY-MM-DD.[/red]")

    with console.status(f"[bold green]Fetching work history from {start_date_dt.strftime('%Y-%m-%d')} to {end_date_dt.strftime('%Y-%m-%d')}..."):
        data = client.get_employee_actual_shifts(start_date_dt, end_date_dt)

    if data:
        work_days = {}  # date_str -> {hours, text}

        # Filter data to ensure it's strictly within our requested range
        # (Since the API chunks might return slightly more)
        requested_start = start_date_dt.date()
        requested_end = end_date_dt.date()

        for item in data:
            raw_date = item.get("Date") or item.get("StartTime") or item.get("CheckInTime")
            dt = parse_tamigo_date(raw_date)

            if dt:
                item_date = dt.date()
                if not (requested_start <= item_date <= requested_end):
                    continue

                date_str = dt.strftime("%Y-%m-%d")
                is_absent = item.get("IsAbsent", False)

                hours = (item.get("ActualShiftHours") or
                         item.get("CheckInOutShiftHours") or
                         item.get("Hours") or 0)

                if hours == 0 and item.get("StartTime") and item.get("EndTime"):
                    st = parse_tamigo_date(item.get("StartTime"))
                    et = parse_tamigo_date(item.get("EndTime"))
                    if st and et:
                        hours = (et - st).total_seconds() / 3600.0

                has_actual = False
                if hours > 0 or item.get("CheckInTime") or item.get("ActualStartTime"):
                    has_actual = True
                if item.get("ActualShift") and item["ActualShift"].get("Shift", 0) > 0:
                    has_actual = True
                    hours = item["ActualShift"]["Shift"]

                if item.get("StartTime") and not is_absent:
                    has_actual = True

                if has_actual and not is_absent:
                    if date_str not in work_days:
                        work_days[date_str] = {
                            "hours": float(hours),
                            "text": item.get("ActualShiftText") or item.get("ActivityName") or item.get("DepartmentName") or "Worked"
                        }
                    else:
                        work_days[date_str]["hours"] += float(hours)

        if not work_days:
            console.print("[yellow]No work records found for this period.[/yellow]")
            return

        all_dates = sorted(work_days.keys(), reverse=True)

        table = Table(title="Worked Days in Period", show_header=True, header_style="bold magenta")
        table.add_column("Date", style="cyan")
        table.add_column("Hours", justify="right")
        table.add_column("Details", style="dim")

        for day in all_dates:
            info = work_days[day]
            table.add_row(day, f"{info['hours']:.2f}", str(info['text']))

        console.print(table)

        console.print(f"\n[bold]Work Statistics:[/bold]")
        console.print(f" - Period: [cyan]{start_date_dt.strftime('%Y-%m-%d')} to {end_date_dt.strftime('%Y-%m-%d')}[/cyan]")
        console.print(f" - Days Worked: [bold green]{len(all_dates)}[/bold green]")
        total_hours = sum(d['hours'] for d in work_days.values())
        console.print(f" - Total Hours: [bold green]{total_hours:.2f}[/bold green]")
    else:
        console.print("[yellow]Could not retrieve any shift data for this period.[/yellow]")


def show_profile(client):
    if client.user_info:
        console.print_json(data=client.user_info)
    else:
        console.print("[yellow]No profile info available. Are you logged in?[/yellow]")


def main():
    client = TamigoClient()

    console.print("[bold blue]Welcome to Tamigo CLI[/bold blue]")

    email = os.getenv("TAMIGO_EMAIL")
    if not email:
        email = questionary.text("Email:").ask()

    password = os.getenv("TAMIGO_PASSWORD")
    if not password:
        password = questionary.password("Password:").ask()

    if not email or not password:
        return

    with console.status("[bold green]Logging in..."):
        success = client.login(email, password)

    if success:
        console.print("[bold green]Login successful![/bold green]")
        menu(client)
    else:
        console.print("[bold red]Login failed. Please check your credentials.[/bold red]")

def menu(client):
    while True:
        choice = questionary.select(
            "What would you like to do?",
            choices=[
                "Calculate actual work days",
                "Show profile info",
                "Logout and Exit"
            ]
        ).ask()

        if choice == "Calculate actual work days":
            calculate_checkins(client)
        elif choice == "Show profile info":
            show_profile(client)
        else:
            break

if __name__ == "__main__":
    main()
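Since `main()` reads `TAMIGO_EMAIL` and `TAMIGO_PASSWORD` via `load_dotenv()` before falling back to interactive prompts, the login prompts can be skipped by providing a `.env` file, as the committed `.env` above does. A minimal sketch with placeholder credentials:

```bash
# Sketch: non-interactive login via a local .env file (credentials are placeholders)
cat > .env <<'EOF'
TAMIGO_EMAIL=you@example.com
TAMIGO_PASSWORD=your-password
EOF
python tamigo.py
```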
247  venv/bin/Activate.ps1  Normal file
@@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.

.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.

.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.

.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').

.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.

.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.

.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.

.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.

.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:

PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser

For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170

#>
Param(
    [Parameter(Mandatory = $false)]
    [String]
    $VenvDir,
    [Parameter(Mandatory = $false)]
    [String]
    $Prompt
)

<# Function declarations --------------------------------------------------- #>

<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.

.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.

#>
function global:deactivate ([switch]$NonDestructive) {
    # Revert to original values

    # The prior prompt:
    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
    }

    # The prior PYTHONHOME:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
    }

    # The prior PATH:
    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
    }

    # Just remove the VIRTUAL_ENV altogether:
    if (Test-Path -Path Env:VIRTUAL_ENV) {
        Remove-Item -Path env:VIRTUAL_ENV
    }

    # Just remove VIRTUAL_ENV_PROMPT altogether.
    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
    }

    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
    }

    # Leave deactivate function in the global namespace if requested:
    if (-not $NonDestructive) {
        Remove-Item -Path function:deactivate
    }
}

<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.

For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.

If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.

.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
    [String]
    $ConfigDir
) {
    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"

    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue

    # An empty map will be returned if no config file is found.
    $pyvenvConfig = @{ }

    if ($pyvenvConfigPath) {

        Write-Verbose "File exists, parse `key = value` lines"
        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath

        $pyvenvConfigContent | ForEach-Object {
            $keyval = $PSItem -split "\s*=\s*", 2
            if ($keyval[0] -and $keyval[1]) {
                $val = $keyval[1]

                # Remove extraneous quotations around a string value.
                if ("'""".Contains($val.Substring(0, 1))) {
                    $val = $val.Substring(1, $val.Length - 2)
                }

                $pyvenvConfig[$keyval[0]] = $val
                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
            }
        }
    }
    return $pyvenvConfig
}


<# Begin Activate script --------------------------------------------------- #>

# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath

Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"

# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
    Write-Verbose "VenvDir=$VenvDir"
}

# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir

# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
    Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
        Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
        $Prompt = $pyvenvCfg['prompt'];
    }
    else {
        Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
        Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
        $Prompt = Split-Path -Path $venvDir -Leaf
    }
}

Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"

# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive

# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir

if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {

    Write-Verbose "Setting prompt to '$Prompt'"

    # Set the prompt to include the env name
    # Make sure _OLD_VIRTUAL_PROMPT is global
    function global:_OLD_VIRTUAL_PROMPT { "" }
    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt

    function global:prompt {
        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
        _OLD_VIRTUAL_PROMPT
    }
    $env:VIRTUAL_ENV_PROMPT = $Prompt
}

# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
    Remove-Item -Path Env:PYTHONHOME
}

# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
70  venv/bin/activate  Normal file
@@ -0,0 +1,70 @@
# This file must be used with "source bin/activate" *from bash*
# You cannot run it directly

deactivate () {
    # reset old environment variables
    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
        PATH="${_OLD_VIRTUAL_PATH:-}"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # Call hash to forget past commands. Without forgetting
    # past commands the $PATH changes we made may not be respected
    hash -r 2> /dev/null

    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
        PS1="${_OLD_VIRTUAL_PS1:-}"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    unset VIRTUAL_ENV_PROMPT
    if [ ! "${1:-}" = "nondestructive" ] ; then
        # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

# on Windows, a path can contain colons and backslashes and has to be converted:
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
    # transform D:\path\to\venv to /d/path/to/venv on MSYS
    # and to /cygdrive/d/path/to/venv on Cygwin
    export VIRTUAL_ENV=$(cygpath /home/daniel/Projects/tamigo-cli/venv)
else
    # use the path as-is
    export VIRTUAL_ENV=/home/daniel/Projects/tamigo-cli/venv
fi

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/"bin":$PATH"
export PATH

# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1:-}"
    PS1='(venv) '"${PS1:-}"
    export PS1
    VIRTUAL_ENV_PROMPT='(venv) '
    export VIRTUAL_ENV_PROMPT
fi

# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null
27  venv/bin/activate.csh  Normal file
@@ -0,0 +1,27 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.

# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>

alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'

# Unset irrelevant variables.
deactivate nondestructive

setenv VIRTUAL_ENV /home/daniel/Projects/tamigo-cli/venv

set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"


set _OLD_VIRTUAL_PROMPT="$prompt"

if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
    set prompt = '(venv) '"$prompt"
    setenv VIRTUAL_ENV_PROMPT '(venv) '
endif

alias pydoc python -m pydoc

rehash
69  venv/bin/activate.fish  Normal file
@@ -0,0 +1,69 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/). You cannot run it directly.

function deactivate -d "Exit virtual environment and return to normal shell environment"
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        set -gx PATH $_OLD_VIRTUAL_PATH
        set -e _OLD_VIRTUAL_PATH
    end
    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
        set -e _OLD_FISH_PROMPT_OVERRIDE
        # prevents error when using nested fish instances (Issue #93858)
        if functions -q _old_fish_prompt
            functions -e fish_prompt
            functions -c _old_fish_prompt fish_prompt
            functions -e _old_fish_prompt
        end
    end

    set -e VIRTUAL_ENV
    set -e VIRTUAL_ENV_PROMPT
    if test "$argv[1]" != "nondestructive"
        # Self-destruct!
        functions -e deactivate
    end
end

# Unset irrelevant variables.
deactivate nondestructive

set -gx VIRTUAL_ENV /home/daniel/Projects/tamigo-cli/venv

set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/"bin $PATH

# Unset PYTHONHOME if set.
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # fish uses a function instead of an env var to generate the prompt.

    # Save the current fish_prompt function as the function _old_fish_prompt.
    functions -c fish_prompt _old_fish_prompt

    # With the original prompt function renamed, we can override with our own.
    function fish_prompt
        # Save the return status of the last command.
        set -l old_status $status

        # Output the venv prompt; color taken from the blue of the Python logo.
        printf "%s%s%s" (set_color 4B8BBE) '(venv) ' (set_color normal)

        # Restore the return status of the previous command.
        echo "exit $old_status" | .
        # Output the original/"old" prompt.
        _old_fish_prompt
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
    set -gx VIRTUAL_ENV_PROMPT '(venv) '
end
8  venv/bin/dotenv  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from dotenv.__main__ import cli
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli())
8  venv/bin/markdown-it  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from markdown_it.cli.parse import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
8  venv/bin/normalizer  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli import cli_detect
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cli_detect())
8  venv/bin/pip  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
8  venv/bin/pip3  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
8  venv/bin/pip3.12  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
8  venv/bin/pygmentize  Executable file
@@ -0,0 +1,8 @@
#!/home/daniel/Projects/tamigo-cli/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
1  venv/bin/python  Symbolic link
@@ -0,0 +1 @@
python3
1  venv/bin/python3  Symbolic link
@@ -0,0 +1 @@
/usr/bin/python3
1  venv/bin/python3.12  Symbolic link
@@ -0,0 +1 @@
python3
Binary file not shown.
@@ -0,0 +1 @@
pip
@@ -0,0 +1,78 @@
Metadata-Version: 2.4
Name: certifi
Version: 2026.2.25
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: https://github.com/certifi/python-certifi
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Project-URL: Source, https://github.com/certifi/python-certifi
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Requires-Python: >=3.7
License-File: LICENSE
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: home-page
Dynamic: license
Dynamic: license-file
Dynamic: project-url
Dynamic: requires-python
Dynamic: summary

Certifi: Python SSL Certificates
================================

Certifi provides Mozilla's carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

``certifi`` is available on PyPI. Simply install it with ``pip``::

    $ pip install certifi

Usage
-----

To reference the installed certificate authority (CA) bundle, you can use the
built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'

Or from the command line::

    $ python -m certifi
    /usr/local/lib/python3.7/site-packages/certifi/cacert.pem

Enjoy!

.. _`Requests`: https://requests.readthedocs.io/en/master/

Addition/Removal of Certificates
--------------------------------

Certifi does not support any addition/removal or other modification of the
CA trust store content. This project is intended to provide a reliable and
highly portable root of trust to python deployments. Look to upstream projects
for methods to use alternate trust.
@@ -0,0 +1,14 @@
certifi-2026.2.25.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi-2026.2.25.dist-info/METADATA,sha256=4NMuGXdg_hBiRA3paKVXYcDmE3VXEBWxTvCL2xlDyPU,2474
certifi-2026.2.25.dist-info/RECORD,,
certifi-2026.2.25.dist-info/WHEEL,sha256=YCfwYGOYMi5Jhw2fU4yNgwErybb2IX5PEwBKV4ZbdBo,91
certifi-2026.2.25.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989
certifi-2026.2.25.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__init__.py,sha256=c9eaYufv1pSLl0Q8QNcMiMLLH4WquDcxdPyKjmI4opY,94
certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
certifi/__pycache__/__init__.cpython-312.pyc,,
certifi/__pycache__/__main__.cpython-312.pyc,,
certifi/__pycache__/core.cpython-312.pyc,,
certifi/cacert.pem,sha256=_JFloSQDJj5-v72te-ej6sD6XTJdPHBGXyjTaQByyig,272441
certifi/core.py,sha256=XFXycndG5pf37ayeF8N32HUuDafsyhkVMbO4BAPWHa0,3394
certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (82.0.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1,20 @@
This package contains a modified version of ca-bundle.crt:

ca-bundle.crt -- Bundle of CA Root Certificates

This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.#

***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.

***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
@@ -0,0 +1 @@
certifi
4  venv/lib/python3.12/site-packages/certifi/__init__.py  Normal file
@@ -0,0 +1,4 @@
from .core import contents, where

__all__ = ["contents", "where"]
__version__ = "2026.02.25"
12  venv/lib/python3.12/site-packages/certifi/__main__.py  Normal file
@@ -0,0 +1,12 @@
import argparse

from certifi import contents, where

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())
Binary file not shown.
Binary file not shown.
Binary file not shown.
4494  venv/lib/python3.12/site-packages/certifi/cacert.pem  Normal file
File diff suppressed because it is too large.
83  venv/lib/python3.12/site-packages/certifi/core.py  Normal file
@@ -0,0 +1,83 @@
"""
certifi.py
~~~~~~~~~~

This module returns the installation location of cacert.pem or its contents.
"""
import sys
import atexit

def exit_cacert_ctx() -> None:
    _CACERT_CTX.__exit__(None, None, None)  # type: ignore[union-attr]


if sys.version_info >= (3, 11):

    from importlib.resources import as_file, files

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the file
        # in cases where we're inside of a zipimport situation until someone
        # actually calls where(), but we don't want to re-extract the file
        # on every call of where(), so we'll do it once then store it in a
        # global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you to
            # manage the cleanup of this file, so it doesn't actually return a
            # path, it returns a context manager that will give you the path
            # when you enter it and will do any cleanup when you leave it. In
            # the common case of not needing a temporary file, it will just
            # return the file system location and the __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
            _CACERT_PATH = str(_CACERT_CTX.__enter__())
            atexit.register(exit_cacert_ctx)

        return _CACERT_PATH

    def contents() -> str:
        return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")

else:

    from importlib.resources import path as get_path, read_text

    _CACERT_CTX = None
    _CACERT_PATH = None

    def where() -> str:
        # This is slightly terrible, but we want to delay extracting the
        # file in cases where we're inside of a zipimport situation until
        # someone actually calls where(), but we don't want to re-extract
        # the file on every call of where(), so we'll do it once then store
        # it in a global variable.
        global _CACERT_CTX
        global _CACERT_PATH
        if _CACERT_PATH is None:
            # This is slightly janky, the importlib.resources API wants you
            # to manage the cleanup of this file, so it doesn't actually
            # return a path, it returns a context manager that will give
            # you the path when you enter it and will do any cleanup when
            # you leave it. In the common case of not needing a temporary
            # file, it will just return the file system location and the
            # __exit__() is a no-op.
            #
            # We also have to hold onto the actual context manager, because
            # it will do the cleanup whenever it gets garbage collected, so
            # we will also store that at the global level as well.
            _CACERT_CTX = get_path("certifi", "cacert.pem")
            _CACERT_PATH = str(_CACERT_CTX.__enter__())
            atexit.register(exit_cacert_ctx)

        return _CACERT_PATH

    def contents() -> str:
        return read_text("certifi", "cacert.pem", encoding="ascii")
0  venv/lib/python3.12/site-packages/certifi/py.typed  Normal file
@@ -0,0 +1 @@
pip
@@ -0,0 +1,782 @@
|
||||
Metadata-Version: 2.4
|
||||
Name: charset-normalizer
|
||||
Version: 3.4.5
|
||||
Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
|
||||
Author-email: "Ahmed R. TAHRI" <tahri.ahmed@proton.me>
|
||||
Maintainer-email: "Ahmed R. TAHRI" <tahri.ahmed@proton.me>
|
||||
License: MIT
|
||||
Project-URL: Changelog, https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md
|
||||
Project-URL: Documentation, https://charset-normalizer.readthedocs.io/
|
||||
Project-URL: Code, https://github.com/jawah/charset_normalizer
|
||||
Project-URL: Issue tracker, https://github.com/jawah/charset_normalizer/issues
|
||||
Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Classifier: Programming Language :: Python :: 3.13
|
||||
Classifier: Programming Language :: Python :: 3.14
|
||||
Classifier: Programming Language :: Python :: 3 :: Only
|
||||
Classifier: Programming Language :: Python :: Implementation :: CPython
|
||||
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
||||
Classifier: Topic :: Text Processing :: Linguistic
|
||||
Classifier: Topic :: Utilities
|
||||
Classifier: Typing :: Typed
|
||||
Requires-Python: >=3.7
|
||||
Description-Content-Type: text/markdown
|
||||
License-File: LICENSE
|
||||
Provides-Extra: unicode-backport
|
||||
Dynamic: license-file
|
||||
|
||||
<h1 align="center">Charset Detection, for Everyone 👋</h1>
|
||||
|
||||
<p align="center">
|
||||
<sup>The Real First Universal Charset Detector</sup><br>
|
||||
<a href="https://pypi.org/project/charset-normalizer">
|
||||
<img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
|
||||
</a>
|
||||
<a href="https://pepy.tech/project/charset-normalizer/">
|
||||
<img alt="Download Count Total" src="https://static.pepy.tech/badge/charset-normalizer/month" />
|
||||
</a>
|
||||
<a href="https://bestpractices.coreinfrastructure.org/projects/7297">
|
||||
<img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
|
||||
</a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<sup><i>Featured Packages</i></sup><br>
|
||||
<a href="https://github.com/jawah/niquests">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Niquests-Most_Advanced_HTTP_Client-cyan">
|
||||
</a>
|
||||
<a href="https://github.com/jawah/wassima">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Wassima-Certifi_Replacement-cyan">
|
||||
</a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<sup><i>In other language (unofficial port - by the community)</i></sup><br>
|
||||
<a href="https://github.com/nickspring/charset-normalizer-rs">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Rust-red">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
> A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
|
||||
> I'm trying to resolve the issue by taking a new approach.
|
||||
> All IANA character set names for which the Python core library provides codecs are supported.
|
||||
|
||||
<p align="center">
|
||||
>>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
|
||||
</p>
|
||||
|
||||
This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
|
||||
|
||||
| Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
|
||||
|--------------------------------------------------|:-----------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:|:-----------------------------------------------:|
|
||||
| `Fast` | ✅ | ✅ | ✅ |
|
||||
| `Universal**` | ❌ | ✅ | ❌ |
|
||||
| `Reliable` **without** distinguishable standards | ✅ | ✅ | ✅ |
|
||||
| `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
|
||||
| `License` | _Public Domain_<br>and/or<br>_LGPL-2.1_***<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
|
||||
| `Native Python` | ✅ | ✅ | ❌ |
|
||||
| `Detect spoken language` | ✅ | ✅ | N/A |
|
||||
| `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
|
||||
| `Whl Size (min)` | 500 kB | 150 kB | ~200 kB |
|
||||
| `Supported Encoding` | 99 | [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |
|
||||
| `Can register custom encoding` | ❌ | ✅ | ❌ |
|
||||
|
||||
<p align="center">
|
||||
<img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>
|
||||
</p>
|
||||
|
||||
*\*\* : They are clearly using specific code for a specific encoding even if covering most of used one.*<br>
|
||||
*\*\*\* : The vast majority of the code is issued from an LLM agent (Claude), even if the author label this project now as MIT in his own name, it's clearly debatable. Most jurisdictions on copyright laws would nullify the license. With my personal education, **Public Domain or/and LGPL-2.1** is the most likely one based on Anthropic declarations about how they train their LLMs and the LGPL-2.1 itself (the original license as it's still the same statistical principle behind the scene, hugely refactored).*<br>
|
||||
|
||||
## ⚡ Performance
|
||||
|
||||
This package offer acceptable performances against Chardet. Here are some numbers.
|
||||
|
||||
| Package | Accuracy | Mean per file (ms) | File per sec (est) |
|
||||
|-------------------------------------------------|:--------:|:------------------:|:------------------:|
|
||||
| [chardet 7](https://github.com/chardet/chardet) | 89 % | **5 ms** | 200 file/sec |
|
||||
| charset-normalizer | **97 %** | 8 ms | 125 file/sec |
|
||||
|
||||
| Package | 99th percentile | 95th percentile | 50th percentile |
|
||||
|-------------------------------------------------|:---------------:|:---------------:|:---------------:|
|
||||
| [chardet 7](https://github.com/chardet/chardet) | 32 ms | 17 ms | 1 ms |
|
||||
| charset-normalizer | 63 ms | 29 ms | 3 ms |
|
||||
|
||||
_updated as of Mars 2026 using CPython 3.12, and Chardet 7_
|
||||
|
||||
~Chardet's performance on larger file (1MB+) are very poor. Expect huge difference on large payload.~ No longer the case since Chardet 7.0+
|
||||
|
||||
> Stats are generated using 400+ files using default parameters. More details on used files, see GHA workflows.
|
||||
> And yes, these results might change at any time. The dataset can be updated to include more files.
|
||||
> The actual delays heavily depends on your CPU capabilities. The factors should remain the same.
|
||||
> Chardet claims on his documentation to have a greater accuracy than us based on the dataset they trained Chardet on(...)
|
||||
> Well, it's normal, the opposite would have been worrying. Whereas charset-normalizer don't train on anything, our solution
|
||||
> is based on a completely different algorithm, still heuristic through, it does not need weights across every encoding tables.
|
||||
|
||||
## ✨ Installation
|
||||
|
||||
Using pip:
|
||||
|
||||
```sh
|
||||
pip install charset-normalizer -U
|
||||
```
|
||||
|
||||
## 🚀 Basic Usage
|
||||
|
||||
### CLI
|
||||
This package comes with a CLI.
|
||||
|
||||
```
|
||||
usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
|
||||
file [file ...]
|
||||
|
||||
The Real First Universal Charset Detector. Discover originating encoding used
|
||||
on text file. Normalize text to unicode.
|
||||
|
||||
positional arguments:
|
||||
files File(s) to be analysed
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-v, --verbose Display complementary information about file if any.
|
||||
Stdout will contain logs about the detection process.
|
||||
-a, --with-alternative
|
||||
Output complementary possibilities if any. Top-level
|
||||
JSON WILL be a list.
|
||||
-n, --normalize Permit to normalize input file. If not set, program
|
||||
does not write anything.
|
||||
-m, --minimal Only output the charset detected to STDOUT. Disabling
|
||||
JSON output.
|
||||
-r, --replace Replace file when trying to normalize it instead of
|
||||
creating a new one.
|
||||
-f, --force Replace file without asking if you are sure, use this
|
||||
flag with caution.
|
||||
-t THRESHOLD, --threshold THRESHOLD
|
||||
Define a custom maximum amount of chaos allowed in
|
||||
decoded content. 0. <= chaos <= 1.
|
||||
--version Show version information and exit.
|
||||
```
|
||||
|
||||
```bash
|
||||
normalizer ./data/sample.1.fr.srt
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
python -m charset_normalizer ./data/sample.1.fr.srt
|
||||
```
|
||||
|
||||
🎉 Since version 1.4.0 the CLI produce easily usable stdout result in JSON format.
|
||||
|
||||
```json
|
||||
{
|
||||
"path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
|
||||
"encoding": "cp1252",
|
||||
"encoding_aliases": [
|
||||
"1252",
|
||||
"windows_1252"
|
||||
],
|
||||
"alternative_encodings": [
|
||||
"cp1254",
|
||||
"cp1256",
|
||||
"cp1258",
|
||||
"iso8859_14",
|
||||
"iso8859_15",
|
||||
"iso8859_16",
|
||||
"iso8859_3",
|
||||
"iso8859_9",
|
||||
"latin_1",
|
||||
"mbcs"
|
||||
],
|
||||
"language": "French",
|
||||
"alphabets": [
|
||||
"Basic Latin",
|
||||
"Latin-1 Supplement"
|
||||
],
|
||||
"has_sig_or_bom": false,
|
||||
"chaos": 0.149,
|
||||
"coherence": 97.152,
|
||||
"unicode_path": null,
|
||||
"is_preferred": true
|
||||
}
|
||||
```
|
||||
|
||||
### Python
|
||||
*Just print out normalized text*
|
||||
```python
|
||||
from charset_normalizer import from_path
|
||||
|
||||
results = from_path('./my_subtitle.srt')
|
||||
|
||||
print(str(results.best()))
|
||||
```
|
||||
|
||||
*Upgrade your code without effort*
|
||||
```python
|
||||
from charset_normalizer import detect
|
||||
```
|
||||
|
||||
The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
|
||||
|
||||
See the docs for advanced usage : [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
|
||||
|
||||
## 😇 Why
|
||||
|
||||
When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
|
||||
reliable alternative using a completely different method. Also! I never back down on a good challenge!
|
||||
|
||||
I **don't care** about the **originating charset** encoding, because **two different tables** can
|
||||
produce **two identical rendered string.**
|
||||
What I want is to get readable text, the best I can.
|
||||
|
||||
In a way, **I'm brute forcing text decoding.** How cool is that ? 😎
|
||||
|
||||
Don't confuse package **ftfy** with charset-normalizer or chardet. ftfy goal is to repair Unicode string whereas charset-normalizer to convert raw file in unknown encoding to unicode.
|
||||
|
||||
## 🍰 How
|
||||
|
||||
- Discard all charset encoding table that could not fit the binary content.
|
||||
- Measure noise, or the mess once opened (by chunks) with a corresponding charset encoding.
|
||||
- Extract matches with the lowest mess detected.
|
||||
- Additionally, we measure coherence / probe for a language (a toy sketch of this pipeline follows below).
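A deliberately naive sketch of that pipeline, purely illustrative and not the library's actual implementation (the real detector chunks the payload and uses far richer mess and coherence heuristics):

```python
# Toy sketch only -- NOT how charset-normalizer is implemented internally.
def naive_brute_force(payload: bytes, candidates=("utf_8", "cp1252", "latin_1")):
    scored = []
    for codec in candidates:
        try:
            text = payload.decode(codec)
        except UnicodeDecodeError:
            continue  # this table cannot fit the binary content: discard it
        # toy "mess" metric: share of unprintable characters in the rendered text
        mess = sum(1 for ch in text if not ch.isprintable() and ch not in "\n\r\t") / max(len(text), 1)
        scored.append((mess, codec))
    return sorted(scored)  # lowest mess first

print(naive_brute_force("héllo wörld".encode("cp1252")))
```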
|
||||
|
||||
**Wait a minute**, what are noise/mess and coherence according to **YOU?**
|
||||
|
||||
*Noise:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
**I established**, some ground rules about **what is obvious** when **it seems like** a mess (i.e. defining noise in rendered text).
I know that my interpretation of noise is probably incomplete; feel free to contribute in order to
improve or rewrite it.
|
||||
|
||||
*Coherence:* For each language there is on earth, we have computed ranked letter-appearance occurrences (the best we can). So I figured
that intel is worth something here, and I use those records against the decoded text to check if I can detect intelligent design.
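A toy illustration of such a coherence probe, assuming a hypothetical hard-coded reference ranking (the real library ships per-language frequency tables):

```python
# Toy coherence probe -- illustrative only, not the library's real scoring.
from collections import Counter

FRENCH_TOP = ["e", "a", "s", "i", "t", "n", "r", "u", "l", "o"]  # rough, hypothetical reference ranking

def toy_coherence(text: str, reference=FRENCH_TOP) -> float:
    letters = [ch for ch in text.lower() if ch.isalpha()]
    ranked = [ch for ch, _ in Counter(letters).most_common(len(reference))]
    return sum(1 for ch in ranked if ch in reference) / len(reference)

print(toy_coherence("Le texte décodé ressemble-t-il à du texte français lisible ?"))
```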
|
||||
|
||||
## ⚡ Known limitations
|
||||
|
||||
- Language detection is unreliable when the text contains two or more languages sharing identical letters (e.g. HTML with English tags plus Turkish content, both sharing Latin characters).
|
||||
- Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.
|
||||
|
||||
## ⚠️ About Python EOLs
|
||||
|
||||
**If you are running:**
|
||||
|
||||
- Python >=2.7,<3.5: Unsupported
|
||||
- Python 3.5: charset-normalizer < 2.1
|
||||
- Python 3.6: charset-normalizer < 3.1
|
||||
|
||||
Upgrade your Python interpreter as soon as possible.
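For example, pinning `pip install "charset-normalizer<3.1"` would keep you on the last release line that still supports Python 3.6, per the table above; treat the exact pin as an illustration rather than a recommendation.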
|
||||
|
||||
## 👤 Contributing
|
||||
|
||||
Contributions, issues and feature requests are very much welcome.<br />
|
||||
Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
|
||||
|
||||
## 📝 License
|
||||
|
||||
Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
|
||||
This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.
|
||||
|
||||
Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
|
||||
|
||||
## 💼 For Enterprise
|
||||
|
||||
Professional support for charset-normalizer is available as part of the [Tidelift
|
||||
Subscription][1]. Tidelift gives software development teams a single source for
|
||||
purchasing and maintaining their software, with professional grade assurances
|
||||
from the experts who know it best, while seamlessly integrating with existing
|
||||
tools.
|
||||
|
||||
[1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme
|
||||
|
||||
[](https://www.bestpractices.dev/projects/7297)
|
||||
|
||||
# Changelog
|
||||
All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
|
||||
|
||||
## [3.4.5](https://github.com/Ousret/charset_normalizer/compare/3.4.4...3.4.5) (2026-03-06)
|
||||
|
||||
### Changed
|
||||
- Update `setuptools` constraint to `setuptools>=68,<=82`.
|
||||
- Raised upper bound of mypyc for the optional pre-built extension to v1.19.1
|
||||
|
||||
### Fixed
|
||||
- Add explicit link to lib math in our optimized build. (#692)
|
||||
- Logger level not restored correctly for empty byte sequences. (#701)
|
||||
- TypeError when passing bytearray to from_bytes. (#703)
|
||||
|
||||
### Misc
|
||||
- Applied safe micro-optimizations in both our noise detector and language detector.
|
||||
- Rewrote the `query_yes_no` function (inside CLI) to avoid using ambiguous licensed code.
|
||||
- Added `cd.py` submodule into mypyc optional compilation to reduce further the performance impact.
|
||||
|
||||
## [3.4.4](https://github.com/Ousret/charset_normalizer/compare/3.4.2...3.4.4) (2025-10-13)
|
||||
|
||||
### Changed
|
||||
- Bound `setuptools` to a specific constraint `setuptools>=68,<=81`.
|
||||
- Raised upper bound of mypyc for the optional pre-built extension to v1.18.2
|
||||
|
||||
### Removed
|
||||
- `setuptools-scm` as a build dependency.
|
||||
|
||||
### Misc
|
||||
- Enforced hashes in `dev-requirements.txt` and created `ci-requirements.txt` for security purposes.
|
||||
- Additional pre-built wheels for riscv64, s390x, and armv7l architectures.
|
||||
- Restore `multiple.intoto.jsonl` in GitHub releases in addition to the individual attestation file per wheel.
|
||||
|
||||
## [3.4.3](https://github.com/Ousret/charset_normalizer/compare/3.4.2...3.4.3) (2025-08-09)
|
||||
|
||||
### Changed
|
||||
- mypy(c) is no longer a required dependency at build time if `CHARSET_NORMALIZER_USE_MYPYC` isn't set to `1`. (#595) (#583)
|
||||
- automatically lower confidence on small bytes samples that are not Unicode in `detect` output legacy function. (#391)
|
||||
|
||||
### Added
|
||||
- Custom build backend to overcome inability to mark mypy as an optional dependency in the build phase.
|
||||
- Support for Python 3.14
|
||||
|
||||
### Fixed
|
||||
- sdist archive contained useless directories.
|
||||
- automatically fallback on valid UTF-16 or UTF-32 even if the md says it's noisy. (#633)
|
||||
|
||||
### Misc
|
||||
- SBOM are automatically published to the relevant GitHub release to comply with regulatory changes.
|
||||
Each published wheel comes with its SBOM. We choose CycloneDX as the format.
|
||||
- Prebuilt optimized wheel are no longer distributed by default for CPython 3.7 due to a change in cibuildwheel.
|
||||
|
||||
## [3.4.2](https://github.com/Ousret/charset_normalizer/compare/3.4.1...3.4.2) (2025-05-02)
|
||||
|
||||
### Fixed
|
||||
- Addressed the DeprecationWarning in our CLI regarding `argparse.FileType` by backporting the target class into the package. (#591)
|
||||
- Improved the overall reliability of the detector with CJK Ideographs. (#605) (#587)
|
||||
|
||||
### Changed
|
||||
- Optional mypyc compilation upgraded to version 1.15 for Python >= 3.8
|
||||
|
||||
## [3.4.1](https://github.com/Ousret/charset_normalizer/compare/3.4.0...3.4.1) (2024-12-24)
|
||||
|
||||
### Changed
|
||||
- Project metadata are now stored using `pyproject.toml` instead of `setup.cfg` using setuptools as the build backend.
|
||||
- Enforce annotation delayed loading for a simpler and consistent types in the project.
|
||||
- Optional mypyc compilation upgraded to version 1.14 for Python >= 3.8
|
||||
|
||||
### Added
|
||||
- pre-commit configuration.
|
||||
- noxfile.
|
||||
|
||||
### Removed
|
||||
- `build-requirements.txt` as per using `pyproject.toml` native build configuration.
|
||||
- `bin/integration.py` and `bin/serve.py` in favor of downstream integration test (see noxfile).
|
||||
- `setup.cfg` in favor of `pyproject.toml` metadata configuration.
|
||||
- Unused `utils.range_scan` function.
|
||||
|
||||
### Fixed
|
||||
- Converting content to Unicode bytes may insert `utf_8` instead of preferred `utf-8`. (#572)
|
||||
- Deprecation warning "'count' is passed as positional argument" when converting to Unicode bytes on Python 3.13+
|
||||
|
||||
## [3.4.0](https://github.com/Ousret/charset_normalizer/compare/3.3.2...3.4.0) (2024-10-08)
|
||||
|
||||
### Added
|
||||
- Argument `--no-preemptive` in the CLI to prevent the detector to search for hints.
|
||||
- Support for Python 3.13 (#512)
|
||||
|
||||
### Fixed
|
||||
- Relax the TypeError exception thrown when trying to compare a CharsetMatch with anything else than a CharsetMatch.
|
||||
- Improved the general reliability of the detector based on user feedbacks. (#520) (#509) (#498) (#407) (#537)
|
||||
- Declared charset in content (preemptive detection) not changed when converting to utf-8 bytes. (#381)
|
||||
|
||||
## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31)
|
||||
|
||||
### Fixed
|
||||
- Unintentional memory usage regression when using large payload that match several encoding (#376)
|
||||
- Regression on some detection case showcased in the documentation (#371)
|
||||
|
||||
### Added
|
||||
- Noise (md) probe that identify malformed arabic representation due to the presence of letters in isolated form (credit to my wife)
|
||||
|
||||
## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22)
|
||||
|
||||
### Changed
|
||||
- Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8
|
||||
- Improved the general detection reliability based on reports from the community
|
||||
|
||||
## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30)
|
||||
|
||||
### Added
|
||||
- Allow to execute the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer`
|
||||
- Support for 9 forgotten encodings that are supported by Python but unlisted in `encoding.aliases` as they have no alias (#323)
|
||||
|
||||
### Removed
|
||||
- (internal) Redundant utils.is_ascii function and unused function is_private_use_only
|
||||
- (internal) charset_normalizer.assets is moved inside charset_normalizer.constant
|
||||
|
||||
### Changed
|
||||
- (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection
|
||||
- Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8
|
||||
|
||||
### Fixed
|
||||
- Unable to properly sort CharsetMatch when both chaos/noise and coherence were close due to an unreachable condition in \_\_lt\_\_ (#350)
|
||||
|
||||
## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)
|
||||
|
||||
### Changed
|
||||
- Typehint for function `from_path` no longer enforce `PathLike` as its first argument
|
||||
- Minor improvement over the global detection reliability
|
||||
|
||||
### Added
|
||||
- Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries
|
||||
- Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True)
|
||||
- Explicit support for Python 3.12
|
||||
|
||||
### Fixed
|
||||
- Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289)
|
||||
|
||||
## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)
|
||||
|
||||
### Added
|
||||
- Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262)
|
||||
|
||||
### Removed
|
||||
- Support for Python 3.6 (PR #260)
|
||||
|
||||
### Changed
|
||||
- Optional speedup provided by mypy/c 1.0.1
|
||||
|
||||
## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)
|
||||
|
||||
### Fixed
|
||||
- Multi-bytes cutter/chunk generator did not always cut correctly (PR #233)
|
||||
|
||||
### Changed
|
||||
- Speedup provided by mypy/c 0.990 on Python >= 3.7
|
||||
|
||||
## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)
|
||||
|
||||
### Added
|
||||
- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
|
||||
- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
|
||||
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
|
||||
- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
|
||||
|
||||
### Changed
|
||||
- Build with static metadata using 'build' frontend
|
||||
- Make the language detection stricter
|
||||
- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
|
||||
|
||||
### Fixed
|
||||
- CLI with opt --normalize fail when using full path for files
|
||||
- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
|
||||
- Sphinx warnings when generating the documentation
|
||||
|
||||
### Removed
|
||||
- Coherence detector no longer return 'Simple English' instead return 'English'
|
||||
- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
|
||||
- Breaking: Method `first()` and `best()` from CharsetMatch
|
||||
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
|
||||
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
|
||||
- Breaking: Top-level function `normalize`
|
||||
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
|
||||
- Support for the backport `unicodedata2`
|
||||
|
||||
## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)
|
||||
|
||||
### Added
|
||||
- Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
|
||||
- Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
|
||||
- Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
|
||||
|
||||
### Changed
|
||||
- Build with static metadata using 'build' frontend
|
||||
- Make the language detection stricter
|
||||
|
||||
### Fixed
|
||||
- CLI with opt --normalize fail when using full path for files
|
||||
- TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
|
||||
|
||||
### Removed
|
||||
- Coherence detector no longer return 'Simple English' instead return 'English'
|
||||
- Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
|
||||
|
||||
## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)
|
||||
|
||||
### Added
|
||||
- `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
|
||||
|
||||
### Removed
|
||||
- Breaking: Method `first()` and `best()` from CharsetMatch
|
||||
- UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
|
||||
|
||||
### Fixed
|
||||
- Sphinx warnings when generating the documentation
|
||||
|
||||
## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)
|
||||
|
||||
### Changed
|
||||
- Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
|
||||
|
||||
### Removed
|
||||
- Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
|
||||
- Breaking: Top-level function `normalize`
|
||||
- Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
|
||||
- Support for the backport `unicodedata2`
|
||||
|
||||
## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)
|
||||
|
||||
### Deprecated
|
||||
- Function `normalize` scheduled for removal in 3.0
|
||||
|
||||
### Changed
|
||||
- Removed useless call to decode in fn is_unprintable (#206)
|
||||
|
||||
### Fixed
|
||||
- Third-party library (i18n xgettext) crashing not recognizing utf_8 (PEP 263) with underscore from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)
|
||||
|
||||
## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)
|
||||
|
||||
### Added
|
||||
- Output the Unicode table version when running the CLI with `--version` (PR #194)
|
||||
|
||||
### Changed
|
||||
- Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175)
|
||||
- Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183)
|
||||
|
||||
### Fixed
|
||||
- Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175)
|
||||
- CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)
|
||||
|
||||
### Removed
|
||||
- Support for Python 3.5 (PR #192)
|
||||
|
||||
### Deprecated
|
||||
- Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194)
|
||||
|
||||
## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)
|
||||
|
||||
### Fixed
|
||||
- ASCII miss-detection on rare cases (PR #170)
|
||||
|
||||
## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
|
||||
|
||||
### Added
|
||||
- Explicit support for Python 3.11 (PR #164)
|
||||
|
||||
### Changed
|
||||
- The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
|
||||
|
||||
## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
|
||||
|
||||
### Fixed
|
||||
- Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154)
|
||||
|
||||
### Changed
|
||||
- Skipping the language-detection (CD) on ASCII (PR #155)
|
||||
|
||||
## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
|
||||
|
||||
### Changed
|
||||
- Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
|
||||
|
||||
### Fixed
|
||||
- Wrong logging level applied when setting kwarg `explain` to True (PR #146)
|
||||
|
||||
## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
|
||||
### Changed
|
||||
- Improvement over Vietnamese detection (PR #126)
|
||||
- MD improvement on trailing data and long foreign (non-pure latin) data (PR #124)
|
||||
- Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122)
|
||||
- call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129)
|
||||
- Code style as refactored by Sourcery-AI (PR #131)
|
||||
- Minor adjustment on the MD around european words (PR #133)
|
||||
- Remove and replace SRTs from assets / tests (PR #139)
|
||||
- Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135)
|
||||
- Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135)
|
||||
|
||||
### Fixed
|
||||
- Fix large (misleading) sequence giving UnicodeDecodeError (PR #137)
|
||||
- Avoid using too insignificant chunk (PR #137)
|
||||
|
||||
### Added
|
||||
- Add and expose function `set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135)
|
||||
- Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
|
||||
|
||||
## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
|
||||
### Added
|
||||
- Add support for Kazakh (Cyrillic) language detection (PR #109)
|
||||
|
||||
### Changed
|
||||
- Further, improve inferring the language from a given single-byte code page (PR #112)
|
||||
- Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116)
|
||||
- Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113)
|
||||
- Various detection improvement (MD+CD) (PR #117)
|
||||
|
||||
### Removed
|
||||
- Remove redundant logging entry about detected language(s) (PR #115)
|
||||
|
||||
### Fixed
|
||||
- Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
|
||||
|
||||
## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
|
||||
### Fixed
|
||||
- Unforeseen regression with the loss of the backward-compatibility with some older minor of Python 3.5.x (PR #100)
|
||||
- Fix CLI crash when using --minimal output in certain cases (PR #103)
|
||||
|
||||
### Changed
|
||||
- Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
|
||||
|
||||
## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
|
||||
### Changed
|
||||
- The project now comply with: flake8, mypy, isort and black to ensure a better overall quality (PR #81)
|
||||
- The BC-support with v1.x was improved, the old staticmethods are restored (PR #82)
|
||||
- The Unicode detection is slightly improved (PR #93)
|
||||
- Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91)
|
||||
|
||||
### Removed
|
||||
- The project no longer raise warning on tiny content given for detection, will be simply logged as warning instead (PR #92)
|
||||
|
||||
### Fixed
|
||||
- In some rare case, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95)
|
||||
- Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
|
||||
- The MANIFEST.in was not exhaustive (PR #78)
|
||||
|
||||
## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
|
||||
### Fixed
|
||||
- The CLI no longer raise an unexpected exception when no encoding has been found (PR #70)
|
||||
- Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
|
||||
- The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
|
||||
- Submatch factoring could be wrong in rare edge cases (PR #72)
|
||||
- Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72)
|
||||
- Fix line endings from CRLF to LF for certain project files (PR #67)
|
||||
|
||||
### Changed
|
||||
- Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
|
||||
- Allow fallback on specified encoding if any (PR #71)
|
||||
|
||||
## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
|
||||
### Changed
|
||||
- Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63)
|
||||
- According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
|
||||
|
||||
## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
|
||||
### Fixed
|
||||
- Empty/Too small JSON payload miss-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
|
||||
|
||||
### Changed
|
||||
- Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57)
|
||||
|
||||
## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
|
||||
### Fixed
|
||||
- Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
|
||||
- Using explain=False permanently disable the verbose output in the current runtime (PR #47)
|
||||
- One log entry (language target preemptive) was not show in logs when using explain=True (PR #47)
|
||||
- Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
|
||||
|
||||
### Changed
|
||||
- Public function normalize default args values were not aligned with from_bytes (PR #53)
|
||||
|
||||
### Added
|
||||
- You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47)
|
||||
|
||||
## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
|
||||
### Changed
|
||||
- 4x to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet.
|
||||
- Accent has been made on UTF-8 detection, should perform rather instantaneous.
|
||||
- The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
|
||||
- The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time)
|
||||
- The program has been rewritten to ease the readability and maintainability. (+ Using static typing)
|
||||
- utf_7 detection has been reinstated.
|
||||
|
||||
### Removed
|
||||
- This package no longer require anything when used with Python 3.5 (Dropped cached_property)
|
||||
- Removed support for these languages: Catalan, Esperanto, Kazakh, Baque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbocroatian.
|
||||
- The exception hook on UnicodeDecodeError has been removed.
|
||||
|
||||
### Deprecated
|
||||
- Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
|
||||
|
||||
### Fixed
|
||||
- The CLI output used the relative path of the file(s). Should be absolute.
|
||||
|
||||
## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
|
||||
### Fixed
|
||||
- Logger configuration/usage no longer conflict with others (PR #44)
|
||||
|
||||
## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
|
||||
### Removed
|
||||
- Using standard logging instead of using the package loguru.
|
||||
- Dropping nose test framework in favor of the maintained pytest.
|
||||
- Choose to not use dragonmapper package to help with gibberish Chinese/CJK text.
|
||||
- Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version.
|
||||
- Stop support for UTF-7 that does not contain a SIG.
|
||||
- Dropping PrettyTable, replaced with pure JSON output in CLI.
|
||||
|
||||
### Fixed
|
||||
- BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present. Due to the sub-match factoring process.
|
||||
- Not searching properly for the BOM when trying utf32/16 parent codec.
|
||||
|
||||
### Changed
|
||||
- Improving the package final size by compressing frequencies.json.
|
||||
- Huge improvement over the largest payloads.
|
||||
|
||||
### Added
|
||||
- CLI now produces JSON consumable output.
|
||||
- Return ASCII if given sequences fit. Given reasonable confidence.
|
||||
|
||||
## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
|
||||
|
||||
### Fixed
|
||||
- In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
|
||||
|
||||
## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
|
||||
|
||||
### Fixed
|
||||
- Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39)
|
||||
|
||||
## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
|
||||
|
||||
### Fixed
|
||||
- The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38)
|
||||
|
||||
## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
|
||||
|
||||
### Changed
|
||||
- Amend the previous release to allow prettytable 2.0 (PR #35)
|
||||
|
||||
## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
|
||||
|
||||
### Fixed
|
||||
- Fix error while using the package with a python pre-release interpreter (PR #33)
|
||||
|
||||
### Changed
|
||||
- Dependencies refactoring, constraints revised.
|
||||
|
||||
### Added
|
||||
- Add python 3.9 and 3.10 to the supported interpreters
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 TAHRI Ahmed R.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -0,0 +1,36 @@
|
||||
../../../bin/normalizer,sha256=4nbFeY8b_phVKfTPTKpxz9aCKXid4J4_hjLlDDqTp7k,266
|
||||
81d243bd2c585b0f4821__mypyc.cpython-312-x86_64-linux-gnu.so,sha256=ol-F_r6abrxGpRlWxSI7_945wGWttVZG8eyeELYEYIg,400712
|
||||
charset_normalizer-3.4.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
charset_normalizer-3.4.5.dist-info/METADATA,sha256=EzjXveXyFIsZbPId0RGpMGQjUc-R5UKvA2WAtej3jDs,39168
|
||||
charset_normalizer-3.4.5.dist-info/RECORD,,
|
||||
charset_normalizer-3.4.5.dist-info/WHEEL,sha256=ND-iKo1q8cWcsXAKXBKvQvxAFH_GQvThPpjreAAS6vI,190
|
||||
charset_normalizer-3.4.5.dist-info/entry_points.txt,sha256=ADSTKrkXZ3hhdOVFi6DcUEHQRS0xfxDIE_pEz4wLIXA,65
|
||||
charset_normalizer-3.4.5.dist-info/licenses/LICENSE,sha256=bQ1Bv-FwrGx9wkjJpj4lTQ-0WmDVCoJX0K-SxuJJuIc,1071
|
||||
charset_normalizer-3.4.5.dist-info/top_level.txt,sha256=c_vZbitqecT2GfK3zdxSTLCn8C-6pGnHQY5o_5Y32M0,47
|
||||
charset_normalizer/__init__.py,sha256=OKRxRv2Zhnqk00tqkN0c1BtJjm165fWXLydE52IKuHc,1590
|
||||
charset_normalizer/__main__.py,sha256=yzYxMR-IhKRHYwcSlavEv8oGdwxsR89mr2X09qXGdps,109
|
||||
charset_normalizer/__pycache__/__init__.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/__main__.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/api.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/cd.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/constant.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/legacy.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/md.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/models.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/utils.cpython-312.pyc,,
|
||||
charset_normalizer/__pycache__/version.cpython-312.pyc,,
|
||||
charset_normalizer/api.py,sha256=TRSCafKaFBsVb58qldkSVKjMaVLRoO2K50HlnoVJS8k,22719
|
||||
charset_normalizer/cd.cpython-312-x86_64-linux-gnu.so,sha256=KRs9UiPDxL5LmvMpsgOxjVpbPowT3L9yjkDmyP34GPI,15912
|
||||
charset_normalizer/cd.py,sha256=4uPHsbkK1JMivEyl7IQX_sc1hmvos1GWClziah0eUmE,13639
|
||||
charset_normalizer/cli/__init__.py,sha256=D8I86lFk2-py45JvqxniTirSj_sFyE6sjaY_0-G1shc,136
|
||||
charset_normalizer/cli/__main__.py,sha256=YKJSlTzvQc5-bV2GiZa0YuENaQqBqu2o9-GfKeXpcv8,11946
|
||||
charset_normalizer/cli/__pycache__/__init__.cpython-312.pyc,,
|
||||
charset_normalizer/cli/__pycache__/__main__.cpython-312.pyc,,
|
||||
charset_normalizer/constant.py,sha256=zxhEL0g0eGpeYSRzjDV712fChwyvzu2buf3_iFm5erU,43552
|
||||
charset_normalizer/legacy.py,sha256=sYBzSpzsRrg_wF4LP536pG64BItw7Tqtc3SMQAHvFLM,2731
|
||||
charset_normalizer/md.cpython-312-x86_64-linux-gnu.so,sha256=VaxqqP3RbmelZISMwRb2ahdtqNdbXBgN56dIZuZ-tfw,15912
|
||||
charset_normalizer/md.py,sha256=8GHs8v9PRvpQptX1WMu9i-m8O1Iz8CMVpBCz3bVhfxk,23023
|
||||
charset_normalizer/models.py,sha256=QSS3cmSdh4J4URyNOhyBdgDEbrHIY33eCuP-CNAjtGI,12320
|
||||
charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
charset_normalizer/utils.py,sha256=Gnwvl3uLekr-I4ynGDT0BtCiDfh9QANfF6UWLWKjeYw,12240
|
||||
charset_normalizer/version.py,sha256=cz8kM4B70kP_YEpiel3hbt_h-D9zYjlRRcwxzSBc378,115
|
||||
@@ -0,0 +1,7 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: setuptools (82.0.0)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp312-cp312-manylinux_2_17_x86_64
|
||||
Tag: cp312-cp312-manylinux2014_x86_64
|
||||
Tag: cp312-cp312-manylinux_2_28_x86_64
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
[console_scripts]
|
||||
normalizer = charset_normalizer.cli:cli_detect
|
||||
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 TAHRI Ahmed R.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -0,0 +1,2 @@
|
||||
81d243bd2c585b0f4821__mypyc
|
||||
charset_normalizer
|
||||
@@ -0,0 +1,48 @@
|
||||
"""
|
||||
Charset-Normalizer
|
||||
~~~~~~~~~~~~~~
|
||||
The Real First Universal Charset Detector.
|
||||
A library that helps you read text from an unknown charset encoding.
|
||||
Motivated by chardet, this package tries to resolve the issue by taking a new approach.
|
||||
All IANA character set names for which the Python core library provides codecs are supported.
|
||||
|
||||
Basic usage:
|
||||
>>> from charset_normalizer import from_bytes
|
||||
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
|
||||
>>> best_guess = results.best()
|
||||
>>> str(best_guess)
|
||||
'Bсеки човек има право на образование. Oбразованието!'
|
||||
|
||||
Other methods and usages are available - see the full documentation
|
||||
at <https://github.com/Ousret/charset_normalizer>.
|
||||
:copyright: (c) 2021 by Ahmed TAHRI
|
||||
:license: MIT, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from .api import from_bytes, from_fp, from_path, is_binary
|
||||
from .legacy import detect
|
||||
from .models import CharsetMatch, CharsetMatches
|
||||
from .utils import set_logging_handler
|
||||
from .version import VERSION, __version__
|
||||
|
||||
__all__ = (
|
||||
"from_fp",
|
||||
"from_path",
|
||||
"from_bytes",
|
||||
"is_binary",
|
||||
"detect",
|
||||
"CharsetMatch",
|
||||
"CharsetMatches",
|
||||
"__version__",
|
||||
"VERSION",
|
||||
"set_logging_handler",
|
||||
)
|
||||
|
||||
# Attach a NullHandler to the top level logger by default
|
||||
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
|
||||
|
||||
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
|
||||
@@ -0,0 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .cli import cli_detect
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli_detect()
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
671
venv/lib/python3.12/site-packages/charset_normalizer/api.py
Normal file
671
venv/lib/python3.12/site-packages/charset_normalizer/api.py
Normal file
@@ -0,0 +1,671 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from os import PathLike
|
||||
from typing import BinaryIO
|
||||
|
||||
from .cd import (
|
||||
coherence_ratio,
|
||||
encoding_languages,
|
||||
mb_encoding_languages,
|
||||
merge_coherence_ratios,
|
||||
)
|
||||
from .constant import (
|
||||
IANA_SUPPORTED,
|
||||
IANA_SUPPORTED_SIMILAR,
|
||||
TOO_BIG_SEQUENCE,
|
||||
TOO_SMALL_SEQUENCE,
|
||||
TRACE,
|
||||
)
|
||||
from .md import mess_ratio
|
||||
from .models import CharsetMatch, CharsetMatches
|
||||
from .utils import (
|
||||
any_specified_encoding,
|
||||
cut_sequence_chunks,
|
||||
iana_name,
|
||||
identify_sig_or_bom,
|
||||
is_multi_byte_encoding,
|
||||
should_strip_sig_or_bom,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("charset_normalizer")
|
||||
explain_handler = logging.StreamHandler()
|
||||
explain_handler.setFormatter(
|
||||
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
|
||||
)
|
||||
|
||||
|
||||
def from_bytes(
|
||||
sequences: bytes | bytearray,
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.2,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Given a raw bytes sequence, return the best possible charsets usable to render str objects.
If there are no results, it is a strong indicator that the source is binary/not text.
By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
and will give up on a particular code page after 20% of measured mess. Those criteria are customizable at will.

The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
but never takes it for granted. It can improve performance.

You may want to focus your attention on some code pages and/or exclude others; use cp_isolation and cp_exclusion for that
purpose.

This function will strip the SIG in the payload/sequence every time except for UTF-16 and UTF-32.
By default the library does not set up any handler other than the NullHandler; if you choose to set the 'explain'
toggle to True it will alter the logger configuration to add a StreamHandler suitable for debugging.
A custom logging format and handler can be set manually.
|
||||
"""
|
||||
|
||||
if not isinstance(sequences, (bytearray, bytes)):
|
||||
raise TypeError(
|
||||
"Expected object of type bytes or bytearray, got: {}".format(
|
||||
type(sequences)
|
||||
)
|
||||
)
|
||||
|
||||
if explain:
|
||||
previous_logger_level: int = logger.level
|
||||
logger.addHandler(explain_handler)
|
||||
logger.setLevel(TRACE)
|
||||
|
||||
length: int = len(sequences)
|
||||
|
||||
if length == 0:
|
||||
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
|
||||
|
||||
if cp_isolation is not None:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"cp_isolation is set. use this flag for debugging purpose. "
|
||||
"limited list of encoding allowed : %s.",
|
||||
", ".join(cp_isolation),
|
||||
)
|
||||
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
|
||||
else:
|
||||
cp_isolation = []
|
||||
|
||||
if cp_exclusion is not None:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"cp_exclusion is set. use this flag for debugging purpose. "
|
||||
"limited list of encoding excluded : %s.",
|
||||
", ".join(cp_exclusion),
|
||||
)
|
||||
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
|
||||
else:
|
||||
cp_exclusion = []
|
||||
|
||||
if length <= (chunk_size * steps):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
|
||||
steps,
|
||||
chunk_size,
|
||||
length,
|
||||
)
|
||||
steps = 1
|
||||
chunk_size = length
|
||||
|
||||
if steps > 1 and length / steps < chunk_size:
|
||||
chunk_size = int(length / steps)
|
||||
|
||||
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
|
||||
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
|
||||
|
||||
if is_too_small_sequence:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
|
||||
length
|
||||
),
|
||||
)
|
||||
elif is_too_large_sequence:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
|
||||
length
|
||||
),
|
||||
)
|
||||
|
||||
prioritized_encodings: list[str] = []
|
||||
|
||||
specified_encoding: str | None = (
|
||||
any_specified_encoding(sequences) if preemptive_behaviour else None
|
||||
)
|
||||
|
||||
if specified_encoding is not None:
|
||||
prioritized_encodings.append(specified_encoding)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Detected declarative mark in sequence. Priority +1 given for %s.",
|
||||
specified_encoding,
|
||||
)
|
||||
|
||||
tested: set[str] = set()
|
||||
tested_but_hard_failure: list[str] = []
|
||||
tested_but_soft_failure: list[str] = []
|
||||
soft_failure_skip: set[str] = set()
|
||||
|
||||
fallback_ascii: CharsetMatch | None = None
|
||||
fallback_u8: CharsetMatch | None = None
|
||||
fallback_specified: CharsetMatch | None = None
|
||||
|
||||
results: CharsetMatches = CharsetMatches()
|
||||
|
||||
early_stop_results: CharsetMatches = CharsetMatches()
|
||||
|
||||
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
|
||||
|
||||
if sig_encoding is not None:
|
||||
prioritized_encodings.append(sig_encoding)
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
|
||||
len(sig_payload),
|
||||
sig_encoding,
|
||||
)
|
||||
|
||||
prioritized_encodings.append("ascii")
|
||||
|
||||
if "utf_8" not in prioritized_encodings:
|
||||
prioritized_encodings.append("utf_8")
|
||||
|
||||
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
|
||||
if cp_isolation and encoding_iana not in cp_isolation:
|
||||
continue
|
||||
|
||||
if cp_exclusion and encoding_iana in cp_exclusion:
|
||||
continue
|
||||
|
||||
if encoding_iana in tested:
|
||||
continue
|
||||
|
||||
tested.add(encoding_iana)
|
||||
|
||||
decoded_payload: str | None = None
|
||||
bom_or_sig_available: bool = sig_encoding == encoding_iana
|
||||
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
|
||||
encoding_iana
|
||||
)
|
||||
|
||||
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
if encoding_iana in {"utf_7"} and not bom_or_sig_available:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
# Skip encodings similar to ones that already soft-failed (high mess ratio).
|
||||
# Checked BEFORE the expensive decode attempt.
|
||||
if encoding_iana in soft_failure_skip:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s is deemed too similar to a code page that was already considered unsuited. Continuing!",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
|
||||
except (ModuleNotFoundError, ImportError):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Encoding %s does not provide an IncrementalDecoder",
|
||||
encoding_iana,
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
if is_too_large_sequence and is_multi_byte_decoder is False:
|
||||
str(
|
||||
(
|
||||
sequences[: int(50e4)]
|
||||
if strip_sig_or_bom is False
|
||||
else sequences[len(sig_payload) : int(50e4)]
|
||||
),
|
||||
encoding=encoding_iana,
|
||||
)
|
||||
else:
|
||||
decoded_payload = str(
|
||||
(
|
||||
sequences
|
||||
if strip_sig_or_bom is False
|
||||
else sequences[len(sig_payload) :]
|
||||
),
|
||||
encoding=encoding_iana,
|
||||
)
|
||||
except (UnicodeDecodeError, LookupError) as e:
|
||||
if not isinstance(e, LookupError):
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
tested_but_hard_failure.append(encoding_iana)
|
||||
continue
|
||||
|
||||
r_ = range(
|
||||
0 if not bom_or_sig_available else len(sig_payload),
|
||||
length,
|
||||
int(length / steps),
|
||||
)
|
||||
|
||||
multi_byte_bonus: bool = (
|
||||
is_multi_byte_decoder
|
||||
and decoded_payload is not None
|
||||
and len(decoded_payload) < length
|
||||
)
|
||||
|
||||
if multi_byte_bonus:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Code page %s is a multi byte encoding table and it appear that at least one character "
|
||||
"was encoded using n-bytes.",
|
||||
encoding_iana,
|
||||
)
|
||||
|
||||
max_chunk_gave_up: int = int(len(r_) / 4)
|
||||
|
||||
max_chunk_gave_up = max(max_chunk_gave_up, 2)
|
||||
early_stop_count: int = 0
|
||||
lazy_str_hard_failure = False
|
||||
|
||||
md_chunks: list[str] = []
|
||||
md_ratios = []
|
||||
|
||||
try:
|
||||
for chunk in cut_sequence_chunks(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
r_,
|
||||
chunk_size,
|
||||
bom_or_sig_available,
|
||||
strip_sig_or_bom,
|
||||
sig_payload,
|
||||
is_multi_byte_decoder,
|
||||
decoded_payload,
|
||||
):
|
||||
md_chunks.append(chunk)
|
||||
|
||||
md_ratios.append(
|
||||
mess_ratio(
|
||||
chunk,
|
||||
threshold,
|
||||
explain is True and 1 <= len(cp_isolation) <= 2,
|
||||
)
|
||||
)
|
||||
|
||||
if md_ratios[-1] >= threshold:
|
||||
early_stop_count += 1
|
||||
|
||||
if (early_stop_count >= max_chunk_gave_up) or (
|
||||
bom_or_sig_available and strip_sig_or_bom is False
|
||||
):
|
||||
break
|
||||
except (
|
||||
UnicodeDecodeError
|
||||
) as e: # Lazy str loading may have missed something there
|
||||
logger.log(
|
||||
TRACE,
|
||||
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
early_stop_count = max_chunk_gave_up
|
||||
lazy_str_hard_failure = True
|
||||
|
||||
# We might want to check the sequence again with the whole content
|
||||
# Only if initial MD tests passes
|
||||
if (
|
||||
not lazy_str_hard_failure
|
||||
and is_too_large_sequence
|
||||
and not is_multi_byte_decoder
|
||||
):
|
||||
try:
|
||||
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
|
||||
except UnicodeDecodeError as e:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
|
||||
encoding_iana,
|
||||
str(e),
|
||||
)
|
||||
tested_but_hard_failure.append(encoding_iana)
|
||||
continue
|
||||
|
||||
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
|
||||
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
|
||||
tested_but_soft_failure.append(encoding_iana)
|
||||
if encoding_iana in IANA_SUPPORTED_SIMILAR:
|
||||
soft_failure_skip.update(IANA_SUPPORTED_SIMILAR[encoding_iana])
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
|
||||
"Computed mean chaos is %f %%.",
|
||||
encoding_iana,
|
||||
early_stop_count,
|
||||
round(mean_mess_ratio * 100, ndigits=3),
|
||||
)
|
||||
# Preparing those fallbacks in case we got nothing.
|
||||
if (
|
||||
enable_fallback
|
||||
and encoding_iana
|
||||
in ["ascii", "utf_8", specified_encoding, "utf_16", "utf_32"]
|
||||
and not lazy_str_hard_failure
|
||||
):
|
||||
fallback_entry = CharsetMatch(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
threshold,
|
||||
bom_or_sig_available,
|
||||
[],
|
||||
decoded_payload,
|
||||
preemptive_declaration=specified_encoding,
|
||||
)
|
||||
if encoding_iana == specified_encoding:
|
||||
fallback_specified = fallback_entry
|
||||
elif encoding_iana == "ascii":
|
||||
fallback_ascii = fallback_entry
|
||||
else:
|
||||
fallback_u8 = fallback_entry
|
||||
continue
|
||||
|
||||
logger.log(
|
||||
TRACE,
|
||||
"%s passed initial chaos probing. Mean measured chaos is %f %%",
|
||||
encoding_iana,
|
||||
round(mean_mess_ratio * 100, ndigits=3),
|
||||
)
|
||||
|
||||
if not is_multi_byte_decoder:
|
||||
target_languages: list[str] = encoding_languages(encoding_iana)
|
||||
else:
|
||||
target_languages = mb_encoding_languages(encoding_iana)
|
||||
|
||||
if target_languages:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"{} should target any language(s) of {}".format(
|
||||
encoding_iana, str(target_languages)
|
||||
),
|
||||
)
|
||||
|
||||
cd_ratios = []
|
||||
|
||||
# We shall skip the CD when it's about ASCII.
# Most of the time it's not relevant to run "language-detection" on it.
|
||||
if encoding_iana != "ascii":
|
||||
for chunk in md_chunks:
|
||||
chunk_languages = coherence_ratio(
|
||||
chunk,
|
||||
language_threshold,
|
||||
",".join(target_languages) if target_languages else None,
|
||||
)
|
||||
|
||||
cd_ratios.append(chunk_languages)
|
||||
|
||||
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
|
||||
|
||||
if cd_ratios_merged:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"We detected language {} using {}".format(
|
||||
cd_ratios_merged, encoding_iana
|
||||
),
|
||||
)
|
||||
|
||||
current_match = CharsetMatch(
|
||||
sequences,
|
||||
encoding_iana,
|
||||
mean_mess_ratio,
|
||||
bom_or_sig_available,
|
||||
cd_ratios_merged,
|
||||
(
|
||||
decoded_payload
|
||||
if (
|
||||
is_too_large_sequence is False
|
||||
or encoding_iana in [specified_encoding, "ascii", "utf_8"]
|
||||
)
|
||||
else None
|
||||
),
|
||||
preemptive_declaration=specified_encoding,
|
||||
)
|
||||
|
||||
results.append(current_match)
|
||||
|
||||
if (
|
||||
encoding_iana in [specified_encoding, "ascii", "utf_8"]
|
||||
and mean_mess_ratio < 0.1
|
||||
):
|
||||
# If md says nothing to worry about, then... stop immediately!
|
||||
if mean_mess_ratio == 0.0:
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one.",
|
||||
current_match.encoding,
|
||||
)
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([current_match])
|
||||
|
||||
early_stop_results.append(current_match)
|
||||
|
||||
if (
|
||||
len(early_stop_results)
|
||||
and (specified_encoding is None or specified_encoding in tested)
|
||||
and "ascii" in tested
|
||||
and "utf_8" in tested
|
||||
):
|
||||
probable_result: CharsetMatch = early_stop_results.best() # type: ignore[assignment]
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one.",
|
||||
probable_result.encoding,
|
||||
)
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
|
||||
return CharsetMatches([probable_result])
|
||||
|
||||
if encoding_iana == sig_encoding:
|
||||
logger.debug(
|
||||
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
|
||||
"the beginning of the sequence.",
|
||||
encoding_iana,
|
||||
)
|
||||
if explain: # Defensive: ensure exit path clean handler
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
return CharsetMatches([results[encoding_iana]])
|
||||
|
||||
if len(results) == 0:
|
||||
if fallback_u8 or fallback_ascii or fallback_specified:
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
|
||||
)
|
||||
|
||||
if fallback_specified:
|
||||
logger.debug(
|
||||
"Encoding detection: %s will be used as a fallback match",
|
||||
fallback_specified.encoding,
|
||||
)
|
||||
results.append(fallback_specified)
|
||||
elif (
|
||||
(fallback_u8 and fallback_ascii is None)
|
||||
or (
|
||||
fallback_u8
|
||||
and fallback_ascii
|
||||
and fallback_u8.fingerprint != fallback_ascii.fingerprint
|
||||
)
|
||||
or (fallback_u8 is not None)
|
||||
):
|
||||
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
|
||||
results.append(fallback_u8)
|
||||
elif fallback_ascii:
|
||||
logger.debug("Encoding detection: ascii will be used as a fallback match")
|
||||
results.append(fallback_ascii)
|
||||
|
||||
if results:
|
||||
logger.debug(
|
||||
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
|
||||
results.best().encoding, # type: ignore
|
||||
len(results) - 1,
|
||||
)
|
||||
else:
|
||||
logger.debug("Encoding detection: Unable to determine any suitable charset.")
|
||||
|
||||
if explain:
|
||||
logger.removeHandler(explain_handler)
|
||||
logger.setLevel(previous_logger_level)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def from_fp(
|
||||
fp: BinaryIO,
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Same as from_bytes, but using a file pointer that is already ready.
Will not close the file pointer.
|
||||
"""
|
||||
return from_bytes(
|
||||
fp.read(),
|
||||
steps,
|
||||
chunk_size,
|
||||
threshold,
|
||||
cp_isolation,
|
||||
cp_exclusion,
|
||||
preemptive_behaviour,
|
||||
explain,
|
||||
language_threshold,
|
||||
enable_fallback,
|
||||
)
|
||||
|
||||
|
||||
def from_path(
|
||||
path: str | bytes | PathLike, # type: ignore[type-arg]
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = True,
|
||||
) -> CharsetMatches:
|
||||
"""
|
||||
Same as from_bytes, but with one extra step: opening and reading the given file path in binary mode.
Can raise IOError.
|
||||
"""
|
||||
with open(path, "rb") as fp:
|
||||
return from_fp(
|
||||
fp,
|
||||
steps,
|
||||
chunk_size,
|
||||
threshold,
|
||||
cp_isolation,
|
||||
cp_exclusion,
|
||||
preemptive_behaviour,
|
||||
explain,
|
||||
language_threshold,
|
||||
enable_fallback,
|
||||
)
|
||||
|
||||
|
||||
def is_binary(
|
||||
fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg]
|
||||
steps: int = 5,
|
||||
chunk_size: int = 512,
|
||||
threshold: float = 0.20,
|
||||
cp_isolation: list[str] | None = None,
|
||||
cp_exclusion: list[str] | None = None,
|
||||
preemptive_behaviour: bool = True,
|
||||
explain: bool = False,
|
||||
language_threshold: float = 0.1,
|
||||
enable_fallback: bool = False,
|
||||
) -> bool:
|
||||
"""
|
||||
Detect whether the given input (file, bytes, or path) points to binary content, i.e. not a string.
Based on the same main heuristic algorithms and default kwargs, with the sole exception that fallback matches
are disabled, to be stricter with content that is ASCII-compatible but unlikely to be a string.
|
||||
"""
|
||||
if isinstance(fp_or_path_or_payload, (str, PathLike)):
|
||||
guesses = from_path(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
elif isinstance(
|
||||
fp_or_path_or_payload,
|
||||
(
|
||||
bytes,
|
||||
bytearray,
|
||||
),
|
||||
):
|
||||
guesses = from_bytes(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
else:
|
||||
guesses = from_fp(
|
||||
fp_or_path_or_payload,
|
||||
steps=steps,
|
||||
chunk_size=chunk_size,
|
||||
threshold=threshold,
|
||||
cp_isolation=cp_isolation,
|
||||
cp_exclusion=cp_exclusion,
|
||||
preemptive_behaviour=preemptive_behaviour,
|
||||
explain=explain,
|
||||
language_threshold=language_threshold,
|
||||
enable_fallback=enable_fallback,
|
||||
)
|
||||
|
||||
return not guesses
|
||||
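The functions above (from_bytes, from_fp, from_path, is_binary) are the public entry points of the vendored charset_normalizer package. A minimal usage sketch, assuming this venv's copy of the package; the payloads are made-up examples and the exact detected charset may vary:

```python
from charset_normalizer import from_bytes, is_binary

payload = "Bonjour, où êtes-vous ?".encode("cp1252")

best = from_bytes(payload).best()  # CharsetMatches -> best CharsetMatch, or None
if best is not None:
    print(best.encoding)           # a cp1252-compatible charset name
    print(str(best))               # the decoded text

print(is_binary(bytes(range(0x00, 0x20)) * 8))  # expected True: mostly unprintable bytes
```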
Binary file not shown.
421
venv/lib/python3.12/site-packages/charset_normalizer/cd.py
Normal file
@@ -0,0 +1,421 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
from codecs import IncrementalDecoder
|
||||
from collections import Counter
|
||||
from functools import lru_cache
|
||||
from typing import Counter as TypeCounter
|
||||
|
||||
from .constant import (
|
||||
FREQUENCIES,
|
||||
KO_NAMES,
|
||||
LANGUAGE_SUPPORTED_COUNT,
|
||||
TOO_SMALL_SEQUENCE,
|
||||
ZH_NAMES,
|
||||
_FREQUENCIES_SET,
|
||||
_FREQUENCIES_RANK,
|
||||
)
|
||||
from .md import is_suspiciously_successive_range
|
||||
from .models import CoherenceMatches
|
||||
from .utils import (
|
||||
is_accentuated,
|
||||
is_latin,
|
||||
is_multi_byte_encoding,
|
||||
is_unicode_range_secondary,
|
||||
unicode_range,
|
||||
)
|
||||
|
||||
|
||||
def encoding_unicode_range(iana_name: str) -> list[str]:
|
||||
"""
|
||||
Return the Unicode ranges associated with a single-byte code page.
|
||||
"""
|
||||
if is_multi_byte_encoding(iana_name):
|
||||
raise OSError("Function not supported on multi-byte code page")
|
||||
|
||||
decoder = importlib.import_module(f"encodings.{iana_name}").IncrementalDecoder
|
||||
|
||||
p: IncrementalDecoder = decoder(errors="ignore")
|
||||
seen_ranges: dict[str, int] = {}
|
||||
character_count: int = 0
|
||||
|
||||
for i in range(0x40, 0xFF):
|
||||
chunk: str = p.decode(bytes([i]))
|
||||
|
||||
if chunk:
|
||||
character_range: str | None = unicode_range(chunk)
|
||||
|
||||
if character_range is None:
|
||||
continue
|
||||
|
||||
if is_unicode_range_secondary(character_range) is False:
|
||||
if character_range not in seen_ranges:
|
||||
seen_ranges[character_range] = 0
|
||||
seen_ranges[character_range] += 1
|
||||
character_count += 1
|
||||
|
||||
return sorted(
|
||||
[
|
||||
character_range
|
||||
for character_range in seen_ranges
|
||||
if seen_ranges[character_range] / character_count >= 0.15
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def unicode_range_languages(primary_range: str) -> list[str]:
|
||||
"""
|
||||
Return the languages inferred from a given Unicode range.
|
||||
"""
|
||||
languages: list[str] = []
|
||||
|
||||
for language, characters in FREQUENCIES.items():
|
||||
for character in characters:
|
||||
if unicode_range(character) == primary_range:
|
||||
languages.append(language)
|
||||
break
|
||||
|
||||
return languages
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def encoding_languages(iana_name: str) -> list[str]:
|
||||
"""
|
||||
Single-byte encoding to language association. Some code pages are strongly tied to particular language(s);
this function performs that correspondence.
|
||||
"""
|
||||
unicode_ranges: list[str] = encoding_unicode_range(iana_name)
|
||||
primary_range: str | None = None
|
||||
|
||||
for specified_range in unicode_ranges:
|
||||
if "Latin" not in specified_range:
|
||||
primary_range = specified_range
|
||||
break
|
||||
|
||||
if primary_range is None:
|
||||
return ["Latin Based"]
|
||||
|
||||
return unicode_range_languages(primary_range)
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def mb_encoding_languages(iana_name: str) -> list[str]:
|
||||
"""
|
||||
Multi-byte encoding to language association. Some code pages are strongly tied to particular language(s);
this function performs that correspondence.
|
||||
"""
|
||||
if (
|
||||
iana_name.startswith("shift_")
|
||||
or iana_name.startswith("iso2022_jp")
|
||||
or iana_name.startswith("euc_j")
|
||||
or iana_name == "cp932"
|
||||
):
|
||||
return ["Japanese"]
|
||||
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
|
||||
return ["Chinese"]
|
||||
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
|
||||
return ["Korean"]
|
||||
|
||||
return []
|
||||
|
||||
|
||||
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
|
||||
def get_target_features(language: str) -> tuple[bool, bool]:
|
||||
"""
|
||||
Determine, for a supported language, whether it contains accented characters and whether it is purely Latin-based.
|
||||
"""
|
||||
target_have_accents: bool = False
|
||||
target_pure_latin: bool = True
|
||||
|
||||
for character in FREQUENCIES[language]:
|
||||
if not target_have_accents and is_accentuated(character):
|
||||
target_have_accents = True
|
||||
if target_pure_latin and is_latin(character) is False:
|
||||
target_pure_latin = False
|
||||
|
||||
return target_have_accents, target_pure_latin
|
||||
|
||||
|
||||
def alphabet_languages(
|
||||
characters: list[str], ignore_non_latin: bool = False
|
||||
) -> list[str]:
|
||||
"""
|
||||
Return the languages associated with the given characters.
|
||||
"""
|
||||
languages: list[tuple[str, float]] = []
|
||||
|
||||
characters_set: frozenset[str] = frozenset(characters)
|
||||
source_have_accents = any(is_accentuated(character) for character in characters)
|
||||
|
||||
for language, language_characters in FREQUENCIES.items():
|
||||
target_have_accents, target_pure_latin = get_target_features(language)
|
||||
|
||||
if ignore_non_latin and target_pure_latin is False:
|
||||
continue
|
||||
|
||||
if target_have_accents is False and source_have_accents:
|
||||
continue
|
||||
|
||||
character_count: int = len(language_characters)
|
||||
|
||||
character_match_count: int = len(_FREQUENCIES_SET[language] & characters_set)
|
||||
|
||||
ratio: float = character_match_count / character_count
|
||||
|
||||
if ratio >= 0.2:
|
||||
languages.append((language, ratio))
|
||||
|
||||
languages = sorted(languages, key=lambda x: x[1], reverse=True)
|
||||
|
||||
return [compatible_language[0] for compatible_language in languages]
|
||||
|
||||
|
||||
def characters_popularity_compare(
|
||||
language: str, ordered_characters: list[str]
|
||||
) -> float:
|
||||
"""
|
||||
Determine whether an ordered character list (from most frequent to rarest) matches a particular language.
The result is a ratio between 0.0 (no correspondence at all) and 1.0 (near-perfect fit).
Beware that this function is deliberately lenient on the match in order to ease detection (a close match scores as a full match).
|
||||
"""
|
||||
if language not in FREQUENCIES:
|
||||
raise ValueError(f"{language} not available")
|
||||
|
||||
character_approved_count: int = 0
|
||||
frequencies_language_set: frozenset[str] = _FREQUENCIES_SET[language]
|
||||
lang_rank: dict[str, int] = _FREQUENCIES_RANK[language]
|
||||
|
||||
ordered_characters_count: int = len(ordered_characters)
|
||||
target_language_characters_count: int = len(FREQUENCIES[language])
|
||||
|
||||
large_alphabet: bool = target_language_characters_count > 26
|
||||
|
||||
expected_projection_ratio: float = (
|
||||
target_language_characters_count / ordered_characters_count
|
||||
)
|
||||
|
||||
# Pre-built rank dict for ordered_characters (avoids repeated list slicing).
|
||||
ordered_rank: dict[str, int] = {
|
||||
char: rank for rank, char in enumerate(ordered_characters)
|
||||
}
|
||||
|
||||
# Pre-compute characters common to both orderings.
|
||||
# Avoids repeated `c in ordered_rank` dict lookups in the inner counts.
|
||||
common_chars: list[tuple[int, int]] = [
|
||||
(lr, ordered_rank[c]) for c, lr in lang_rank.items() if c in ordered_rank
|
||||
]
|
||||
|
||||
for character, character_rank in zip(
|
||||
ordered_characters, range(0, ordered_characters_count)
|
||||
):
|
||||
if character not in frequencies_language_set:
|
||||
continue
|
||||
|
||||
character_rank_in_language: int = lang_rank[character]
|
||||
character_rank_projection: int = int(character_rank * expected_projection_ratio)
|
||||
|
||||
if (
|
||||
large_alphabet is False
|
||||
and abs(character_rank_projection - character_rank_in_language) > 4
|
||||
):
|
||||
continue
|
||||
|
||||
if (
|
||||
large_alphabet is True
|
||||
and abs(character_rank_projection - character_rank_in_language)
|
||||
< target_language_characters_count / 3
|
||||
):
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
# Count how many characters appear "before" in both orderings,
|
||||
# and how many appear "at or after" in both orderings.
|
||||
before_match_count: int = sum(
|
||||
1
|
||||
for lr, orr in common_chars
|
||||
if lr < character_rank_in_language and orr < character_rank
|
||||
)
|
||||
|
||||
after_len: int = target_language_characters_count - character_rank_in_language
|
||||
after_match_count: int = sum(
|
||||
1
|
||||
for lr, orr in common_chars
|
||||
if lr >= character_rank_in_language and orr >= character_rank
|
||||
)
|
||||
|
||||
if character_rank_in_language == 0 and before_match_count <= 4:
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
if after_len == 0 and after_match_count <= 4:
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
if (
|
||||
character_rank_in_language > 0
|
||||
and before_match_count / character_rank_in_language >= 0.4
|
||||
) or (after_len > 0 and after_match_count / after_len >= 0.4):
|
||||
character_approved_count += 1
|
||||
continue
|
||||
|
||||
return character_approved_count / len(ordered_characters)
|
||||
|
||||
|
||||
def alpha_unicode_split(decoded_sequence: str) -> list[str]:
|
||||
"""
|
||||
Given a decoded text sequence, return a list of str, separated by Unicode range / alphabet.
E.g. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list:
one containing the Latin letters and the other the Hebrew ones.
|
||||
"""
|
||||
layers: dict[str, list[str]] = {}
|
||||
|
||||
# Fast path: track single-layer key to skip dict iteration for single-script text.
|
||||
single_layer_key: str | None = None
|
||||
multi_layer: bool = False
|
||||
|
||||
for character in decoded_sequence:
|
||||
if character.isalpha() is False:
|
||||
continue
|
||||
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
continue
|
||||
|
||||
layer_target_range: str | None = None
|
||||
|
||||
if multi_layer:
|
||||
for discovered_range in layers:
|
||||
if (
|
||||
is_suspiciously_successive_range(discovered_range, character_range)
|
||||
is False
|
||||
):
|
||||
layer_target_range = discovered_range
|
||||
break
|
||||
elif single_layer_key is not None:
|
||||
if (
|
||||
is_suspiciously_successive_range(single_layer_key, character_range)
|
||||
is False
|
||||
):
|
||||
layer_target_range = single_layer_key
|
||||
|
||||
if layer_target_range is None:
|
||||
layer_target_range = character_range
|
||||
|
||||
if layer_target_range not in layers:
|
||||
layers[layer_target_range] = []
|
||||
if single_layer_key is None:
|
||||
single_layer_key = layer_target_range
|
||||
else:
|
||||
multi_layer = True
|
||||
|
||||
layers[layer_target_range].append(character)
|
||||
|
||||
return ["".join(chars).lower() for chars in layers.values()]
|
||||
|
||||
|
||||
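To make the alpha_unicode_split docstring concrete, a small illustrative call (this is an internal helper of charset_normalizer.cd; the sample string is invented):

```python
from charset_normalizer.cd import alpha_unicode_split

print(alpha_unicode_split("Hello friend שלום חבר 123"))
# Roughly ['hellofriend', 'שלוםחבר']: one lowercased layer per script;
# digits and whitespace are skipped because they are not alphabetic.
```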
def merge_coherence_ratios(results: list[CoherenceMatches]) -> CoherenceMatches:
|
||||
"""
|
||||
This function merges results previously produced by coherence_ratio.
|
||||
The return type is the same as coherence_ratio.
|
||||
"""
|
||||
per_language_ratios: dict[str, list[float]] = {}
|
||||
for result in results:
|
||||
for sub_result in result:
|
||||
language, ratio = sub_result
|
||||
if language not in per_language_ratios:
|
||||
per_language_ratios[language] = [ratio]
|
||||
continue
|
||||
per_language_ratios[language].append(ratio)
|
||||
|
||||
merge = [
|
||||
(
|
||||
language,
|
||||
round(
|
||||
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
|
||||
4,
|
||||
),
|
||||
)
|
||||
for language in per_language_ratios
|
||||
]
|
||||
|
||||
return sorted(merge, key=lambda x: x[1], reverse=True)
|
||||
|
||||
|
||||
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
|
||||
"""
|
||||
We shall NOT return "English—" in CoherenceMatches because it is an alternative of "English".
This function keeps only the best match and removes the em-dash from its name.
|
||||
"""
|
||||
index_results: dict[str, list[float]] = dict()
|
||||
|
||||
for result in results:
|
||||
language, ratio = result
|
||||
no_em_name: str = language.replace("—", "")
|
||||
|
||||
if no_em_name not in index_results:
|
||||
index_results[no_em_name] = []
|
||||
|
||||
index_results[no_em_name].append(ratio)
|
||||
|
||||
if any(len(index_results[e]) > 1 for e in index_results):
|
||||
filtered_results: CoherenceMatches = []
|
||||
|
||||
for language in index_results:
|
||||
filtered_results.append((language, max(index_results[language])))
|
||||
|
||||
return filtered_results
|
||||
|
||||
return results
|
||||
|
||||
|
||||
@lru_cache(maxsize=2048)
|
||||
def coherence_ratio(
|
||||
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: str | None = None
|
||||
) -> CoherenceMatches:
|
||||
"""
|
||||
Detect ANY language that can be identified in the given sequence. The sequence is analysed in layers.
A layer = character extraction by alphabet / Unicode range.
|
||||
"""
|
||||
|
||||
results: list[tuple[str, float]] = []
|
||||
ignore_non_latin: bool = False
|
||||
|
||||
sufficient_match_count: int = 0
|
||||
|
||||
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
|
||||
if "Latin Based" in lg_inclusion_list:
|
||||
ignore_non_latin = True
|
||||
lg_inclusion_list.remove("Latin Based")
|
||||
|
||||
for layer in alpha_unicode_split(decoded_sequence):
|
||||
sequence_frequencies: TypeCounter[str] = Counter(layer)
|
||||
most_common = sequence_frequencies.most_common()
|
||||
|
||||
character_count: int = len(layer)
|
||||
|
||||
if character_count <= TOO_SMALL_SEQUENCE:
|
||||
continue
|
||||
|
||||
popular_character_ordered: list[str] = [c for c, o in most_common]
|
||||
|
||||
for language in lg_inclusion_list or alphabet_languages(
|
||||
popular_character_ordered, ignore_non_latin
|
||||
):
|
||||
ratio: float = characters_popularity_compare(
|
||||
language, popular_character_ordered
|
||||
)
|
||||
|
||||
if ratio < threshold:
|
||||
continue
|
||||
elif ratio >= 0.8:
|
||||
sufficient_match_count += 1
|
||||
|
||||
results.append((language, round(ratio, 4)))
|
||||
|
||||
if sufficient_match_count >= 3:
|
||||
break
|
||||
|
||||
return sorted(
|
||||
filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
|
||||
)
|
||||
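A hedged sketch of how coherence_ratio behaves (internal API of charset_normalizer.cd; the exact ratios depend on the bundled frequency tables):

```python
from charset_normalizer.cd import coherence_ratio

matches = coherence_ratio("Ceci est un petit texte écrit en français, rien de très compliqué.")
print(matches)  # e.g. [('French', 0.6...), ...] sorted by descending ratio
```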
@@ -0,0 +1,8 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .__main__ import cli_detect, query_yes_no
|
||||
|
||||
__all__ = (
|
||||
"cli_detect",
|
||||
"query_yes_no",
|
||||
)
|
||||
@@ -0,0 +1,363 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import typing
|
||||
from json import dumps
|
||||
from os.path import abspath, basename, dirname, join, realpath
|
||||
from platform import python_version
|
||||
from unicodedata import unidata_version
|
||||
|
||||
import charset_normalizer.md as md_module
|
||||
from charset_normalizer import from_fp
|
||||
from charset_normalizer.models import CliDetectionResult
|
||||
from charset_normalizer.version import __version__
|
||||
|
||||
|
||||
def query_yes_no(question: str, default: str = "yes") -> bool:
|
||||
"""Ask a yes/no question via input() and return the answer as a bool."""
|
||||
prompt = " [Y/n] " if default == "yes" else " [y/N] "
|
||||
|
||||
while True:
|
||||
choice = input(question + prompt).strip().lower()
|
||||
if not choice:
|
||||
return default == "yes"
|
||||
if choice in ("y", "yes"):
|
||||
return True
|
||||
if choice in ("n", "no"):
|
||||
return False
|
||||
print("Please respond with 'y' or 'n'.")
|
||||
|
||||
|
||||
class FileType:
|
||||
"""Factory for creating file object types
|
||||
|
||||
Instances of FileType are typically passed as type= arguments to the
|
||||
ArgumentParser add_argument() method.
|
||||
|
||||
Keyword Arguments:
|
||||
- mode -- A string indicating how the file is to be opened. Accepts the
|
||||
same values as the builtin open() function.
|
||||
- bufsize -- The file's desired buffer size. Accepts the same values as
|
||||
the builtin open() function.
|
||||
- encoding -- The file's encoding. Accepts the same values as the
|
||||
builtin open() function.
|
||||
- errors -- A string indicating how encoding and decoding errors are to
|
||||
be handled. Accepts the same value as the builtin open() function.
|
||||
|
||||
Backported from CPython 3.12
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
mode: str = "r",
|
||||
bufsize: int = -1,
|
||||
encoding: str | None = None,
|
||||
errors: str | None = None,
|
||||
):
|
||||
self._mode = mode
|
||||
self._bufsize = bufsize
|
||||
self._encoding = encoding
|
||||
self._errors = errors
|
||||
|
||||
def __call__(self, string: str) -> typing.IO: # type: ignore[type-arg]
|
||||
# the special argument "-" means sys.std{in,out}
|
||||
if string == "-":
|
||||
if "r" in self._mode:
|
||||
return sys.stdin.buffer if "b" in self._mode else sys.stdin
|
||||
elif any(c in self._mode for c in "wax"):
|
||||
return sys.stdout.buffer if "b" in self._mode else sys.stdout
|
||||
else:
|
||||
msg = f'argument "-" with mode {self._mode}'
|
||||
raise ValueError(msg)
|
||||
|
||||
# all other arguments are used as file names
|
||||
try:
|
||||
return open(string, self._mode, self._bufsize, self._encoding, self._errors)
|
||||
except OSError as e:
|
||||
message = f"can't open '{string}': {e}"
|
||||
raise argparse.ArgumentTypeError(message)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
args = self._mode, self._bufsize
|
||||
kwargs = [("encoding", self._encoding), ("errors", self._errors)]
|
||||
args_str = ", ".join(
|
||||
[repr(arg) for arg in args if arg != -1]
|
||||
+ [f"{kw}={arg!r}" for kw, arg in kwargs if arg is not None]
|
||||
)
|
||||
return f"{type(self).__name__}({args_str})"
|
||||
|
||||
|
||||
def cli_detect(argv: list[str] | None = None) -> int:
|
||||
"""
|
||||
CLI assistant using ARGV and ArgumentParser
|
||||
:param argv:
|
||||
:return: 0 if everything went fine, any other value means trouble
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="The Real First Universal Charset Detector. "
|
||||
"Discover originating encoding used on text file. "
|
||||
"Normalize text to unicode."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"files", type=FileType("rb"), nargs="+", help="File(s) to be analysed"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="verbose",
|
||||
help="Display complementary information about file if any. "
|
||||
"Stdout will contain logs about the detection process.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-a",
|
||||
"--with-alternative",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="alternatives",
|
||||
help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-n",
|
||||
"--normalize",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="normalize",
|
||||
help="Permit to normalize input file. If not set, program does not write anything.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-m",
|
||||
"--minimal",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="minimal",
|
||||
help="Only output the charset detected to STDOUT. Disabling JSON output.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-r",
|
||||
"--replace",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="replace",
|
||||
help="Replace file when trying to normalize it instead of creating a new one.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-f",
|
||||
"--force",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="force",
|
||||
help="Replace file without asking if you are sure, use this flag with caution.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-i",
|
||||
"--no-preemptive",
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="no_preemptive",
|
||||
help="Disable looking at a charset declaration to hint the detector.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-t",
|
||||
"--threshold",
|
||||
action="store",
|
||||
default=0.2,
|
||||
type=float,
|
||||
dest="threshold",
|
||||
help="Define a custom maximum amount of noise allowed in decoded content. 0. <= noise <= 1.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--version",
|
||||
action="version",
|
||||
version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format(
|
||||
__version__,
|
||||
python_version(),
|
||||
unidata_version,
|
||||
"OFF" if md_module.__file__.lower().endswith(".py") else "ON",
|
||||
),
|
||||
help="Show version information and exit.",
|
||||
)
|
||||
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
if args.replace is True and args.normalize is False:
|
||||
if args.files:
|
||||
for my_file in args.files:
|
||||
my_file.close()
|
||||
print("Use --replace in addition of --normalize only.", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
if args.force is True and args.replace is False:
|
||||
if args.files:
|
||||
for my_file in args.files:
|
||||
my_file.close()
|
||||
print("Use --force in addition of --replace only.", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
if args.threshold < 0.0 or args.threshold > 1.0:
|
||||
if args.files:
|
||||
for my_file in args.files:
|
||||
my_file.close()
|
||||
print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
x_ = []
|
||||
|
||||
for my_file in args.files:
|
||||
matches = from_fp(
|
||||
my_file,
|
||||
threshold=args.threshold,
|
||||
explain=args.verbose,
|
||||
preemptive_behaviour=args.no_preemptive is False,
|
||||
)
|
||||
|
||||
best_guess = matches.best()
|
||||
|
||||
if best_guess is None:
|
||||
print(
|
||||
'Unable to identify originating encoding for "{}". {}'.format(
|
||||
my_file.name,
|
||||
(
|
||||
"Maybe try increasing maximum amount of chaos."
|
||||
if args.threshold < 1.0
|
||||
else ""
|
||||
),
|
||||
),
|
||||
file=sys.stderr,
|
||||
)
|
||||
x_.append(
|
||||
CliDetectionResult(
|
||||
abspath(my_file.name),
|
||||
None,
|
||||
[],
|
||||
[],
|
||||
"Unknown",
|
||||
[],
|
||||
False,
|
||||
1.0,
|
||||
0.0,
|
||||
None,
|
||||
True,
|
||||
)
|
||||
)
|
||||
else:
|
||||
x_.append(
|
||||
CliDetectionResult(
|
||||
abspath(my_file.name),
|
||||
best_guess.encoding,
|
||||
best_guess.encoding_aliases,
|
||||
[
|
||||
cp
|
||||
for cp in best_guess.could_be_from_charset
|
||||
if cp != best_guess.encoding
|
||||
],
|
||||
best_guess.language,
|
||||
best_guess.alphabets,
|
||||
best_guess.bom,
|
||||
best_guess.percent_chaos,
|
||||
best_guess.percent_coherence,
|
||||
None,
|
||||
True,
|
||||
)
|
||||
)
|
||||
|
||||
if len(matches) > 1 and args.alternatives:
|
||||
for el in matches:
|
||||
if el != best_guess:
|
||||
x_.append(
|
||||
CliDetectionResult(
|
||||
abspath(my_file.name),
|
||||
el.encoding,
|
||||
el.encoding_aliases,
|
||||
[
|
||||
cp
|
||||
for cp in el.could_be_from_charset
|
||||
if cp != el.encoding
|
||||
],
|
||||
el.language,
|
||||
el.alphabets,
|
||||
el.bom,
|
||||
el.percent_chaos,
|
||||
el.percent_coherence,
|
||||
None,
|
||||
False,
|
||||
)
|
||||
)
|
||||
|
||||
if args.normalize is True:
|
||||
if best_guess.encoding.startswith("utf") is True:
|
||||
print(
|
||||
'"{}" file does not need to be normalized, as it already came from unicode.'.format(
|
||||
my_file.name
|
||||
),
|
||||
file=sys.stderr,
|
||||
)
|
||||
if my_file.closed is False:
|
||||
my_file.close()
|
||||
continue
|
||||
|
||||
dir_path = dirname(realpath(my_file.name))
|
||||
file_name = basename(realpath(my_file.name))
|
||||
|
||||
o_: list[str] = file_name.split(".")
|
||||
|
||||
if args.replace is False:
|
||||
o_.insert(-1, best_guess.encoding)
|
||||
if my_file.closed is False:
|
||||
my_file.close()
|
||||
elif (
|
||||
args.force is False
|
||||
and query_yes_no(
|
||||
'Are you sure to normalize "{}" by replacing it ?'.format(
|
||||
my_file.name
|
||||
),
|
||||
"no",
|
||||
)
|
||||
is False
|
||||
):
|
||||
if my_file.closed is False:
|
||||
my_file.close()
|
||||
continue
|
||||
|
||||
try:
|
||||
x_[0].unicode_path = join(dir_path, ".".join(o_))
|
||||
|
||||
with open(x_[0].unicode_path, "wb") as fp:
|
||||
fp.write(best_guess.output())
|
||||
except OSError as e:
|
||||
print(str(e), file=sys.stderr)
|
||||
if my_file.closed is False:
|
||||
my_file.close()
|
||||
return 2
|
||||
|
||||
if my_file.closed is False:
|
||||
my_file.close()
|
||||
|
||||
if args.minimal is False:
|
||||
print(
|
||||
dumps(
|
||||
[el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
|
||||
ensure_ascii=True,
|
||||
indent=4,
|
||||
)
|
||||
)
|
||||
else:
|
||||
for my_file in args.files:
|
||||
print(
|
||||
", ".join(
|
||||
[
|
||||
el.encoding or "undefined"
|
||||
for el in x_
|
||||
if el.path == abspath(my_file.name)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli_detect()
|
||||
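cli_detect above can also be driven programmatically, not only through the console script; a minimal sketch (the file name is a placeholder):

```python
from charset_normalizer.cli import cli_detect

# Equivalent to passing -m on the command line: print only the detected charset name.
exit_code = cli_detect(["-m", "some_text_file.txt"])
print(exit_code)  # 0 when detection succeeded, non-zero otherwise
```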
Binary file not shown.
Binary file not shown.
2031
venv/lib/python3.12/site-packages/charset_normalizer/constant.py
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,80 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from warnings import warn
|
||||
|
||||
from .api import from_bytes
|
||||
from .constant import CHARDET_CORRESPONDENCE, TOO_SMALL_SEQUENCE
|
||||
|
||||
# TODO: remove this check when dropping Python 3.7 support
|
||||
if TYPE_CHECKING:
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
class ResultDict(TypedDict):
|
||||
encoding: str | None
|
||||
language: str
|
||||
confidence: float | None
|
||||
|
||||
|
||||
def detect(
|
||||
byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
|
||||
) -> ResultDict:
|
||||
"""
|
||||
chardet legacy method
|
||||
Detect the encoding of the given byte string. It should be mostly backward-compatible with chardet.
The encoding name will match chardet's own naming whenever possible (not for encodings chardet does not support).
This function is the deprecated (legacy) interface, kept to ease migrating an existing project; consult the
documentation for further information. Not planned for removal.
|
||||
|
||||
:param byte_str: The byte sequence to examine.
|
||||
:param should_rename_legacy: Should we rename legacy encodings
|
||||
to their more modern equivalents?
|
||||
"""
|
||||
if len(kwargs):
|
||||
warn(
|
||||
f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
|
||||
)
|
||||
|
||||
if not isinstance(byte_str, (bytearray, bytes)):
|
||||
raise TypeError( # pragma: nocover
|
||||
f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
|
||||
)
|
||||
|
||||
if isinstance(byte_str, bytearray):
|
||||
byte_str = bytes(byte_str)
|
||||
|
||||
r = from_bytes(byte_str).best()
|
||||
|
||||
encoding = r.encoding if r is not None else None
|
||||
language = r.language if r is not None and r.language != "Unknown" else ""
|
||||
confidence = 1.0 - r.chaos if r is not None else None
|
||||
|
||||
# automatically lower confidence
|
||||
# on small bytes samples.
|
||||
# https://github.com/jawah/charset_normalizer/issues/391
|
||||
if (
|
||||
confidence is not None
|
||||
and confidence >= 0.9
|
||||
and encoding
|
||||
not in {
|
||||
"utf_8",
|
||||
"ascii",
|
||||
}
|
||||
and r.bom is False # type: ignore[union-attr]
|
||||
and len(byte_str) < TOO_SMALL_SEQUENCE
|
||||
):
|
||||
confidence -= 0.2
|
||||
|
||||
# Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
|
||||
# but chardet does return 'utf-8-sig' and it is a valid codec name.
|
||||
if r is not None and encoding == "utf_8" and r.bom:
|
||||
encoding += "_sig"
|
||||
|
||||
if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
|
||||
encoding = CHARDET_CORRESPONDENCE[encoding]
|
||||
|
||||
return {
|
||||
"encoding": encoding,
|
||||
"language": language,
|
||||
"confidence": confidence,
|
||||
}
|
||||
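The legacy shim above mirrors chardet's detect(); a minimal usage sketch (sample bytes and the reported values are illustrative):

```python
from charset_normalizer import detect

result = detect("Привет, мир! Это небольшой пример текста на русском языке.".encode("cp1251"))
print(result)
# Expect a Cyrillic-capable charset (e.g. a windows-1251 equivalent), language 'Russian',
# and a confidence between 0 and 1; exact values depend on the heuristics.
```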
Binary file not shown.
744
venv/lib/python3.12/site-packages/charset_normalizer/md.py
Normal file
@@ -0,0 +1,744 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from logging import getLogger
|
||||
|
||||
if sys.version_info >= (3, 8):
|
||||
from typing import final
|
||||
else:
|
||||
try:
|
||||
from typing_extensions import final
|
||||
except ImportError:
|
||||
|
||||
def final(cls): # type: ignore[misc,no-untyped-def]
|
||||
return cls
|
||||
|
||||
|
||||
from .constant import (
|
||||
COMMON_SAFE_ASCII_CHARACTERS,
|
||||
TRACE,
|
||||
UNICODE_SECONDARY_RANGE_KEYWORD,
|
||||
_ACCENTUATED,
|
||||
_CJK,
|
||||
_HANGUL,
|
||||
_HIRAGANA,
|
||||
_KATAKANA,
|
||||
_LATIN,
|
||||
_THAI,
|
||||
)
|
||||
from .utils import (
|
||||
_character_flags,
|
||||
is_accentuated,
|
||||
is_arabic,
|
||||
is_arabic_isolated_form,
|
||||
is_case_variable,
|
||||
is_cjk,
|
||||
is_emoticon,
|
||||
is_latin,
|
||||
is_punctuation,
|
||||
is_separator,
|
||||
is_symbol,
|
||||
is_unprintable,
|
||||
remove_accent,
|
||||
unicode_range,
|
||||
is_cjk_uncommon,
|
||||
)
|
||||
|
||||
# Combined bitmask for CJK/Hangul/Katakana/Hiragana/Thai glyph detection.
|
||||
_GLYPH_MASK: int = _CJK | _HANGUL | _KATAKANA | _HIRAGANA | _THAI
|
||||
|
||||
|
||||
class MessDetectorPlugin:
|
||||
"""
|
||||
Base abstract class used for mess detection plugins.
|
||||
All detectors MUST extend and implement given methods.
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
"""
|
||||
Determine if given character should be fed in.
|
||||
"""
|
||||
raise NotImplementedError # pragma: nocover
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
"""
|
||||
The main routine, executed for each character.
Insert here the logic by which the text would be considered chaotic.
|
||||
"""
|
||||
raise NotImplementedError # pragma: nocover
|
||||
|
||||
def reset(self) -> None: # pragma: no cover
|
||||
"""
|
||||
Reset the plugin to its initial state.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
"""
|
||||
Compute the chaos ratio based on what feed() has seen.
Must NOT be lower than 0.0; there is no upper bound restriction.
|
||||
"""
|
||||
raise NotImplementedError # pragma: nocover
|
||||
|
||||
|
||||
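As an illustration of the MessDetectorPlugin contract (eligible/feed/reset/ratio) described above, a toy subclass that is not part of the library:

```python
from charset_normalizer.md import MessDetectorPlugin


class LongDigitRunPlugin(MessDetectorPlugin):
    """Toy detector: flags text that is almost entirely digits."""

    __slots__ = ("_digit_count", "_character_count")

    def __init__(self) -> None:
        self._digit_count: int = 0
        self._character_count: int = 0

    def eligible(self, character: str) -> bool:
        return character.isprintable()

    def feed(self, character: str) -> None:
        self._character_count += 1
        if character.isdigit():
            self._digit_count += 1

    def reset(self) -> None:
        self._digit_count = 0
        self._character_count = 0

    @property
    def ratio(self) -> float:
        # Only report chaos once enough characters were seen and digits dominate.
        if self._character_count < 16:
            return 0.0
        digit_ratio = self._digit_count / self._character_count
        return digit_ratio if digit_ratio >= 0.8 else 0.0
```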
@final
|
||||
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
|
||||
__slots__ = (
|
||||
"_punctuation_count",
|
||||
"_symbol_count",
|
||||
"_character_count",
|
||||
"_last_printable_char",
|
||||
"_frenzy_symbol_in_word",
|
||||
)
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._punctuation_count: int = 0
|
||||
self._symbol_count: int = 0
|
||||
self._character_count: int = 0
|
||||
|
||||
self._last_printable_char: str | None = None
|
||||
self._frenzy_symbol_in_word: bool = False
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isprintable()
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if (
|
||||
character != self._last_printable_char
|
||||
and character not in COMMON_SAFE_ASCII_CHARACTERS
|
||||
):
|
||||
if is_punctuation(character):
|
||||
self._punctuation_count += 1
|
||||
elif (
|
||||
not character.isdigit()
|
||||
and is_symbol(character)
|
||||
and not is_emoticon(character)
|
||||
):
|
||||
self._symbol_count += 2
|
||||
|
||||
self._last_printable_char = character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._punctuation_count = 0
|
||||
self._character_count = 0
|
||||
self._symbol_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
ratio_of_punctuation: float = (
|
||||
self._punctuation_count + self._symbol_count
|
||||
) / self._character_count
|
||||
|
||||
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
|
||||
|
||||
|
||||
@final
|
||||
class TooManyAccentuatedPlugin(MessDetectorPlugin):
|
||||
__slots__ = ("_character_count", "_accentuated_count")
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._character_count: int = 0
|
||||
self._accentuated_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isalpha()
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if is_accentuated(character):
|
||||
self._accentuated_count += 1
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._accentuated_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count < 8:
|
||||
return 0.0
|
||||
|
||||
ratio_of_accentuation: float = self._accentuated_count / self._character_count
|
||||
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
|
||||
|
||||
|
||||
@final
|
||||
class UnprintablePlugin(MessDetectorPlugin):
|
||||
__slots__ = ("_unprintable_count", "_character_count")
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._unprintable_count: int = 0
|
||||
self._character_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
if is_unprintable(character):
|
||||
self._unprintable_count += 1
|
||||
self._character_count += 1
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._unprintable_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
return (self._unprintable_count * 8) / self._character_count
|
||||
|
||||
|
||||
@final
|
||||
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
|
||||
__slots__ = (
|
||||
"_successive_count",
|
||||
"_character_count",
|
||||
"_last_latin_character",
|
||||
"_last_was_accentuated",
|
||||
)
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._successive_count: int = 0
|
||||
self._character_count: int = 0
|
||||
|
||||
self._last_latin_character: str | None = None
|
||||
self._last_was_accentuated: bool = False
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isalpha() and is_latin(character)
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
current_accentuated: bool = is_accentuated(character)
|
||||
if (
|
||||
self._last_latin_character is not None
|
||||
and current_accentuated
|
||||
and self._last_was_accentuated
|
||||
):
|
||||
if character.isupper() and self._last_latin_character.isupper():
|
||||
self._successive_count += 1
|
||||
# Worse if it's the same character duplicated with a different accent.
|
||||
if remove_accent(character) == remove_accent(self._last_latin_character):
|
||||
self._successive_count += 1
|
||||
self._last_latin_character = character
|
||||
self._last_was_accentuated = current_accentuated
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._successive_count = 0
|
||||
self._character_count = 0
|
||||
self._last_latin_character = None
|
||||
self._last_was_accentuated = False
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
return (self._successive_count * 2) / self._character_count
|
||||
|
||||
|
||||
@final
|
||||
class SuspiciousRange(MessDetectorPlugin):
|
||||
__slots__ = (
|
||||
"_suspicious_successive_range_count",
|
||||
"_character_count",
|
||||
"_last_printable_seen",
|
||||
"_last_printable_range",
|
||||
)
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._suspicious_successive_range_count: int = 0
|
||||
self._character_count: int = 0
|
||||
self._last_printable_seen: str | None = None
|
||||
self._last_printable_range: str | None = None
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return character.isprintable()
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if (
|
||||
character.isspace()
|
||||
or is_punctuation(character)
|
||||
or character in COMMON_SAFE_ASCII_CHARACTERS
|
||||
):
|
||||
self._last_printable_seen = None
|
||||
self._last_printable_range = None
|
||||
return
|
||||
|
||||
if self._last_printable_seen is None:
|
||||
self._last_printable_seen = character
|
||||
self._last_printable_range = unicode_range(character)
|
||||
return
|
||||
|
||||
unicode_range_a: str | None = self._last_printable_range
|
||||
unicode_range_b: str | None = unicode_range(character)
|
||||
|
||||
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
|
||||
self._suspicious_successive_range_count += 1
|
||||
|
||||
self._last_printable_seen = character
|
||||
self._last_printable_range = unicode_range_b
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._suspicious_successive_range_count = 0
|
||||
self._last_printable_seen = None
|
||||
self._last_printable_range = None
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count <= 13:
|
||||
return 0.0
|
||||
|
||||
ratio_of_suspicious_range_usage: float = (
|
||||
self._suspicious_successive_range_count * 2
|
||||
) / self._character_count
|
||||
|
||||
return ratio_of_suspicious_range_usage
|
||||
|
||||
|
||||
@final
|
||||
class SuperWeirdWordPlugin(MessDetectorPlugin):
|
||||
__slots__ = (
|
||||
"_word_count",
|
||||
"_bad_word_count",
|
||||
"_foreign_long_count",
|
||||
"_is_current_word_bad",
|
||||
"_foreign_long_watch",
|
||||
"_character_count",
|
||||
"_bad_character_count",
|
||||
"_buffer_length",
|
||||
"_buffer_last_char",
|
||||
"_buffer_last_char_accentuated",
|
||||
"_buffer_accent_count",
|
||||
"_buffer_glyph_count",
|
||||
"_buffer_upper_count",
|
||||
)
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._word_count: int = 0
|
||||
self._bad_word_count: int = 0
|
||||
self._foreign_long_count: int = 0
|
||||
|
||||
self._is_current_word_bad: bool = False
|
||||
self._foreign_long_watch: bool = False
|
||||
|
||||
self._character_count: int = 0
|
||||
self._bad_character_count: int = 0
|
||||
|
||||
self._buffer_length: int = 0
|
||||
self._buffer_last_char: str | None = None
|
||||
self._buffer_last_char_accentuated: bool = False
|
||||
self._buffer_accent_count: int = 0
|
||||
self._buffer_glyph_count: int = 0
|
||||
self._buffer_upper_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
if character.isalpha():
|
||||
self._buffer_length += 1
|
||||
self._buffer_last_char = character
|
||||
|
||||
if character.isupper():
|
||||
self._buffer_upper_count += 1
|
||||
|
||||
flags: int = _character_flags(character)
|
||||
char_accentuated: bool = bool(flags & _ACCENTUATED)
|
||||
self._buffer_last_char_accentuated = char_accentuated
|
||||
|
||||
if char_accentuated:
|
||||
self._buffer_accent_count += 1
|
||||
if (
|
||||
not self._foreign_long_watch
|
||||
and (not (flags & _LATIN) or char_accentuated)
|
||||
and not (flags & _GLYPH_MASK)
|
||||
):
|
||||
self._foreign_long_watch = True
|
||||
if flags & _GLYPH_MASK:
|
||||
self._buffer_glyph_count += 1
|
||||
return
|
||||
if not self._buffer_length:
|
||||
return
|
||||
if (
|
||||
character.isspace() or is_punctuation(character) or is_separator(character)
|
||||
) and self._buffer_length:
|
||||
self._word_count += 1
|
||||
buffer_length: int = self._buffer_length
|
||||
|
||||
self._character_count += buffer_length
|
||||
|
||||
if buffer_length >= 4:
|
||||
if self._buffer_accent_count / buffer_length >= 0.5:
|
||||
self._is_current_word_bad = True
|
||||
# Words ending with an upper-case accentuated letter are so rare
# that we consider them all suspicious. Same weight as a foreign_long suspicion.
|
||||
elif (
|
||||
self._buffer_last_char_accentuated
|
||||
and self._buffer_last_char.isupper() # type: ignore[union-attr]
|
||||
and self._buffer_upper_count != buffer_length
|
||||
):
|
||||
self._foreign_long_count += 1
|
||||
self._is_current_word_bad = True
|
||||
elif self._buffer_glyph_count == 1:
|
||||
self._is_current_word_bad = True
|
||||
self._foreign_long_count += 1
|
||||
if buffer_length >= 24 and self._foreign_long_watch:
|
||||
probable_camel_cased: bool = (
|
||||
self._buffer_upper_count > 0
|
||||
and self._buffer_upper_count / buffer_length <= 0.3
|
||||
)
|
||||
|
||||
if not probable_camel_cased:
|
||||
self._foreign_long_count += 1
|
||||
self._is_current_word_bad = True
|
||||
|
||||
if self._is_current_word_bad:
|
||||
self._bad_word_count += 1
|
||||
self._bad_character_count += buffer_length
|
||||
self._is_current_word_bad = False
|
||||
|
||||
self._foreign_long_watch = False
|
||||
self._buffer_length = 0
|
||||
self._buffer_last_char = None
|
||||
self._buffer_last_char_accentuated = False
|
||||
self._buffer_accent_count = 0
|
||||
self._buffer_glyph_count = 0
|
||||
self._buffer_upper_count = 0
|
||||
elif (
|
||||
character not in {"<", ">", "-", "=", "~", "|", "_"}
|
||||
and not character.isdigit()
|
||||
and is_symbol(character)
|
||||
):
|
||||
self._is_current_word_bad = True
|
||||
self._buffer_length += 1
|
||||
self._buffer_last_char = character
|
||||
self._buffer_last_char_accentuated = False
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._buffer_length = 0
|
||||
self._buffer_last_char = None
|
||||
self._buffer_last_char_accentuated = False
|
||||
self._is_current_word_bad = False
|
||||
self._foreign_long_watch = False
|
||||
self._bad_word_count = 0
|
||||
self._word_count = 0
|
||||
self._character_count = 0
|
||||
self._bad_character_count = 0
|
||||
self._foreign_long_count = 0
|
||||
self._buffer_accent_count = 0
|
||||
self._buffer_glyph_count = 0
|
||||
self._buffer_upper_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._word_count <= 10 and self._foreign_long_count == 0:
|
||||
return 0.0
|
||||
|
||||
return self._bad_character_count / self._character_count
|
||||
|
||||
|
||||
@final
|
||||
class CjkUncommonPlugin(MessDetectorPlugin):
|
||||
"""
|
||||
Detect messy CJK text that probably means nothing.
|
||||
"""
|
||||
|
||||
__slots__ = ("_character_count", "_uncommon_count")
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._character_count: int = 0
|
||||
self._uncommon_count: int = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return is_cjk(character)
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if is_cjk_uncommon(character):
|
||||
self._uncommon_count += 1
|
||||
return
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._uncommon_count = 0
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count < 8:
|
||||
return 0.0
|
||||
|
||||
uncommon_form_usage: float = self._uncommon_count / self._character_count
|
||||
|
||||
# We can be fairly sure it is garbage when uncommon characters are widely
# used; otherwise it could simply be Traditional Chinese, for example.
|
||||
return uncommon_form_usage / 10 if uncommon_form_usage > 0.5 else 0.0
|
||||
|
||||
|
||||
@final
|
||||
class ArchaicUpperLowerPlugin(MessDetectorPlugin):
|
||||
__slots__ = (
|
||||
"_buf",
|
||||
"_character_count_since_last_sep",
|
||||
"_successive_upper_lower_count",
|
||||
"_successive_upper_lower_count_final",
|
||||
"_character_count",
|
||||
"_last_alpha_seen",
|
||||
"_current_ascii_only",
|
||||
)
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._buf: bool = False
|
||||
|
||||
self._character_count_since_last_sep: int = 0
|
||||
|
||||
self._successive_upper_lower_count: int = 0
|
||||
self._successive_upper_lower_count_final: int = 0
|
||||
|
||||
self._character_count: int = 0
|
||||
|
||||
self._last_alpha_seen: str | None = None
|
||||
self._current_ascii_only: bool = True
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return True
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
is_concerned: bool = character.isalpha() and is_case_variable(character)
|
||||
chunk_sep: bool = not is_concerned
|
||||
|
||||
if chunk_sep and self._character_count_since_last_sep > 0:
|
||||
if (
|
||||
self._character_count_since_last_sep <= 64
|
||||
and not character.isdigit()
|
||||
and not self._current_ascii_only
|
||||
):
|
||||
self._successive_upper_lower_count_final += (
|
||||
self._successive_upper_lower_count
|
||||
)
|
||||
|
||||
self._successive_upper_lower_count = 0
|
||||
self._character_count_since_last_sep = 0
|
||||
self._last_alpha_seen = None
|
||||
self._buf = False
|
||||
self._character_count += 1
|
||||
self._current_ascii_only = True
|
||||
|
||||
return
|
||||
|
||||
if self._current_ascii_only and not character.isascii():
|
||||
self._current_ascii_only = False
|
||||
|
||||
if self._last_alpha_seen is not None:
|
||||
if (character.isupper() and self._last_alpha_seen.islower()) or (
|
||||
character.islower() and self._last_alpha_seen.isupper()
|
||||
):
|
||||
if self._buf:
|
||||
self._successive_upper_lower_count += 2
|
||||
self._buf = False
|
||||
else:
|
||||
self._buf = True
|
||||
else:
|
||||
self._buf = False
|
||||
|
||||
self._character_count += 1
|
||||
self._character_count_since_last_sep += 1
|
||||
self._last_alpha_seen = character
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._character_count_since_last_sep = 0
|
||||
self._successive_upper_lower_count = 0
|
||||
self._successive_upper_lower_count_final = 0
|
||||
self._last_alpha_seen = None
|
||||
self._buf = False
|
||||
self._current_ascii_only = True
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count == 0:
|
||||
return 0.0
|
||||
|
||||
return self._successive_upper_lower_count_final / self._character_count
|
||||
|
||||
|
||||
@final
|
||||
class ArabicIsolatedFormPlugin(MessDetectorPlugin):
|
||||
__slots__ = ("_character_count", "_isolated_form_count")
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._character_count: int = 0
|
||||
self._isolated_form_count: int = 0
|
||||
|
||||
def reset(self) -> None: # Abstract
|
||||
self._character_count = 0
|
||||
self._isolated_form_count = 0
|
||||
|
||||
def eligible(self, character: str) -> bool:
|
||||
return is_arabic(character)
|
||||
|
||||
def feed(self, character: str) -> None:
|
||||
self._character_count += 1
|
||||
|
||||
if is_arabic_isolated_form(character):
|
||||
self._isolated_form_count += 1
|
||||
|
||||
@property
|
||||
def ratio(self) -> float:
|
||||
if self._character_count < 8:
|
||||
return 0.0
|
||||
|
||||
isolated_form_usage: float = self._isolated_form_count / self._character_count
|
||||
|
||||
return isolated_form_usage
|
||||
|
||||
|
||||
@lru_cache(maxsize=1024)
|
||||
def is_suspiciously_successive_range(
|
||||
unicode_range_a: str | None, unicode_range_b: str | None
|
||||
) -> bool:
|
||||
"""
|
||||
Determine whether two Unicode ranges seen next to each other can be considered suspicious.
|
||||
"""
|
||||
if unicode_range_a is None or unicode_range_b is None:
|
||||
return True
|
||||
|
||||
if unicode_range_a == unicode_range_b:
|
||||
return False
|
||||
|
||||
if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
|
||||
return False
|
||||
|
||||
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
|
||||
return False
|
||||
|
||||
# Latin characters can be accompanied with a combining diacritical mark
|
||||
# eg. Vietnamese.
|
||||
if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
|
||||
"Combining" in unicode_range_a or "Combining" in unicode_range_b
|
||||
):
|
||||
return False
|
||||
|
||||
keywords_range_a, keywords_range_b = (
|
||||
unicode_range_a.split(" "),
|
||||
unicode_range_b.split(" "),
|
||||
)
|
||||
|
||||
for el in keywords_range_a:
|
||||
if el in UNICODE_SECONDARY_RANGE_KEYWORD:
|
||||
continue
|
||||
if el in keywords_range_b:
|
||||
return False
|
||||
|
||||
# Japanese Exception
|
||||
range_a_jp_chars, range_b_jp_chars = (
|
||||
unicode_range_a
|
||||
in (
|
||||
"Hiragana",
|
||||
"Katakana",
|
||||
),
|
||||
unicode_range_b in ("Hiragana", "Katakana"),
|
||||
)
|
||||
if (range_a_jp_chars or range_b_jp_chars) and (
|
||||
"CJK" in unicode_range_a or "CJK" in unicode_range_b
|
||||
):
|
||||
return False
|
||||
if range_a_jp_chars and range_b_jp_chars:
|
||||
return False
|
||||
|
||||
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
|
||||
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
|
||||
return False
|
||||
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
|
||||
return False
|
||||
|
||||
# Chinese/Japanese use dedicated range for punctuation and/or separators.
|
||||
if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
|
||||
unicode_range_a in ["Katakana", "Hiragana"]
|
||||
and unicode_range_b in ["Katakana", "Hiragana"]
|
||||
):
|
||||
if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
|
||||
return False
|
||||
if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
|
||||
return False
|
||||
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# import time messdetector plugins detection(...)
|
||||
_DETECTOR_CLASSES: tuple[type[MessDetectorPlugin], ...] = tuple(
|
||||
md_class for md_class in MessDetectorPlugin.__subclasses__()
|
||||
)
|
||||
|
||||
|
||||
@lru_cache(maxsize=2048)
|
||||
def mess_ratio(
|
||||
decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
|
||||
) -> float:
|
||||
"""
|
||||
Compute a mess ratio for a decoded byte sequence. Reaching the maximum threshold stops the computation early.
|
||||
"""
|
||||
|
||||
detectors: list[MessDetectorPlugin] = [md_class() for md_class in _DETECTOR_CLASSES]
|
||||
|
||||
mean_mess_ratio: float
|
||||
seq_len: int = len(decoded_sequence)
|
||||
|
||||
if seq_len < 511:
|
||||
step: int = 32
|
||||
elif seq_len < 1024:
|
||||
step = 64
|
||||
else:
|
||||
step = 128
|
||||
|
||||
for block_start in range(0, seq_len, step):
|
||||
for character in decoded_sequence[block_start : block_start + step]:
|
||||
for detector in detectors:
|
||||
if detector.eligible(character):
|
||||
detector.feed(character)
|
||||
|
||||
mean_mess_ratio = sum(dt.ratio for dt in detectors)
|
||||
|
||||
if mean_mess_ratio >= maximum_threshold:
|
||||
break
|
||||
else:
|
||||
# Flush last word buffer in SuperWeirdWordPlugin via trailing newline.
|
||||
for detector in detectors:
|
||||
if detector.eligible("\n"):
|
||||
detector.feed("\n")
|
||||
mean_mess_ratio = sum(dt.ratio for dt in detectors)
|
||||
|
||||
if debug:
|
||||
logger = getLogger("charset_normalizer")
|
||||
|
||||
logger.log(
|
||||
TRACE,
|
||||
"Mess-detector extended-analysis start. "
|
||||
f"intermediary_mean_mess_ratio_calc={step} mean_mess_ratio={mean_mess_ratio} "
|
||||
f"maximum_threshold={maximum_threshold}",
|
||||
)
|
||||
|
||||
if seq_len > 16:
|
||||
logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}")
|
||||
logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}")
|
||||
|
||||
for dt in detectors:
|
||||
logger.log(TRACE, f"{dt.__class__}: {dt.ratio}")
|
||||
|
||||
return round(mean_mess_ratio, 3)
|
||||
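A quick sketch of mess_ratio in action (internal API of charset_normalizer.md; the second string is deliberate mojibake and the exact values depend on the plugin weights):

```python
from charset_normalizer.md import mess_ratio

print(mess_ratio("This is a perfectly ordinary English sentence."))  # close to 0.0
print(mess_ratio("Ã©Ã©Ã© Ã¨Ã¨Ã¨ Ã¹Ã¹Ã¹ â€œmojibakeâ€ everywhere"))   # typically above the default 0.2 threshold
```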
359
venv/lib/python3.12/site-packages/charset_normalizer/models.py
Normal file
@@ -0,0 +1,359 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from encodings.aliases import aliases
|
||||
from json import dumps
|
||||
from re import sub
|
||||
from typing import Any, Iterator, List, Tuple
|
||||
|
||||
from .constant import RE_POSSIBLE_ENCODING_INDICATION, TOO_BIG_SEQUENCE
|
||||
from .utils import iana_name, is_multi_byte_encoding, unicode_range
|
||||
|
||||
|
||||
class CharsetMatch:
|
||||
def __init__(
|
||||
self,
|
||||
payload: bytes,
|
||||
guessed_encoding: str,
|
||||
mean_mess_ratio: float,
|
||||
has_sig_or_bom: bool,
|
||||
languages: CoherenceMatches,
|
||||
decoded_payload: str | None = None,
|
||||
preemptive_declaration: str | None = None,
|
||||
):
|
||||
self._payload: bytes = payload
|
||||
|
||||
self._encoding: str = guessed_encoding
|
||||
self._mean_mess_ratio: float = mean_mess_ratio
|
||||
self._languages: CoherenceMatches = languages
|
||||
self._has_sig_or_bom: bool = has_sig_or_bom
|
||||
self._unicode_ranges: list[str] | None = None
|
||||
|
||||
self._leaves: list[CharsetMatch] = []
|
||||
self._mean_coherence_ratio: float = 0.0
|
||||
|
||||
self._output_payload: bytes | None = None
|
||||
self._output_encoding: str | None = None
|
||||
|
||||
self._string: str | None = decoded_payload
|
||||
|
||||
self._preemptive_declaration: str | None = preemptive_declaration
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, CharsetMatch):
|
||||
if isinstance(other, str):
|
||||
return iana_name(other) == self.encoding
|
||||
return False
|
||||
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
|
||||
|
||||
def __lt__(self, other: object) -> bool:
|
||||
"""
|
||||
Implemented so that sorted() works on CharsetMatch items.
|
||||
"""
|
||||
if not isinstance(other, CharsetMatch):
|
||||
raise ValueError
|
||||
|
||||
chaos_difference: float = abs(self.chaos - other.chaos)
|
||||
coherence_difference: float = abs(self.coherence - other.coherence)
|
||||
|
||||
# Below 1% difference --> Use Coherence
|
||||
if chaos_difference < 0.01 and coherence_difference > 0.02:
|
||||
return self.coherence > other.coherence
|
||||
elif chaos_difference < 0.01 and coherence_difference <= 0.02:
|
||||
# When the decision is difficult, prefer the result that decoded the most multi-byte characters,
# but skip that check on very large payloads to preserve RAM.
|
||||
if len(self._payload) >= TOO_BIG_SEQUENCE:
|
||||
return self.chaos < other.chaos
|
||||
return self.multi_byte_usage > other.multi_byte_usage
|
||||
|
||||
return self.chaos < other.chaos
|
||||
|
||||
@property
|
||||
def multi_byte_usage(self) -> float:
|
||||
return 1.0 - (len(str(self)) / len(self.raw))
|
||||
|
||||
def __str__(self) -> str:
|
||||
# Lazy Str Loading
|
||||
if self._string is None:
|
||||
self._string = str(self._payload, self._encoding, "strict")
|
||||
return self._string
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<CharsetMatch '{self.encoding}' fp({self.fingerprint})>"
|
||||
|
||||
def add_submatch(self, other: CharsetMatch) -> None:
|
||||
if not isinstance(other, CharsetMatch) or other == self:
|
||||
raise ValueError(
|
||||
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
|
||||
other.__class__
|
||||
)
|
||||
)
|
||||
|
||||
other._string = None # Unload RAM usage; dirty trick.
|
||||
self._leaves.append(other)
|
||||
|
||||
@property
|
||||
def encoding(self) -> str:
|
||||
return self._encoding
|
||||
|
||||
@property
|
||||
def encoding_aliases(self) -> list[str]:
|
||||
"""
|
||||
Encodings are known by many names; using this helps when searching for IBM855 when it is listed as CP855.
|
||||
"""
|
||||
also_known_as: list[str] = []
|
||||
for u, p in aliases.items():
|
||||
if self.encoding == u:
|
||||
also_known_as.append(p)
|
||||
elif self.encoding == p:
|
||||
also_known_as.append(u)
|
||||
return also_known_as
|
||||
|
||||
@property
|
||||
def bom(self) -> bool:
|
||||
return self._has_sig_or_bom
|
||||
|
||||
@property
|
||||
def byte_order_mark(self) -> bool:
|
||||
return self._has_sig_or_bom
|
||||
|
||||
@property
|
||||
def languages(self) -> list[str]:
|
||||
"""
|
||||
Return the complete list of possible languages found in the decoded sequence.
Usually not very useful. The returned list may be empty even if the 'language' property returns something other than 'Unknown'.
|
||||
"""
|
||||
return [e[0] for e in self._languages]
|
||||
|
||||
@property
|
||||
def language(self) -> str:
|
||||
"""
|
||||
Most probable language found in decoded sequence. If none were detected or inferred, the property will return
|
||||
"Unknown".
|
||||
"""
|
||||
if not self._languages:
|
||||
# Trying to infer the language based on the given encoding
# It's either English or we should not pronounce ourselves in certain cases.
|
||||
if "ascii" in self.could_be_from_charset:
|
||||
return "English"
|
||||
|
||||
# doing it there to avoid circular import
|
||||
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
|
||||
|
||||
languages = (
|
||||
mb_encoding_languages(self.encoding)
|
||||
if is_multi_byte_encoding(self.encoding)
|
||||
else encoding_languages(self.encoding)
|
||||
)
|
||||
|
||||
if len(languages) == 0 or "Latin Based" in languages:
|
||||
return "Unknown"
|
||||
|
||||
return languages[0]
|
||||
|
||||
return self._languages[0][0]
|
||||
|
||||
@property
|
||||
def chaos(self) -> float:
|
||||
return self._mean_mess_ratio
|
||||
|
||||
@property
|
||||
def coherence(self) -> float:
|
||||
if not self._languages:
|
||||
return 0.0
|
||||
return self._languages[0][1]
|
||||
|
||||
@property
|
||||
def percent_chaos(self) -> float:
|
||||
return round(self.chaos * 100, ndigits=3)
|
||||
|
||||
@property
|
||||
def percent_coherence(self) -> float:
|
||||
return round(self.coherence * 100, ndigits=3)
|
||||
|
||||
@property
|
||||
def raw(self) -> bytes:
|
||||
"""
|
||||
Original untouched bytes.
|
||||
"""
|
||||
return self._payload
|
||||
|
||||
@property
|
||||
def submatch(self) -> list[CharsetMatch]:
|
||||
return self._leaves
|
||||
|
||||
@property
|
||||
def has_submatch(self) -> bool:
|
||||
return len(self._leaves) > 0
|
||||
|
||||
@property
|
||||
def alphabets(self) -> list[str]:
|
||||
if self._unicode_ranges is not None:
|
||||
return self._unicode_ranges
|
||||
# list detected ranges
|
||||
detected_ranges: list[str | None] = [unicode_range(char) for char in str(self)]
|
||||
# filter and sort
|
||||
self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
|
||||
return self._unicode_ranges
|
||||
|
||||
@property
|
||||
def could_be_from_charset(self) -> list[str]:
|
||||
"""
|
||||
The complete list of encoding that output the exact SAME str result and therefore could be the originating
|
||||
encoding.
|
||||
This list does include the encoding available in property 'encoding'.
|
||||
"""
|
||||
return [self._encoding] + [m.encoding for m in self._leaves]
|
||||
|
||||
def output(self, encoding: str = "utf_8") -> bytes:
|
||||
"""
|
||||
Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
|
||||
Any errors will be simply ignored by the encoder NOT replaced.
|
||||
"""
|
||||
if self._output_encoding is None or self._output_encoding != encoding:
|
||||
self._output_encoding = encoding
|
||||
decoded_string = str(self)
|
||||
if (
|
||||
self._preemptive_declaration is not None
|
||||
and self._preemptive_declaration.lower()
|
||||
not in ["utf-8", "utf8", "utf_8"]
|
||||
):
|
||||
patched_header = sub(
|
||||
RE_POSSIBLE_ENCODING_INDICATION,
|
||||
lambda m: m.string[m.span()[0] : m.span()[1]].replace(
|
||||
m.groups()[0],
|
||||
iana_name(self._output_encoding).replace("_", "-"), # type: ignore[arg-type]
|
||||
),
|
||||
decoded_string[:8192],
|
||||
count=1,
|
||||
)
|
||||
|
||||
decoded_string = patched_header + decoded_string[8192:]
|
||||
|
||||
self._output_payload = decoded_string.encode(encoding, "replace")
|
||||
|
||||
return self._output_payload # type: ignore
|
||||
|
||||
@property
|
||||
def fingerprint(self) -> int:
|
||||
"""
|
||||
Retrieve a hash fingerprint of the decoded payload, used for deduplication.
|
||||
"""
|
||||
return hash(str(self))
|
||||
|
||||
|
||||
class CharsetMatches:
|
||||
"""
|
||||
Container with every CharsetMatch items ordered by default from most probable to the less one.
|
||||
Act like a list(iterable) but does not implements all related methods.
|
||||
"""
|
||||
|
||||
def __init__(self, results: list[CharsetMatch] | None = None):
|
||||
self._results: list[CharsetMatch] = sorted(results) if results else []
|
||||
|
||||
def __iter__(self) -> Iterator[CharsetMatch]:
|
||||
yield from self._results
|
||||
|
||||
def __getitem__(self, item: int | str) -> CharsetMatch:
|
||||
"""
|
||||
Retrieve a single item either by its position or encoding name (alias may be used here).
|
||||
Raise KeyError upon invalid index or encoding not present in results.
|
||||
"""
|
||||
if isinstance(item, int):
|
||||
return self._results[item]
|
||||
if isinstance(item, str):
|
||||
item = iana_name(item, False)
|
||||
for result in self._results:
|
||||
if item in result.could_be_from_charset:
|
||||
return result
|
||||
raise KeyError
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._results)
|
||||
|
||||
def __bool__(self) -> bool:
|
||||
return len(self._results) > 0
|
||||
|
||||
def append(self, item: CharsetMatch) -> None:
|
||||
"""
|
||||
Insert a single match. Will be inserted accordingly to preserve sort.
|
||||
Can be inserted as a submatch.
|
||||
"""
|
||||
if not isinstance(item, CharsetMatch):
|
||||
raise ValueError(
|
||||
"Cannot append instance '{}' to CharsetMatches".format(
|
||||
str(item.__class__)
|
||||
)
|
||||
)
|
||||
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
|
||||
if len(item.raw) < TOO_BIG_SEQUENCE:
|
||||
for match in self._results:
|
||||
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
|
||||
match.add_submatch(item)
|
||||
return
|
||||
self._results.append(item)
|
||||
self._results = sorted(self._results)
|
||||
|
||||
def best(self) -> CharsetMatch | None:
|
||||
"""
|
||||
Simply return the first match. Strict equivalent to matches[0].
|
||||
"""
|
||||
if not self._results:
|
||||
return None
|
||||
return self._results[0]
|
||||
|
||||
def first(self) -> CharsetMatch | None:
|
||||
"""
|
||||
Redundant method, call the method best(). Kept for BC reasons.
|
||||
"""
|
||||
return self.best()
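For orientation, here is a minimal usage sketch (not part of the vendored file) showing how a CharsetMatches container is normally obtained and consumed; it assumes the package's public `from_bytes()` entry point is importable from this venv:

```python
from charset_normalizer import from_bytes

payload = "Comment ça va ? Très bien, merci.".encode("cp1252")

matches = from_bytes(payload)   # a CharsetMatches container, already sorted
best = matches.best()           # first item: lowest chaos, highest coherence

if best is not None:
    print(best.encoding)                # most probable code page
    print(best.language)                # inferred language, or "Unknown"
    print(best.could_be_from_charset)   # every encoding yielding the exact same text
    print(best.output("utf_8"))         # the payload re-encoded to UTF-8
```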
|
||||
|
||||
|
||||
CoherenceMatch = Tuple[str, float]
|
||||
CoherenceMatches = List[CoherenceMatch]
|
||||
|
||||
|
||||
class CliDetectionResult:
|
||||
def __init__(
|
||||
self,
|
||||
path: str,
|
||||
encoding: str | None,
|
||||
encoding_aliases: list[str],
|
||||
alternative_encodings: list[str],
|
||||
language: str,
|
||||
alphabets: list[str],
|
||||
has_sig_or_bom: bool,
|
||||
chaos: float,
|
||||
coherence: float,
|
||||
unicode_path: str | None,
|
||||
is_preferred: bool,
|
||||
):
|
||||
self.path: str = path
|
||||
self.unicode_path: str | None = unicode_path
|
||||
self.encoding: str | None = encoding
|
||||
self.encoding_aliases: list[str] = encoding_aliases
|
||||
self.alternative_encodings: list[str] = alternative_encodings
|
||||
self.language: str = language
|
||||
self.alphabets: list[str] = alphabets
|
||||
self.has_sig_or_bom: bool = has_sig_or_bom
|
||||
self.chaos: float = chaos
|
||||
self.coherence: float = coherence
|
||||
self.is_preferred: bool = is_preferred
|
||||
|
||||
@property
|
||||
def __dict__(self) -> dict[str, Any]: # type: ignore
|
||||
return {
|
||||
"path": self.path,
|
||||
"encoding": self.encoding,
|
||||
"encoding_aliases": self.encoding_aliases,
|
||||
"alternative_encodings": self.alternative_encodings,
|
||||
"language": self.language,
|
||||
"alphabets": self.alphabets,
|
||||
"has_sig_or_bom": self.has_sig_or_bom,
|
||||
"chaos": self.chaos,
|
||||
"coherence": self.coherence,
|
||||
"unicode_path": self.unicode_path,
|
||||
"is_preferred": self.is_preferred,
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
return dumps(self.__dict__, ensure_ascii=True, indent=4)
|
||||
420
venv/lib/python3.12/site-packages/charset_normalizer/utils.py
Normal file
@@ -0,0 +1,420 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
import logging
|
||||
import unicodedata
|
||||
from bisect import bisect_right
|
||||
from codecs import IncrementalDecoder
|
||||
from encodings.aliases import aliases
|
||||
from functools import lru_cache
|
||||
from re import findall
|
||||
from typing import Generator
|
||||
|
||||
from _multibytecodec import ( # type: ignore[import-not-found,import]
|
||||
MultibyteIncrementalDecoder,
|
||||
)
|
||||
|
||||
from .constant import (
|
||||
ENCODING_MARKS,
|
||||
IANA_SUPPORTED_SIMILAR,
|
||||
RE_POSSIBLE_ENCODING_INDICATION,
|
||||
UNICODE_RANGES_COMBINED,
|
||||
UNICODE_SECONDARY_RANGE_KEYWORD,
|
||||
UTF8_MAXIMAL_ALLOCATION,
|
||||
COMMON_CJK_CHARACTERS,
|
||||
_LATIN,
|
||||
_CJK,
|
||||
_HANGUL,
|
||||
_KATAKANA,
|
||||
_HIRAGANA,
|
||||
_THAI,
|
||||
_ARABIC,
|
||||
_ARABIC_ISOLATED_FORM,
|
||||
_ACCENT_KEYWORDS,
|
||||
_ACCENTUATED,
|
||||
)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def _character_flags(character: str) -> int:
|
||||
"""Compute all name-based classification flags with a single unicodedata.name() call."""
|
||||
try:
|
||||
desc: str = unicodedata.name(character)
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
flags: int = 0
|
||||
|
||||
if "LATIN" in desc:
|
||||
flags |= _LATIN
|
||||
if "CJK" in desc:
|
||||
flags |= _CJK
|
||||
if "HANGUL" in desc:
|
||||
flags |= _HANGUL
|
||||
if "KATAKANA" in desc:
|
||||
flags |= _KATAKANA
|
||||
if "HIRAGANA" in desc:
|
||||
flags |= _HIRAGANA
|
||||
if "THAI" in desc:
|
||||
flags |= _THAI
|
||||
if "ARABIC" in desc:
|
||||
flags |= _ARABIC
|
||||
if "ISOLATED FORM" in desc:
|
||||
flags |= _ARABIC_ISOLATED_FORM
|
||||
|
||||
for kw in _ACCENT_KEYWORDS:
|
||||
if kw in desc:
|
||||
flags |= _ACCENTUATED
|
||||
break
|
||||
|
||||
return flags
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_accentuated(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _ACCENTUATED)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def remove_accent(character: str) -> str:
|
||||
decomposed: str = unicodedata.decomposition(character)
|
||||
if not decomposed:
|
||||
return character
|
||||
|
||||
codes: list[str] = decomposed.split(" ")
|
||||
|
||||
return chr(int(codes[0], 16))
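A small illustrative sketch (not part of the vendored file) of the accent helpers above; exact truth values depend on the Unicode database bundled with the interpreter, so results are printed rather than asserted:

```python
from charset_normalizer.utils import is_accentuated, remove_accent

for character in ("e", "é", "ñ"):
    # is_accentuated() keys off the character's Unicode name (e.g. "WITH ACUTE");
    # remove_accent() uses the canonical decomposition when one exists.
    print(character, is_accentuated(character), remove_accent(character))
```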
|
||||
|
||||
|
||||
# Pre-built sorted lookup table for O(log n) binary search in unicode_range().
|
||||
# Each entry is (range_start, range_end_exclusive, range_name).
|
||||
_UNICODE_RANGES_SORTED: list[tuple[int, int, str]] = sorted(
|
||||
(ord_range.start, ord_range.stop, name)
|
||||
for name, ord_range in UNICODE_RANGES_COMBINED.items()
|
||||
)
|
||||
_UNICODE_RANGE_STARTS: list[int] = [e[0] for e in _UNICODE_RANGES_SORTED]
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def unicode_range(character: str) -> str | None:
|
||||
"""
|
||||
Retrieve the Unicode range official name from a single character.
|
||||
"""
|
||||
character_ord: int = ord(character)
|
||||
|
||||
# Binary search: find the rightmost range whose start <= character_ord
|
||||
idx = bisect_right(_UNICODE_RANGE_STARTS, character_ord) - 1
|
||||
if idx >= 0:
|
||||
start, stop, name = _UNICODE_RANGES_SORTED[idx]
|
||||
if character_ord < stop:
|
||||
return name
|
||||
|
||||
return None
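As a sketch of the bisect-based lookup above (assuming this vendored module is importable as `charset_normalizer.utils`), each call resolves a character to its Unicode block name:

```python
from charset_normalizer.utils import unicode_range

# One bisect_right() over the precomputed range starts per (cached) call.
for character in ("A", "é", "あ", "感"):
    print(hex(ord(character)), unicode_range(character))
```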
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_latin(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _LATIN)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_punctuation(character: str) -> bool:
|
||||
character_category: str = unicodedata.category(character)
|
||||
|
||||
if "P" in character_category:
|
||||
return True
|
||||
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
return False
|
||||
|
||||
return "Punctuation" in character_range
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_symbol(character: str) -> bool:
|
||||
character_category: str = unicodedata.category(character)
|
||||
|
||||
if "S" in character_category or "N" in character_category:
|
||||
return True
|
||||
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
return False
|
||||
|
||||
return "Forms" in character_range and character_category != "Lo"
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_emoticon(character: str) -> bool:
|
||||
character_range: str | None = unicode_range(character)
|
||||
|
||||
if character_range is None:
|
||||
return False
|
||||
|
||||
return "Emoticons" in character_range or "Pictographs" in character_range
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_separator(character: str) -> bool:
|
||||
if character.isspace() or character in {"|", "+", "<", ">"}:
|
||||
return True
|
||||
|
||||
character_category: str = unicodedata.category(character)
|
||||
|
||||
return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_case_variable(character: str) -> bool:
|
||||
return character.islower() != character.isupper()
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_cjk(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _CJK)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_hiragana(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _HIRAGANA)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_katakana(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _KATAKANA)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_hangul(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _HANGUL)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_thai(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _THAI)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_arabic(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _ARABIC)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_arabic_isolated_form(character: str) -> bool:
|
||||
return bool(_character_flags(character) & _ARABIC_ISOLATED_FORM)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_cjk_uncommon(character: str) -> bool:
|
||||
return character not in COMMON_CJK_CHARACTERS
|
||||
|
||||
|
||||
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
|
||||
def is_unicode_range_secondary(range_name: str) -> bool:
|
||||
return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
|
||||
|
||||
|
||||
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
|
||||
def is_unprintable(character: str) -> bool:
|
||||
return (
|
||||
character.isspace() is False # includes \n \t \r \v
|
||||
and character.isprintable() is False
|
||||
and character != "\x1a" # Why? Its the ASCII substitute character.
|
||||
and character != "\ufeff" # bug discovered in Python,
|
||||
# Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
|
||||
)
|
||||
|
||||
|
||||
def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None:
|
||||
"""
|
||||
Extract using ASCII-only decoder any specified encoding in the first n-bytes.
|
||||
"""
|
||||
if not isinstance(sequence, (bytes, bytearray)):
|
||||
raise TypeError
|
||||
|
||||
seq_len: int = len(sequence)
|
||||
|
||||
results: list[str] = findall(
|
||||
RE_POSSIBLE_ENCODING_INDICATION,
|
||||
sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
|
||||
)
|
||||
|
||||
if len(results) == 0:
|
||||
return None
|
||||
|
||||
for specified_encoding in results:
|
||||
specified_encoding = specified_encoding.lower().replace("-", "_")
|
||||
|
||||
encoding_alias: str
|
||||
encoding_iana: str
|
||||
|
||||
for encoding_alias, encoding_iana in aliases.items():
|
||||
if encoding_alias == specified_encoding:
|
||||
return encoding_iana
|
||||
if encoding_iana == specified_encoding:
|
||||
return encoding_iana
|
||||
|
||||
return None
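An illustrative sketch of pulling a declared charset out of a document head with this helper; the XML prolog below is hypothetical sample data:

```python
from charset_normalizer.utils import any_specified_encoding

# Hypothetical XML prolog carrying an explicit charset declaration.
document = b'<?xml version="1.0" encoding="ISO-8859-15"?><root>data</root>'

# Returns the declared encoding normalized to a Python codec name, or None.
print(any_specified_encoding(document))
```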
|
||||
|
||||
|
||||
@lru_cache(maxsize=128)
|
||||
def is_multi_byte_encoding(name: str) -> bool:
|
||||
"""
|
||||
Verify is a specific encoding is a multi byte one based on it IANA name
|
||||
"""
|
||||
return name in {
|
||||
"utf_8",
|
||||
"utf_8_sig",
|
||||
"utf_16",
|
||||
"utf_16_be",
|
||||
"utf_16_le",
|
||||
"utf_32",
|
||||
"utf_32_le",
|
||||
"utf_32_be",
|
||||
"utf_7",
|
||||
} or issubclass(
|
||||
importlib.import_module(f"encodings.{name}").IncrementalDecoder,
|
||||
MultibyteIncrementalDecoder,
|
||||
)
|
||||
|
||||
|
||||
def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]:
|
||||
"""
|
||||
Identify and extract SIG/BOM in given sequence.
|
||||
"""
|
||||
|
||||
for iana_encoding in ENCODING_MARKS:
|
||||
marks: bytes | list[bytes] = ENCODING_MARKS[iana_encoding]
|
||||
|
||||
if isinstance(marks, bytes):
|
||||
marks = [marks]
|
||||
|
||||
for mark in marks:
|
||||
if sequence.startswith(mark):
|
||||
return iana_encoding, mark
|
||||
|
||||
return None, b""
|
||||
|
||||
|
||||
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
|
||||
return iana_encoding not in {"utf_16", "utf_32"}
|
||||
|
||||
|
||||
def iana_name(cp_name: str, strict: bool = True) -> str:
|
||||
"""Returns the Python normalized encoding name (Not the IANA official name)."""
|
||||
cp_name = cp_name.lower().replace("-", "_")
|
||||
|
||||
encoding_alias: str
|
||||
encoding_iana: str
|
||||
|
||||
for encoding_alias, encoding_iana in aliases.items():
|
||||
if cp_name in [encoding_alias, encoding_iana]:
|
||||
return encoding_iana
|
||||
|
||||
if strict:
|
||||
raise ValueError(f"Unable to retrieve IANA for '{cp_name}'")
|
||||
|
||||
return cp_name
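A quick sketch of the normalization performed by `iana_name()`; the concrete return values come from CPython's `encodings.aliases` table:

```python
from charset_normalizer.utils import iana_name

print(iana_name("ISO-8859-1"))             # normalized via encodings.aliases ("latin_1" on CPython)
print(iana_name("UTF-8"))                  # -> utf_8
print(iana_name("not-a-charset", False))   # unknown names come back lowercased with '-' -> '_'
```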
|
||||
|
||||
|
||||
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
|
||||
if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
|
||||
return 0.0
|
||||
|
||||
decoder_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder
|
||||
decoder_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder
|
||||
|
||||
id_a: IncrementalDecoder = decoder_a(errors="ignore")
|
||||
id_b: IncrementalDecoder = decoder_b(errors="ignore")
|
||||
|
||||
character_match_count: int = 0
|
||||
|
||||
for i in range(256):
|
||||
to_be_decoded: bytes = bytes([i])
|
||||
if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
|
||||
character_match_count += 1
|
||||
|
||||
return character_match_count / 256
|
||||
|
||||
|
||||
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
|
||||
"""
|
||||
Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
|
||||
the function cp_similarity.
|
||||
"""
|
||||
return (
|
||||
iana_name_a in IANA_SUPPORTED_SIMILAR
|
||||
and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
|
||||
)
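An illustrative sketch comparing two single-byte code pages with the helpers above; cp1252 and latin_1 are used only as a familiar pair, and the results are printed rather than asserted:

```python
from charset_normalizer.utils import cp_similarity, is_cp_similar

# Fraction of the 256 single-byte values that decode identically in both code pages.
print(round(cp_similarity("cp1252", "latin_1"), 3))

# is_cp_similar() is the precomputed >= 80% shortcut used during detection.
print(is_cp_similar("cp1252", "latin_1"))
```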
|
||||
|
||||
|
||||
def set_logging_handler(
|
||||
name: str = "charset_normalizer",
|
||||
level: int = logging.INFO,
|
||||
format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
|
||||
) -> None:
|
||||
logger = logging.getLogger(name)
|
||||
logger.setLevel(level)
|
||||
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(logging.Formatter(format_string))
|
||||
logger.addHandler(handler)
|
||||
|
||||
|
||||
def cut_sequence_chunks(
|
||||
sequences: bytes,
|
||||
encoding_iana: str,
|
||||
offsets: range,
|
||||
chunk_size: int,
|
||||
bom_or_sig_available: bool,
|
||||
strip_sig_or_bom: bool,
|
||||
sig_payload: bytes,
|
||||
is_multi_byte_decoder: bool,
|
||||
decoded_payload: str | None = None,
|
||||
) -> Generator[str, None, None]:
|
||||
if decoded_payload and is_multi_byte_decoder is False:
|
||||
for i in offsets:
|
||||
chunk = decoded_payload[i : i + chunk_size]
|
||||
if not chunk:
|
||||
break
|
||||
yield chunk
|
||||
else:
|
||||
for i in offsets:
|
||||
chunk_end = i + chunk_size
|
||||
if chunk_end > len(sequences) + 8:
|
||||
continue
|
||||
|
||||
cut_sequence = sequences[i : i + chunk_size]
|
||||
|
||||
if bom_or_sig_available and strip_sig_or_bom is False:
|
||||
cut_sequence = sig_payload + cut_sequence
|
||||
|
||||
chunk = cut_sequence.decode(
|
||||
encoding_iana,
|
||||
errors="ignore" if is_multi_byte_decoder else "strict",
|
||||
)
|
||||
|
||||
# multi-byte bad cutting detector and adjustment
|
||||
# not the cleanest way to perform that fix but clever enough for now.
|
||||
if is_multi_byte_decoder and i > 0:
|
||||
chunk_partial_size_chk: int = min(chunk_size, 16)
|
||||
|
||||
if (
|
||||
decoded_payload
|
||||
and chunk[:chunk_partial_size_chk] not in decoded_payload
|
||||
):
|
||||
for j in range(i, i - 4, -1):
|
||||
cut_sequence = sequences[j:chunk_end]
|
||||
|
||||
if bom_or_sig_available and strip_sig_or_bom is False:
|
||||
cut_sequence = sig_payload + cut_sequence
|
||||
|
||||
chunk = cut_sequence.decode(encoding_iana, errors="ignore")
|
||||
|
||||
if chunk[:chunk_partial_size_chk] in decoded_payload:
|
||||
break
|
||||
|
||||
yield chunk
|
||||
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Expose version
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
__version__ = "3.4.5"
|
||||
VERSION = __version__.split(".")
|
||||
51
venv/lib/python3.12/site-packages/dotenv/__init__.py
Normal file
@@ -0,0 +1,51 @@
|
||||
from typing import Any, Optional
|
||||
|
||||
from .main import dotenv_values, find_dotenv, get_key, load_dotenv, set_key, unset_key
|
||||
|
||||
|
||||
def load_ipython_extension(ipython: Any) -> None:
|
||||
from .ipython import load_ipython_extension
|
||||
|
||||
load_ipython_extension(ipython)
|
||||
|
||||
|
||||
def get_cli_string(
|
||||
path: Optional[str] = None,
|
||||
action: Optional[str] = None,
|
||||
key: Optional[str] = None,
|
||||
value: Optional[str] = None,
|
||||
quote: Optional[str] = None,
|
||||
):
|
||||
"""Returns a string suitable for running as a shell script.
|
||||
|
||||
Useful for converting a arguments passed to a fabric task
|
||||
to be passed to a `local` or `run` command.
|
||||
"""
|
||||
command = ["dotenv"]
|
||||
if quote:
|
||||
command.append(f"-q {quote}")
|
||||
if path:
|
||||
command.append(f"-f {path}")
|
||||
if action:
|
||||
command.append(action)
|
||||
if key:
|
||||
command.append(key)
|
||||
if value:
|
||||
if " " in value:
|
||||
command.append(f'"{value}"')
|
||||
else:
|
||||
command.append(value)
|
||||
|
||||
return " ".join(command).strip()
|
||||
|
||||
|
||||
__all__ = [
|
||||
"get_cli_string",
|
||||
"load_dotenv",
|
||||
"dotenv_values",
|
||||
"get_key",
|
||||
"set_key",
|
||||
"unset_key",
|
||||
"find_dotenv",
|
||||
"load_ipython_extension",
|
||||
]
|
||||
6
venv/lib/python3.12/site-packages/dotenv/__main__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
"""Entry point for cli, enables execution with `python -m dotenv`"""
|
||||
|
||||
from .cli import cli
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
236
venv/lib/python3.12/site-packages/dotenv/cli.py
Normal file
@@ -0,0 +1,236 @@
|
||||
import json
|
||||
import os
|
||||
import shlex
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
from typing import IO, Any, Dict, Iterator, List, Optional
|
||||
|
||||
if sys.platform == "win32":
|
||||
from subprocess import Popen
|
||||
|
||||
try:
|
||||
import click
|
||||
except ImportError:
|
||||
sys.stderr.write(
|
||||
"It seems python-dotenv is not installed with cli option. \n"
|
||||
'Run pip install "python-dotenv[cli]" to fix this.'
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
from .main import dotenv_values, set_key, unset_key
|
||||
from .version import __version__
|
||||
|
||||
|
||||
def enumerate_env() -> Optional[str]:
|
||||
"""
|
||||
Return a path for the ${pwd}/.env file.
|
||||
|
||||
If pwd does not exist, return None.
|
||||
"""
|
||||
try:
|
||||
cwd = os.getcwd()
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
path = os.path.join(cwd, ".env")
|
||||
return path
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.option(
|
||||
"-f",
|
||||
"--file",
|
||||
default=enumerate_env(),
|
||||
type=click.Path(file_okay=True),
|
||||
help="Location of the .env file, defaults to .env file in current working directory.",
|
||||
)
|
||||
@click.option(
|
||||
"-q",
|
||||
"--quote",
|
||||
default="always",
|
||||
type=click.Choice(["always", "never", "auto"]),
|
||||
help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.",
|
||||
)
|
||||
@click.option(
|
||||
"-e",
|
||||
"--export",
|
||||
default=False,
|
||||
type=click.BOOL,
|
||||
help="Whether to write the dot file as an executable bash script.",
|
||||
)
|
||||
@click.version_option(version=__version__)
|
||||
@click.pass_context
|
||||
def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None:
|
||||
"""This script is used to set, get or unset values from a .env file."""
|
||||
ctx.obj = {"QUOTE": quote, "EXPORT": export, "FILE": file}
|
||||
|
||||
|
||||
@contextmanager
|
||||
def stream_file(path: os.PathLike) -> Iterator[IO[str]]:
|
||||
"""
|
||||
Open a file and yield the corresponding (decoded) stream.
|
||||
|
||||
Exits with error code 2 if the file cannot be opened.
|
||||
"""
|
||||
|
||||
try:
|
||||
with open(path) as stream:
|
||||
yield stream
|
||||
except OSError as exc:
|
||||
print(f"Error opening env file: {exc}", file=sys.stderr)
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
@cli.command(name="list")
|
||||
@click.pass_context
|
||||
@click.option(
|
||||
"--format",
|
||||
"output_format",
|
||||
default="simple",
|
||||
type=click.Choice(["simple", "json", "shell", "export"]),
|
||||
help="The format in which to display the list. Default format is simple, "
|
||||
"which displays name=value without quotes.",
|
||||
)
|
||||
def list_values(ctx: click.Context, output_format: str) -> None:
|
||||
"""Display all the stored key/value."""
|
||||
file = ctx.obj["FILE"]
|
||||
|
||||
with stream_file(file) as stream:
|
||||
values = dotenv_values(stream=stream)
|
||||
|
||||
if output_format == "json":
|
||||
click.echo(json.dumps(values, indent=2, sort_keys=True))
|
||||
else:
|
||||
prefix = "export " if output_format == "export" else ""
|
||||
for k in sorted(values):
|
||||
v = values[k]
|
||||
if v is not None:
|
||||
if output_format in ("export", "shell"):
|
||||
v = shlex.quote(v)
|
||||
click.echo(f"{prefix}{k}={v}")
|
||||
|
||||
|
||||
@cli.command(name="set")
|
||||
@click.pass_context
|
||||
@click.argument("key", required=True)
|
||||
@click.argument("value", required=True)
|
||||
def set_value(ctx: click.Context, key: Any, value: Any) -> None:
|
||||
"""
|
||||
Store the given key/value.
|
||||
|
||||
This doesn't follow symlinks, to avoid accidentally modifying a file at a
|
||||
potentially untrusted path.
|
||||
"""
|
||||
|
||||
file = ctx.obj["FILE"]
|
||||
quote = ctx.obj["QUOTE"]
|
||||
export = ctx.obj["EXPORT"]
|
||||
success, key, value = set_key(file, key, value, quote, export)
|
||||
if success:
|
||||
click.echo(f"{key}={value}")
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.pass_context
|
||||
@click.argument("key", required=True)
|
||||
def get(ctx: click.Context, key: Any) -> None:
|
||||
"""Retrieve the value for the given key."""
|
||||
file = ctx.obj["FILE"]
|
||||
|
||||
with stream_file(file) as stream:
|
||||
values = dotenv_values(stream=stream)
|
||||
|
||||
stored_value = values.get(key)
|
||||
if stored_value:
|
||||
click.echo(stored_value)
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.pass_context
|
||||
@click.argument("key", required=True)
|
||||
def unset(ctx: click.Context, key: Any) -> None:
|
||||
"""
|
||||
Removes the given key.
|
||||
|
||||
This doesn't follow symlinks, to avoid accidentally modifying a file at a
|
||||
potentially untrusted path.
|
||||
"""
|
||||
file = ctx.obj["FILE"]
|
||||
quote = ctx.obj["QUOTE"]
|
||||
success, key = unset_key(file, key, quote)
|
||||
if success:
|
||||
click.echo(f"Successfully removed {key}")
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@cli.command(
|
||||
context_settings={
|
||||
"allow_extra_args": True,
|
||||
"allow_interspersed_args": False,
|
||||
"ignore_unknown_options": True,
|
||||
}
|
||||
)
|
||||
@click.pass_context
|
||||
@click.option(
|
||||
"--override/--no-override",
|
||||
default=True,
|
||||
help="Override variables from the environment file with those from the .env file.",
|
||||
)
|
||||
@click.argument("commandline", nargs=-1, type=click.UNPROCESSED)
|
||||
def run(ctx: click.Context, override: bool, commandline: tuple[str, ...]) -> None:
|
||||
"""Run command with environment variables present."""
|
||||
file = ctx.obj["FILE"]
|
||||
if not os.path.isfile(file):
|
||||
raise click.BadParameter(
|
||||
f"Invalid value for '-f' \"{file}\" does not exist.", ctx=ctx
|
||||
)
|
||||
dotenv_as_dict = {
|
||||
k: v
|
||||
for (k, v) in dotenv_values(file).items()
|
||||
if v is not None and (override or k not in os.environ)
|
||||
}
|
||||
|
||||
if not commandline:
|
||||
click.echo("No command given.")
|
||||
sys.exit(1)
|
||||
|
||||
run_command([*commandline, *ctx.args], dotenv_as_dict)
|
||||
|
||||
|
||||
def run_command(command: List[str], env: Dict[str, str]) -> None:
|
||||
"""Replace the current process with the specified command.
|
||||
|
||||
Replaces the current process with the specified command and the variables from `env`
|
||||
added in the current environment variables.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
command: List[str]
|
||||
The command and its parameters
|
||||
env: Dict
|
||||
The additional environment variables
|
||||
|
||||
Returns
|
||||
-------
|
||||
None
|
||||
This function does not return any value. It replaces the current process with the new one.
|
||||
|
||||
"""
|
||||
# copy the current environment variables and add the values from
# `env`
|
||||
cmd_env = os.environ.copy()
|
||||
cmd_env.update(env)
|
||||
|
||||
if sys.platform == "win32":
|
||||
# execvpe on Windows returns control immediately
|
||||
# rather than once the command has finished.
|
||||
p = Popen(command, universal_newlines=True, bufsize=0, shell=False, env=cmd_env)
|
||||
_, _ = p.communicate()
|
||||
|
||||
sys.exit(p.returncode)
|
||||
else:
|
||||
os.execvpe(command[0], args=command, env=cmd_env)
|
||||
50
venv/lib/python3.12/site-packages/dotenv/ipython.py
Normal file
@@ -0,0 +1,50 @@
|
||||
from IPython.core.magic import Magics, line_magic, magics_class # type: ignore
|
||||
from IPython.core.magic_arguments import (
|
||||
argument,
|
||||
magic_arguments,
|
||||
parse_argstring,
|
||||
) # type: ignore
|
||||
|
||||
from .main import find_dotenv, load_dotenv
|
||||
|
||||
|
||||
@magics_class
|
||||
class IPythonDotEnv(Magics):
|
||||
@magic_arguments()
|
||||
@argument(
|
||||
"-o",
|
||||
"--override",
|
||||
action="store_true",
|
||||
help="Indicate to override existing variables",
|
||||
)
|
||||
@argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="store_true",
|
||||
help="Indicate function calls to be verbose",
|
||||
)
|
||||
@argument(
|
||||
"dotenv_path",
|
||||
nargs="?",
|
||||
type=str,
|
||||
default=".env",
|
||||
help="Search in increasingly higher folders for the `dotenv_path`",
|
||||
)
|
||||
@line_magic
|
||||
def dotenv(self, line):
|
||||
args = parse_argstring(self.dotenv, line)
|
||||
# Locate the .env file
|
||||
dotenv_path = args.dotenv_path
|
||||
try:
|
||||
dotenv_path = find_dotenv(dotenv_path, True, True)
|
||||
except IOError:
|
||||
print("cannot find .env file")
|
||||
return
|
||||
|
||||
# Load the .env file
|
||||
load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
|
||||
|
||||
|
||||
def load_ipython_extension(ipython):
|
||||
"""Register the %dotenv magic."""
|
||||
ipython.register_magics(IPythonDotEnv)
|
||||
480
venv/lib/python3.12/site-packages/dotenv/main.py
Normal file
@@ -0,0 +1,480 @@
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import stat
|
||||
import sys
|
||||
import tempfile
|
||||
from collections import OrderedDict
|
||||
from contextlib import contextmanager
|
||||
from typing import IO, Dict, Iterable, Iterator, Mapping, Optional, Tuple, Union
|
||||
|
||||
from .parser import Binding, parse_stream
|
||||
from .variables import parse_variables
|
||||
|
||||
# A type alias for a string path to be used for the paths in this file.
|
||||
# These paths may flow to `open()` and `os.replace()`.
|
||||
StrPath = Union[str, "os.PathLike[str]"]
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _load_dotenv_disabled() -> bool:
|
||||
"""
|
||||
Determine if dotenv loading has been disabled.
|
||||
"""
|
||||
if "PYTHON_DOTENV_DISABLED" not in os.environ:
|
||||
return False
|
||||
value = os.environ["PYTHON_DOTENV_DISABLED"].casefold()
|
||||
return value in {"1", "true", "t", "yes", "y"}
|
||||
|
||||
|
||||
def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding]:
|
||||
for mapping in mappings:
|
||||
if mapping.error:
|
||||
logger.warning(
|
||||
"python-dotenv could not parse statement starting at line %s",
|
||||
mapping.original.line,
|
||||
)
|
||||
yield mapping
|
||||
|
||||
|
||||
class DotEnv:
|
||||
def __init__(
|
||||
self,
|
||||
dotenv_path: Optional[StrPath],
|
||||
stream: Optional[IO[str]] = None,
|
||||
verbose: bool = False,
|
||||
encoding: Optional[str] = None,
|
||||
interpolate: bool = True,
|
||||
override: bool = True,
|
||||
) -> None:
|
||||
self.dotenv_path: Optional[StrPath] = dotenv_path
|
||||
self.stream: Optional[IO[str]] = stream
|
||||
self._dict: Optional[Dict[str, Optional[str]]] = None
|
||||
self.verbose: bool = verbose
|
||||
self.encoding: Optional[str] = encoding
|
||||
self.interpolate: bool = interpolate
|
||||
self.override: bool = override
|
||||
|
||||
@contextmanager
|
||||
def _get_stream(self) -> Iterator[IO[str]]:
|
||||
if self.dotenv_path and _is_file_or_fifo(self.dotenv_path):
|
||||
with open(self.dotenv_path, encoding=self.encoding) as stream:
|
||||
yield stream
|
||||
elif self.stream is not None:
|
||||
yield self.stream
|
||||
else:
|
||||
if self.verbose:
|
||||
logger.info(
|
||||
"python-dotenv could not find configuration file %s.",
|
||||
self.dotenv_path or ".env",
|
||||
)
|
||||
yield io.StringIO("")
|
||||
|
||||
def dict(self) -> Dict[str, Optional[str]]:
|
||||
"""Return dotenv as dict"""
|
||||
if self._dict:
|
||||
return self._dict
|
||||
|
||||
raw_values = self.parse()
|
||||
|
||||
if self.interpolate:
|
||||
self._dict = OrderedDict(
|
||||
resolve_variables(raw_values, override=self.override)
|
||||
)
|
||||
else:
|
||||
self._dict = OrderedDict(raw_values)
|
||||
|
||||
return self._dict
|
||||
|
||||
def parse(self) -> Iterator[Tuple[str, Optional[str]]]:
|
||||
with self._get_stream() as stream:
|
||||
for mapping in with_warn_for_invalid_lines(parse_stream(stream)):
|
||||
if mapping.key is not None:
|
||||
yield mapping.key, mapping.value
|
||||
|
||||
def set_as_environment_variables(self) -> bool:
|
||||
"""
|
||||
Load the current dotenv as system environment variable.
|
||||
"""
|
||||
if not self.dict():
|
||||
return False
|
||||
|
||||
for k, v in self.dict().items():
|
||||
if k in os.environ and not self.override:
|
||||
continue
|
||||
if v is not None:
|
||||
os.environ[k] = v
|
||||
|
||||
return True
|
||||
|
||||
def get(self, key: str) -> Optional[str]:
|
||||
""" """
|
||||
data = self.dict()
|
||||
|
||||
if key in data:
|
||||
return data[key]
|
||||
|
||||
if self.verbose:
|
||||
logger.warning("Key %s not found in %s.", key, self.dotenv_path)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_key(
|
||||
dotenv_path: StrPath,
|
||||
key_to_get: str,
|
||||
encoding: Optional[str] = "utf-8",
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Get the value of a given key from the given .env.
|
||||
|
||||
Returns `None` if the key isn't found or doesn't have a value.
|
||||
"""
|
||||
return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def rewrite(
|
||||
path: StrPath,
|
||||
encoding: Optional[str],
|
||||
follow_symlinks: bool = False,
|
||||
) -> Iterator[Tuple[IO[str], IO[str]]]:
|
||||
if follow_symlinks:
|
||||
path = os.path.realpath(path)
|
||||
|
||||
try:
|
||||
source: IO[str] = open(path, encoding=encoding)
|
||||
try:
|
||||
path_stat = os.lstat(path)
|
||||
original_mode: Optional[int] = (
|
||||
stat.S_IMODE(path_stat.st_mode)
|
||||
if stat.S_ISREG(path_stat.st_mode)
|
||||
else None
|
||||
)
|
||||
except BaseException:
|
||||
source.close()
|
||||
raise
|
||||
except FileNotFoundError:
|
||||
source = io.StringIO("")
|
||||
original_mode = None
|
||||
|
||||
with tempfile.NamedTemporaryFile(
|
||||
mode="w",
|
||||
encoding=encoding,
|
||||
delete=False,
|
||||
prefix=".tmp_",
|
||||
dir=os.path.dirname(os.path.abspath(path)),
|
||||
) as dest:
|
||||
dest_path = pathlib.Path(dest.name)
|
||||
error = None
|
||||
|
||||
try:
|
||||
with source:
|
||||
yield (source, dest)
|
||||
except BaseException as err:
|
||||
error = err
|
||||
|
||||
if error is None:
|
||||
try:
|
||||
if original_mode is not None:
|
||||
os.chmod(dest_path, original_mode)
|
||||
|
||||
os.replace(dest_path, path)
|
||||
except BaseException:
|
||||
dest_path.unlink(missing_ok=True)
|
||||
raise
|
||||
else:
|
||||
dest_path.unlink(missing_ok=True)
|
||||
raise error from None
|
||||
|
||||
|
||||
def set_key(
|
||||
dotenv_path: StrPath,
|
||||
key_to_set: str,
|
||||
value_to_set: str,
|
||||
quote_mode: str = "always",
|
||||
export: bool = False,
|
||||
encoding: Optional[str] = "utf-8",
|
||||
follow_symlinks: bool = False,
|
||||
) -> Tuple[Optional[bool], str, str]:
|
||||
"""
|
||||
Adds or updates a key/value in the given .env file.
|
||||
|
||||
The target .env file is created if it doesn't exist.
|
||||
|
||||
This function doesn't follow symlinks by default, to avoid accidentally
|
||||
modifying a file at a potentially untrusted path. If you don't need this
|
||||
protection and need symlinks to be followed, use `follow_symlinks`.
|
||||
"""
|
||||
if quote_mode not in ("always", "auto", "never"):
|
||||
raise ValueError(f"Unknown quote_mode: {quote_mode}")
|
||||
|
||||
quote = quote_mode == "always" or (
|
||||
quote_mode == "auto" and not value_to_set.isalnum()
|
||||
)
|
||||
|
||||
if quote:
|
||||
value_out = "'{}'".format(value_to_set.replace("'", "\\'"))
|
||||
else:
|
||||
value_out = value_to_set
|
||||
if export:
|
||||
line_out = f"export {key_to_set}={value_out}\n"
|
||||
else:
|
||||
line_out = f"{key_to_set}={value_out}\n"
|
||||
|
||||
with rewrite(dotenv_path, encoding=encoding, follow_symlinks=follow_symlinks) as (
|
||||
source,
|
||||
dest,
|
||||
):
|
||||
replaced = False
|
||||
missing_newline = False
|
||||
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
|
||||
if mapping.key == key_to_set:
|
||||
dest.write(line_out)
|
||||
replaced = True
|
||||
else:
|
||||
dest.write(mapping.original.string)
|
||||
missing_newline = not mapping.original.string.endswith("\n")
|
||||
if not replaced:
|
||||
if missing_newline:
|
||||
dest.write("\n")
|
||||
dest.write(line_out)
|
||||
|
||||
return True, key_to_set, value_to_set
|
||||
|
||||
|
||||
def unset_key(
|
||||
dotenv_path: StrPath,
|
||||
key_to_unset: str,
|
||||
quote_mode: str = "always",
|
||||
encoding: Optional[str] = "utf-8",
|
||||
follow_symlinks: bool = False,
|
||||
) -> Tuple[Optional[bool], str]:
|
||||
"""
|
||||
Removes a given key from the given `.env` file.
|
||||
|
||||
If the .env path given doesn't exist, fails.
|
||||
If the given key doesn't exist in the .env, fails.
|
||||
|
||||
This function doesn't follow symlinks by default, to avoid accidentally
|
||||
modifying a file at a potentially untrusted path. If you don't need this
|
||||
protection and need symlinks to be followed, use `follow_symlinks`.
|
||||
"""
|
||||
if not os.path.exists(dotenv_path):
|
||||
logger.warning("Can't delete from %s - it doesn't exist.", dotenv_path)
|
||||
return None, key_to_unset
|
||||
|
||||
removed = False
|
||||
with rewrite(dotenv_path, encoding=encoding, follow_symlinks=follow_symlinks) as (
|
||||
source,
|
||||
dest,
|
||||
):
|
||||
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
|
||||
if mapping.key == key_to_unset:
|
||||
removed = True
|
||||
else:
|
||||
dest.write(mapping.original.string)
|
||||
|
||||
if not removed:
|
||||
logger.warning(
|
||||
"Key %s not removed from %s - key doesn't exist.", key_to_unset, dotenv_path
|
||||
)
|
||||
return None, key_to_unset
|
||||
|
||||
return removed, key_to_unset
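A round-trip sketch of the file-editing helpers above, using a throwaway temporary directory (the path and key are illustrative only):

```python
import os
import tempfile

from dotenv import get_key, set_key, unset_key

# Throwaway location; the file name and key are placeholders for this sketch.
env_path = os.path.join(tempfile.mkdtemp(), ".env")

set_key(env_path, "API_TOKEN", "s3cr3t")   # creates the file and writes API_TOKEN='s3cr3t'
print(get_key(env_path, "API_TOKEN"))      # -> s3cr3t
unset_key(env_path, "API_TOKEN")           # removes the binding again
print(get_key(env_path, "API_TOKEN"))      # -> None
```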
|
||||
|
||||
|
||||
def resolve_variables(
|
||||
values: Iterable[Tuple[str, Optional[str]]],
|
||||
override: bool,
|
||||
) -> Mapping[str, Optional[str]]:
|
||||
new_values: Dict[str, Optional[str]] = {}
|
||||
|
||||
for name, value in values:
|
||||
if value is None:
|
||||
result = None
|
||||
else:
|
||||
atoms = parse_variables(value)
|
||||
env: Dict[str, Optional[str]] = {}
|
||||
if override:
|
||||
env.update(os.environ) # type: ignore
|
||||
env.update(new_values)
|
||||
else:
|
||||
env.update(new_values)
|
||||
env.update(os.environ) # type: ignore
|
||||
result = "".join(atom.resolve(env) for atom in atoms)
|
||||
|
||||
new_values[name] = result
|
||||
|
||||
return new_values
|
||||
|
||||
|
||||
def _walk_to_root(path: str) -> Iterator[str]:
|
||||
"""
|
||||
Yield directories starting from the given directory up to the root
|
||||
"""
|
||||
if not os.path.exists(path):
|
||||
raise IOError("Starting path not found")
|
||||
|
||||
if os.path.isfile(path):
|
||||
path = os.path.dirname(path)
|
||||
|
||||
last_dir = None
|
||||
current_dir = os.path.abspath(path)
|
||||
while last_dir != current_dir:
|
||||
yield current_dir
|
||||
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
|
||||
last_dir, current_dir = current_dir, parent_dir
|
||||
|
||||
|
||||
def find_dotenv(
|
||||
filename: str = ".env",
|
||||
raise_error_if_not_found: bool = False,
|
||||
usecwd: bool = False,
|
||||
) -> str:
|
||||
"""
|
||||
Search in increasingly higher folders for the given file
|
||||
|
||||
Returns path to the file if found, or an empty string otherwise
|
||||
"""
|
||||
|
||||
def _is_interactive():
|
||||
"""Decide whether this is running in a REPL or IPython notebook"""
|
||||
if hasattr(sys, "ps1") or hasattr(sys, "ps2"):
|
||||
return True
|
||||
try:
|
||||
main = __import__("__main__", None, None, fromlist=["__file__"])
|
||||
except ModuleNotFoundError:
|
||||
return False
|
||||
return not hasattr(main, "__file__")
|
||||
|
||||
def _is_debugger():
|
||||
return sys.gettrace() is not None
|
||||
|
||||
if usecwd or _is_interactive() or _is_debugger() or getattr(sys, "frozen", False):
|
||||
# Should work without __file__, e.g. in REPL or IPython notebook.
|
||||
path = os.getcwd()
|
||||
else:
|
||||
# will work for .py files
|
||||
frame = sys._getframe()
|
||||
current_file = __file__
|
||||
|
||||
while frame.f_code.co_filename == current_file or not os.path.exists(
|
||||
frame.f_code.co_filename
|
||||
):
|
||||
assert frame.f_back is not None
|
||||
frame = frame.f_back
|
||||
frame_filename = frame.f_code.co_filename
|
||||
path = os.path.dirname(os.path.abspath(frame_filename))
|
||||
|
||||
for dirname in _walk_to_root(path):
|
||||
check_path = os.path.join(dirname, filename)
|
||||
if _is_file_or_fifo(check_path):
|
||||
return check_path
|
||||
|
||||
if raise_error_if_not_found:
|
||||
raise IOError("File not found")
|
||||
|
||||
return ""
|
||||
|
||||
|
||||
def load_dotenv(
|
||||
dotenv_path: Optional[StrPath] = None,
|
||||
stream: Optional[IO[str]] = None,
|
||||
verbose: bool = False,
|
||||
override: bool = False,
|
||||
interpolate: bool = True,
|
||||
encoding: Optional[str] = "utf-8",
|
||||
) -> bool:
|
||||
"""Parse a .env file and then load all the variables found as environment variables.
|
||||
|
||||
Parameters:
|
||||
dotenv_path: Absolute or relative path to .env file.
|
||||
stream: Text stream (such as `io.StringIO`) with .env content, used if
|
||||
`dotenv_path` is `None`.
|
||||
verbose: Whether to output a warning if the .env file is missing.
|
||||
override: Whether to override the system environment variables with the variables
|
||||
from the `.env` file.
|
||||
encoding: Encoding to be used to read the file.
|
||||
Returns:
|
||||
Bool: True if at least one environment variable is set else False
|
||||
|
||||
If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
|
||||
.env file with its default parameters. If you need to change the default parameters
|
||||
of `find_dotenv()`, you can explicitly call `find_dotenv()` and pass the result
|
||||
to this function as `dotenv_path`.
|
||||
|
||||
If the environment variable `PYTHON_DOTENV_DISABLED` is set to a truthy value,
|
||||
.env loading is disabled.
|
||||
"""
|
||||
if _load_dotenv_disabled():
|
||||
logger.debug(
|
||||
"python-dotenv: .env loading disabled by PYTHON_DOTENV_DISABLED environment variable"
|
||||
)
|
||||
return False
|
||||
|
||||
if dotenv_path is None and stream is None:
|
||||
dotenv_path = find_dotenv()
|
||||
|
||||
dotenv = DotEnv(
|
||||
dotenv_path=dotenv_path,
|
||||
stream=stream,
|
||||
verbose=verbose,
|
||||
interpolate=interpolate,
|
||||
override=override,
|
||||
encoding=encoding,
|
||||
)
|
||||
return dotenv.set_as_environment_variables()
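A minimal sketch of loading variables from an in-memory stream rather than a file; the variable name below is a placeholder, not something this project defines:

```python
import io
import os

from dotenv import load_dotenv

# EXAMPLE_BASE_URL is a placeholder variable used only for this sketch.
stream = io.StringIO("EXAMPLE_BASE_URL=https://api.example.test\n")
load_dotenv(stream=stream)

print(os.environ.get("EXAMPLE_BASE_URL"))   # -> https://api.example.test
```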
|
||||
|
||||
|
||||
def dotenv_values(
|
||||
dotenv_path: Optional[StrPath] = None,
|
||||
stream: Optional[IO[str]] = None,
|
||||
verbose: bool = False,
|
||||
interpolate: bool = True,
|
||||
encoding: Optional[str] = "utf-8",
|
||||
) -> Dict[str, Optional[str]]:
|
||||
"""
|
||||
Parse a .env file and return its content as a dict.
|
||||
|
||||
The returned dict will have `None` values for keys without values in the .env file.
|
||||
For example, `foo=bar` results in `{"foo": "bar"}` whereas `foo` alone results in
|
||||
`{"foo": None}`
|
||||
|
||||
Parameters:
|
||||
dotenv_path: Absolute or relative path to the .env file.
|
||||
stream: `StringIO` object with .env content, used if `dotenv_path` is `None`.
|
||||
verbose: Whether to output a warning if the .env file is missing.
|
||||
encoding: Encoding to be used to read the file.
|
||||
|
||||
If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
|
||||
.env file.
|
||||
"""
|
||||
if dotenv_path is None and stream is None:
|
||||
dotenv_path = find_dotenv()
|
||||
|
||||
return DotEnv(
|
||||
dotenv_path=dotenv_path,
|
||||
stream=stream,
|
||||
verbose=verbose,
|
||||
interpolate=interpolate,
|
||||
override=True,
|
||||
encoding=encoding,
|
||||
).dict()
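A short sketch of `dotenv_values()` reading from a stream; note that a key without `=` is kept with a `None` value, as the docstring above describes:

```python
import io

from dotenv import dotenv_values

content = "HOST=localhost\nPORT=8080\nEMPTY\n"
values = dotenv_values(stream=io.StringIO(content))

print(values["HOST"], values["PORT"])   # -> localhost 8080
print(values["EMPTY"])                  # -> None (key present, but it has no value)
```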
|
||||
|
||||
|
||||
def _is_file_or_fifo(path: StrPath) -> bool:
|
||||
"""
|
||||
Return True if `path` exists and is either a regular file or a FIFO.
|
||||
"""
|
||||
if os.path.isfile(path):
|
||||
return True
|
||||
|
||||
try:
|
||||
st = os.stat(path)
|
||||
except (FileNotFoundError, OSError):
|
||||
return False
|
||||
|
||||
return stat.S_ISFIFO(st.st_mode)
|
||||
182
venv/lib/python3.12/site-packages/dotenv/parser.py
Normal file
@@ -0,0 +1,182 @@
|
||||
import codecs
|
||||
import re
|
||||
from typing import (
|
||||
IO,
|
||||
Iterator,
|
||||
Match,
|
||||
NamedTuple,
|
||||
Optional,
|
||||
Pattern,
|
||||
Sequence,
|
||||
)
|
||||
|
||||
|
||||
def make_regex(string: str, extra_flags: int = 0) -> Pattern[str]:
|
||||
return re.compile(string, re.UNICODE | extra_flags)
|
||||
|
||||
|
||||
_newline = make_regex(r"(\r\n|\n|\r)")
|
||||
_multiline_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
|
||||
_whitespace = make_regex(r"[^\S\r\n]*")
|
||||
_export = make_regex(r"(?:export[^\S\r\n]+)?")
|
||||
_single_quoted_key = make_regex(r"'([^']+)'")
|
||||
_unquoted_key = make_regex(r"([^=\#\s]+)")
|
||||
_equal_sign = make_regex(r"(=[^\S\r\n]*)")
|
||||
_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
|
||||
_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
|
||||
_unquoted_value = make_regex(r"([^\r\n]*)")
|
||||
_comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?")
|
||||
_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)")
|
||||
_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
|
||||
_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
|
||||
_single_quote_escapes = make_regex(r"\\[\\']")
|
||||
|
||||
|
||||
class Original(NamedTuple):
|
||||
string: str
|
||||
line: int
|
||||
|
||||
|
||||
class Binding(NamedTuple):
|
||||
key: Optional[str]
|
||||
value: Optional[str]
|
||||
original: Original
|
||||
error: bool
|
||||
|
||||
|
||||
class Position:
|
||||
def __init__(self, chars: int, line: int) -> None:
|
||||
self.chars = chars
|
||||
self.line = line
|
||||
|
||||
@classmethod
|
||||
def start(cls) -> "Position":
|
||||
return cls(chars=0, line=1)
|
||||
|
||||
def set(self, other: "Position") -> None:
|
||||
self.chars = other.chars
|
||||
self.line = other.line
|
||||
|
||||
def advance(self, string: str) -> None:
|
||||
self.chars += len(string)
|
||||
self.line += len(re.findall(_newline, string))
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Reader:
|
||||
def __init__(self, stream: IO[str]) -> None:
|
||||
self.string = stream.read()
|
||||
self.position = Position.start()
|
||||
self.mark = Position.start()
|
||||
|
||||
def has_next(self) -> bool:
|
||||
return self.position.chars < len(self.string)
|
||||
|
||||
def set_mark(self) -> None:
|
||||
self.mark.set(self.position)
|
||||
|
||||
def get_marked(self) -> Original:
|
||||
return Original(
|
||||
string=self.string[self.mark.chars : self.position.chars],
|
||||
line=self.mark.line,
|
||||
)
|
||||
|
||||
def peek(self, count: int) -> str:
|
||||
return self.string[self.position.chars : self.position.chars + count]
|
||||
|
||||
def read(self, count: int) -> str:
|
||||
result = self.string[self.position.chars : self.position.chars + count]
|
||||
if len(result) < count:
|
||||
raise Error("read: End of string")
|
||||
self.position.advance(result)
|
||||
return result
|
||||
|
||||
def read_regex(self, regex: Pattern[str]) -> Sequence[str]:
|
||||
match = regex.match(self.string, self.position.chars)
|
||||
if match is None:
|
||||
raise Error("read_regex: Pattern not found")
|
||||
self.position.advance(self.string[match.start() : match.end()])
|
||||
return match.groups()
|
||||
|
||||
|
||||
def decode_escapes(regex: Pattern[str], string: str) -> str:
|
||||
def decode_match(match: Match[str]) -> str:
|
||||
return codecs.decode(match.group(0), "unicode-escape") # type: ignore
|
||||
|
||||
return regex.sub(decode_match, string)
|
||||
|
||||
|
||||
def parse_key(reader: Reader) -> Optional[str]:
|
||||
char = reader.peek(1)
|
||||
if char == "#":
|
||||
return None
|
||||
elif char == "'":
|
||||
(key,) = reader.read_regex(_single_quoted_key)
|
||||
else:
|
||||
(key,) = reader.read_regex(_unquoted_key)
|
||||
return key
|
||||
|
||||
|
||||
def parse_unquoted_value(reader: Reader) -> str:
|
||||
(part,) = reader.read_regex(_unquoted_value)
|
||||
return re.sub(r"\s+#.*", "", part).rstrip()
|
||||
|
||||
|
||||
def parse_value(reader: Reader) -> str:
|
||||
char = reader.peek(1)
|
||||
if char == "'":
|
||||
(value,) = reader.read_regex(_single_quoted_value)
|
||||
return decode_escapes(_single_quote_escapes, value)
|
||||
elif char == '"':
|
||||
(value,) = reader.read_regex(_double_quoted_value)
|
||||
return decode_escapes(_double_quote_escapes, value)
|
||||
elif char in ("", "\n", "\r"):
|
||||
return ""
|
||||
else:
|
||||
return parse_unquoted_value(reader)
|
||||
|
||||
|
||||
def parse_binding(reader: Reader) -> Binding:
|
||||
reader.set_mark()
|
||||
try:
|
||||
reader.read_regex(_multiline_whitespace)
|
||||
if not reader.has_next():
|
||||
return Binding(
|
||||
key=None,
|
||||
value=None,
|
||||
original=reader.get_marked(),
|
||||
error=False,
|
||||
)
|
||||
reader.read_regex(_export)
|
||||
key = parse_key(reader)
|
||||
reader.read_regex(_whitespace)
|
||||
if reader.peek(1) == "=":
|
||||
reader.read_regex(_equal_sign)
|
||||
value: Optional[str] = parse_value(reader)
|
||||
else:
|
||||
value = None
|
||||
reader.read_regex(_comment)
|
||||
reader.read_regex(_end_of_line)
|
||||
return Binding(
|
||||
key=key,
|
||||
value=value,
|
||||
original=reader.get_marked(),
|
||||
error=False,
|
||||
)
|
||||
except Error:
|
||||
reader.read_regex(_rest_of_line)
|
||||
return Binding(
|
||||
key=None,
|
||||
value=None,
|
||||
original=reader.get_marked(),
|
||||
error=True,
|
||||
)
|
||||
|
||||
|
||||
def parse_stream(stream: IO[str]) -> Iterator[Binding]:
|
||||
reader = Reader(stream)
|
||||
while reader.has_next():
|
||||
yield parse_binding(reader)
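An illustrative sketch of the low-level parser; the second line is deliberately malformed to show the `error` flag on the yielded Binding:

```python
import io

from dotenv.parser import parse_stream

source = io.StringIO("export GREETING='hello world'  # inline comment\nBROKEN LINE\n")

for binding in parse_stream(source):
    # Malformed statements are still yielded, flagged with error=True.
    print(binding.key, binding.value, binding.error)
```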
|
||||
1
venv/lib/python3.12/site-packages/dotenv/py.typed
Normal file
@@ -0,0 +1 @@
|
||||
# Marker file for PEP 561
|
||||
86
venv/lib/python3.12/site-packages/dotenv/variables.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import re
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from typing import Iterator, Mapping, Optional, Pattern
|
||||
|
||||
_posix_variable: Pattern[str] = re.compile(
|
||||
r"""
|
||||
\$\{
|
||||
(?P<name>[^\}:]*)
|
||||
(?::-
|
||||
(?P<default>[^\}]*)
|
||||
)?
|
||||
\}
|
||||
""",
|
||||
re.VERBOSE,
|
||||
)
|
||||
|
||||
|
||||
class Atom(metaclass=ABCMeta):
|
||||
def __ne__(self, other: object) -> bool:
|
||||
result = self.__eq__(other)
|
||||
if result is NotImplemented:
|
||||
return NotImplemented
|
||||
return not result
|
||||
|
||||
@abstractmethod
|
||||
def resolve(self, env: Mapping[str, Optional[str]]) -> str: ...
|
||||
|
||||
|
||||
class Literal(Atom):
|
||||
def __init__(self, value: str) -> None:
|
||||
self.value = value
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Literal(value={self.value})"
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, self.__class__):
|
||||
return NotImplemented
|
||||
return self.value == other.value
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash((self.__class__, self.value))
|
||||
|
||||
def resolve(self, env: Mapping[str, Optional[str]]) -> str:
|
||||
return self.value
|
||||
|
||||
|
||||
class Variable(Atom):
|
||||
def __init__(self, name: str, default: Optional[str]) -> None:
|
||||
self.name = name
|
||||
self.default = default
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Variable(name={self.name}, default={self.default})"
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, self.__class__):
|
||||
return NotImplemented
|
||||
return (self.name, self.default) == (other.name, other.default)
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash((self.__class__, self.name, self.default))
|
||||
|
||||
def resolve(self, env: Mapping[str, Optional[str]]) -> str:
|
||||
default = self.default if self.default is not None else ""
|
||||
result = env.get(self.name, default)
|
||||
return result if result is not None else ""
|
||||
|
||||
|
||||
def parse_variables(value: str) -> Iterator[Atom]:
|
||||
cursor = 0
|
||||
|
||||
for match in _posix_variable.finditer(value):
|
||||
(start, end) = match.span()
|
||||
name = match["name"]
|
||||
default = match["default"]
|
||||
|
||||
if start > cursor:
|
||||
yield Literal(value=value[cursor:start])
|
||||
|
||||
yield Variable(name=name, default=default)
|
||||
cursor = end
|
||||
|
||||
length = len(value)
|
||||
if cursor < length:
|
||||
yield Literal(value=value[cursor:length])
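A short sketch of POSIX-style variable expansion with `parse_variables()`; the connection string is hypothetical:

```python
from dotenv.variables import parse_variables

# Hypothetical connection string using the ${NAME:-default} syntax parsed above.
atoms = list(parse_variables("postgres://${DB_HOST:-localhost}:5432/app"))

print("".join(atom.resolve({"DB_HOST": "db.internal"}) for atom in atoms))
print("".join(atom.resolve({}) for atom in atoms))   # falls back to the default: localhost
```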
|
||||
1
venv/lib/python3.12/site-packages/dotenv/version.py
Normal file
@@ -0,0 +1 @@
__version__ = "1.2.2"
1
venv/lib/python3.12/site-packages/idna-3.11.dist-info/INSTALLER
Normal file
@@ -0,0 +1 @@
pip
209
venv/lib/python3.12/site-packages/idna-3.11.dist-info/METADATA
Normal file
@@ -0,0 +1,209 @@
Metadata-Version: 2.4
Name: idna
Version: 3.11
Summary: Internationalized Domain Names in Applications (IDNA)
Author-email: Kim Davies <kim+pypi@gumleaf.org>
Requires-Python: >=3.8
Description-Content-Type: text/x-rst
License-Expression: BSD-3-Clause
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: System Administrators
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: Name Service (DNS)
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Utilities
License-File: LICENSE.md
Requires-Dist: ruff >= 0.6.2 ; extra == "all"
Requires-Dist: mypy >= 1.11.2 ; extra == "all"
Requires-Dist: pytest >= 8.3.2 ; extra == "all"
Requires-Dist: flake8 >= 7.1.1 ; extra == "all"
Project-URL: Changelog, https://github.com/kjd/idna/blob/master/HISTORY.rst
Project-URL: Issue tracker, https://github.com/kjd/idna/issues
Project-URL: Source, https://github.com/kjd/idna
Provides-Extra: all

Internationalized Domain Names in Applications (IDNA)
=====================================================

Support for `Internationalized Domain Names in
Applications (IDNA) <https://tools.ietf.org/html/rfc5891>`_
and `Unicode IDNA Compatibility Processing
<https://unicode.org/reports/tr46/>`_.

The latest versions of these standards supplied here provide
more comprehensive language coverage and reduce the potential of
allowing domains with known security vulnerabilities. This library
is a suitable replacement for the “encodings.idna”
module that comes with the Python standard library, but which
only supports an older superseded IDNA specification from 2003.

Basic functions are simply executed:

.. code-block:: pycon

    >>> import idna
    >>> idna.encode('ドメイン.テスト')
    b'xn--eckwd4c7c.xn--zckzah'
    >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
    ドメイン.テスト


Installation
------------

This package is available for installation from PyPI via the
typical mechanisms, such as:

.. code-block:: bash

    $ python3 -m pip install idna


Usage
-----

For typical usage, the ``encode`` and ``decode`` functions will take a
domain name argument and perform a conversion to ASCII compatible encoding
(known as A-labels), or to Unicode strings (known as U-labels)
respectively.

.. code-block:: pycon

    >>> import idna
    >>> idna.encode('ドメイン.テスト')
    b'xn--eckwd4c7c.xn--zckzah'
    >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
    ドメイン.テスト

Conversions can be applied at a per-label basis using the ``ulabel`` or
``alabel`` functions if necessary:

.. code-block:: pycon

    >>> idna.alabel('测试')
    b'xn--0zwm56d'


Compatibility Mapping (UTS #46)
+++++++++++++++++++++++++++++++

This library provides support for `Unicode IDNA Compatibility
Processing <https://unicode.org/reports/tr46/>`_ which normalizes input from
different potential ways a user may input a domain prior to performing the IDNA
conversion operations. This functionality, known as a
`mapping <https://tools.ietf.org/html/rfc5895>`_, is considered by the
specification to be a local user-interface issue distinct from IDNA
conversion functionality.

For example, “Königsgäßchen” is not a permissible label as *LATIN
CAPITAL LETTER K* is not allowed (nor are capital letters in general).
UTS 46 will convert this into lower case prior to applying the IDNA
conversion.

.. code-block:: pycon

    >>> import idna
    >>> idna.encode('Königsgäßchen')
    ...
    idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed
    >>> idna.encode('Königsgäßchen', uts46=True)
    b'xn--knigsgchen-b4a3dun'
    >>> print(idna.decode('xn--knigsgchen-b4a3dun'))
    königsgäßchen


Exceptions
----------

All errors raised during the conversion following the specification
should raise an exception derived from the ``idna.IDNAError`` base
class.

More specific exceptions that may be generated as ``idna.IDNABidiError``
when the error reflects an illegal combination of left-to-right and
right-to-left characters in a label; ``idna.InvalidCodepoint`` when
a specific codepoint is an illegal character in an IDN label (i.e.
INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is
illegal based on its position in the string (i.e. it is CONTEXTO or CONTEXTJ
but the contextual requirements are not satisfied.)

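A minimal sketch of handling these exceptions (an illustrative addition, not
part of the upstream idna README; it relies only on the public names the
package re-exports):

.. code-block:: python

    import idna

    def to_alabel_or_none(domain: str):
        # idna.IDNAError is the base class, so this also catches
        # IDNABidiError, InvalidCodepoint and InvalidCodepointContext.
        try:
            return idna.encode(domain, uts46=True)
        except idna.IDNAError as exc:
            print(f"{domain!r} is not a valid IDN: {exc}")
            return None
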
Building and Diagnostics
------------------------

The IDNA and UTS 46 functionality relies upon pre-calculated lookup
tables for performance. These tables are derived from computing against
eligibility criteria in the respective standards using the command-line
script ``tools/idna-data``.

This tool will fetch relevant codepoint data from the Unicode repository
and perform the required calculations to identify eligibility. There are
three main modes:

* ``idna-data make-libdata``. Generates ``idnadata.py`` and
  ``uts46data.py``, the pre-calculated lookup tables used for IDNA and
  UTS 46 conversions. Implementers who wish to track this library against
  a different Unicode version may use this tool to manually generate a
  different version of the ``idnadata.py`` and ``uts46data.py`` files.

* ``idna-data make-table``. Generate a table of the IDNA disposition
  (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix
  B.1 of RFC 5892 and the pre-computed tables published by `IANA
  <https://www.iana.org/>`_.

* ``idna-data U+0061``. Prints debugging output on the various
  properties associated with an individual Unicode codepoint (in this
  case, U+0061), that are used to assess the IDNA and UTS 46 status of a
  codepoint. This is helpful in debugging or analysis.

The tool accepts a number of arguments, described using ``idna-data
-h``. Most notably, the ``--version`` argument allows the specification
of the version of Unicode to be used in computing the table data. For
example, ``idna-data --version 9.0.0 make-libdata`` will generate
library data against Unicode 9.0.0.


Additional Notes
----------------

* **Packages**. The latest tagged release version is published in the
  `Python Package Index <https://pypi.org/project/idna/>`_.

* **Version support**. This library supports Python 3.8 and higher.
  As this library serves as a low-level toolkit for a variety of
  applications, many of which strive for broad compatibility with older
  Python versions, there is no rush to remove older interpreter support.
  Support for older versions are likely to be removed from new releases
  as automated tests can no longer easily be run, i.e. once the Python
  version is officially end-of-life.

* **Testing**. The library has a test suite based on each rule of the
  IDNA specification, as well as tests that are provided as part of the
  Unicode Technical Standard 46, `Unicode IDNA Compatibility Processing
  <https://unicode.org/reports/tr46/>`_.

* **Emoji**. It is an occasional request to support emoji domains in
  this library. Encoding of symbols like emoji is expressly prohibited by
  the technical standard IDNA 2008 and emoji domains are broadly phased
  out across the domain industry due to associated security risks. For
  now, applications that need to support these non-compliant labels
  may wish to consider trying the encode/decode operation in this library
  first, and then falling back to using `encodings.idna`. See `the Github
  project <https://github.com/kjd/idna/issues/18>`_ for more discussion.

* **Transitional processing**. Unicode 16.0.0 removed transitional
  processing so the `transitional` argument for the encode() method
  no longer has any effect and will be removed at a later date.
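The **Emoji** note above suggests trying this library first and only then falling back to the standard library's older IDNA 2003 codec. A minimal sketch of that fallback (an illustrative addition, not part of the upstream README):

```python
import idna

def encode_lenient(domain: str) -> bytes:
    """Prefer strict IDNA 2008 encoding, then fall back to encodings.idna."""
    try:
        return idna.encode(domain, uts46=True)
    except idna.IDNAError:
        # The stdlib "idna" codec implements the older IDNA 2003 rules and
        # accepts some labels that IDNA 2008 forbids; it may still raise
        # UnicodeError for input it cannot map.
        return domain.encode("idna")
```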
22
venv/lib/python3.12/site-packages/idna-3.11.dist-info/RECORD
Normal file
@@ -0,0 +1,22 @@
idna-3.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
idna-3.11.dist-info/METADATA,sha256=fCwSww9SuiN8TIHllFSASUQCW55hAs8dzKnr9RaEEbA,8378
idna-3.11.dist-info/RECORD,,
idna-3.11.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
idna-3.11.dist-info/licenses/LICENSE.md,sha256=t6M2q_OwThgOwGXN0W5wXQeeHMehT5EKpukYfza5zYc,1541
idna/__init__.py,sha256=MPqNDLZbXqGaNdXxAFhiqFPKEQXju2jNQhCey6-5eJM,868
idna/__pycache__/__init__.cpython-312.pyc,,
idna/__pycache__/codec.cpython-312.pyc,,
idna/__pycache__/compat.cpython-312.pyc,,
idna/__pycache__/core.cpython-312.pyc,,
idna/__pycache__/idnadata.cpython-312.pyc,,
idna/__pycache__/intranges.cpython-312.pyc,,
idna/__pycache__/package_data.cpython-312.pyc,,
idna/__pycache__/uts46data.cpython-312.pyc,,
idna/codec.py,sha256=M2SGWN7cs_6B32QmKTyTN6xQGZeYQgQ2wiX3_DR6loE,3438
idna/compat.py,sha256=RzLy6QQCdl9784aFhb2EX9EKGCJjg0P3PilGdeXXcx8,316
idna/core.py,sha256=P26_XVycuMTZ1R2mNK1ZREVzM5mvTzdabBXfyZVU1Lc,13246
idna/idnadata.py,sha256=SG8jhaGE53iiD6B49pt2pwTv_UvClciWE-N54oR2p4U,79623
idna/intranges.py,sha256=amUtkdhYcQG8Zr-CoMM_kVRacxkivC1WgxN1b63KKdU,1898
idna/package_data.py,sha256=_CUavOxobnbyNG2FLyHoN8QHP3QM9W1tKuw7eq9QwBk,21
idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
idna/uts46data.py,sha256=H9J35VkD0F9L9mKOqjeNGd2A-Va6FlPoz6Jz4K7h-ps,243725
4
venv/lib/python3.12/site-packages/idna-3.11.dist-info/WHEEL
Normal file
@@ -0,0 +1,4 @@
Wheel-Version: 1.0
Generator: flit 3.12.0
Root-Is-Purelib: true
Tag: py3-none-any
31
venv/lib/python3.12/site-packages/idna-3.11.dist-info/licenses/LICENSE.md
Normal file
@@ -0,0 +1,31 @@
BSD 3-Clause License

Copyright (c) 2013-2025, Kim Davies and contributors.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45
venv/lib/python3.12/site-packages/idna/__init__.py
Normal file
@@ -0,0 +1,45 @@
from .core import (
    IDNABidiError,
    IDNAError,
    InvalidCodepoint,
    InvalidCodepointContext,
    alabel,
    check_bidi,
    check_hyphen_ok,
    check_initial_combiner,
    check_label,
    check_nfc,
    decode,
    encode,
    ulabel,
    uts46_remap,
    valid_contextj,
    valid_contexto,
    valid_label_length,
    valid_string_length,
)
from .intranges import intranges_contain
from .package_data import __version__

__all__ = [
    "__version__",
    "IDNABidiError",
    "IDNAError",
    "InvalidCodepoint",
    "InvalidCodepointContext",
    "alabel",
    "check_bidi",
    "check_hyphen_ok",
    "check_initial_combiner",
    "check_label",
    "check_nfc",
    "decode",
    "encode",
    "intranges_contain",
    "ulabel",
    "uts46_remap",
    "valid_contextj",
    "valid_contexto",
    "valid_label_length",
    "valid_string_length",
]
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Some files were not shown because too many files have changed in this diff