Code
This commit is contained in:
2
Code/venv/.gitignore
vendored
Normal file
2
Code/venv/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
# Created by venv; see https://docs.python.org/3/library/venv.html
|
||||
*
|
||||
248
Code/venv/bin/Activate.ps1
Normal file
248
Code/venv/bin/Activate.ps1
Normal file
@ -0,0 +1,248 @@
|
||||
<#
|
||||
.Synopsis
|
||||
Activate a Python virtual environment for the current PowerShell session.
|
||||
|
||||
.Description
|
||||
Pushes the python executable for a virtual environment to the front of the
|
||||
$Env:PATH environment variable and sets the prompt to signify that you are
|
||||
in a Python virtual environment. Makes use of the command line switches as
|
||||
well as the `pyvenv.cfg` file values present in the virtual environment.
|
||||
|
||||
.Parameter VenvDir
|
||||
Path to the directory that contains the virtual environment to activate. The
|
||||
default value for this is the parent of the directory that the Activate.ps1
|
||||
script is located within.
|
||||
|
||||
.Parameter Prompt
|
||||
The prompt prefix to display when this virtual environment is activated. By
|
||||
default, this prompt is the name of the virtual environment folder (VenvDir)
|
||||
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
|
||||
|
||||
.Example
|
||||
Activate.ps1
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -Verbose
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||
and shows extra information about the activation as it executes.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
|
||||
Activates the Python virtual environment located in the specified location.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -Prompt "MyPython"
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||
and prefixes the current prompt with the specified string (surrounded in
|
||||
parentheses) while the virtual environment is active.
|
||||
|
||||
.Notes
|
||||
On Windows, it may be required to enable this Activate.ps1 script by setting the
|
||||
execution policy for the user. You can do this by issuing the following PowerShell
|
||||
command:
|
||||
|
||||
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||
|
||||
For more information on Execution Policies:
|
||||
https://go.microsoft.com/fwlink/?LinkID=135170
|
||||
|
||||
#>
|
||||
Param(
|
||||
[Parameter(Mandatory = $false)]
|
||||
[String]
|
||||
$VenvDir,
|
||||
[Parameter(Mandatory = $false)]
|
||||
[String]
|
||||
$Prompt
|
||||
)
|
||||
|
||||
<# Function declarations --------------------------------------------------- #>
|
||||
|
||||
<#
|
||||
.Synopsis
|
||||
Remove all shell session elements added by the Activate script, including the
|
||||
addition of the virtual environment's Python executable from the beginning of
|
||||
the PATH variable.
|
||||
|
||||
.Parameter NonDestructive
|
||||
If present, do not remove this function from the global namespace for the
|
||||
session.
|
||||
|
||||
#>
|
||||
function global:deactivate ([switch]$NonDestructive) {
|
||||
# Revert to original values
|
||||
|
||||
# The prior prompt:
|
||||
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
|
||||
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
|
||||
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
|
||||
}
|
||||
|
||||
# The prior PYTHONHOME:
|
||||
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
|
||||
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
|
||||
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
|
||||
}
|
||||
|
||||
# The prior PATH:
|
||||
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
|
||||
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
|
||||
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
|
||||
}
|
||||
|
||||
# Just remove the VIRTUAL_ENV altogether:
|
||||
if (Test-Path -Path Env:VIRTUAL_ENV) {
|
||||
Remove-Item -Path env:VIRTUAL_ENV
|
||||
}
|
||||
|
||||
# Just remove VIRTUAL_ENV_PROMPT altogether.
|
||||
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
|
||||
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
|
||||
}
|
||||
|
||||
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
|
||||
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
|
||||
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
|
||||
}
|
||||
|
||||
# Leave deactivate function in the global namespace if requested:
|
||||
if (-not $NonDestructive) {
|
||||
Remove-Item -Path function:deactivate
|
||||
}
|
||||
}
|
||||
|
||||
<#
|
||||
.Description
|
||||
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
|
||||
given folder, and returns them in a map.
|
||||
|
||||
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
|
||||
two strings separated by `=` (with any amount of whitespace surrounding the =)
|
||||
then it is considered a `key = value` line. The left hand string is the key,
|
||||
the right hand is the value.
|
||||
|
||||
If the value starts with a `'` or a `"` then the first and last character is
|
||||
stripped from the value before being captured.
|
||||
|
||||
.Parameter ConfigDir
|
||||
Path to the directory that contains the `pyvenv.cfg` file.
|
||||
#>
|
||||
function Get-PyVenvConfig(
|
||||
[String]
|
||||
$ConfigDir
|
||||
) {
|
||||
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
|
||||
|
||||
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
|
||||
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
|
||||
|
||||
# An empty map will be returned if no config file is found.
|
||||
$pyvenvConfig = @{ }
|
||||
|
||||
if ($pyvenvConfigPath) {
|
||||
|
||||
Write-Verbose "File exists, parse `key = value` lines"
|
||||
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
|
||||
|
||||
$pyvenvConfigContent | ForEach-Object {
|
||||
$keyval = $PSItem -split "\s*=\s*", 2
|
||||
if ($keyval[0] -and $keyval[1]) {
|
||||
$val = $keyval[1]
|
||||
|
||||
# Remove extraneous quotations around a string value.
|
||||
if ("'""".Contains($val.Substring(0, 1))) {
|
||||
$val = $val.Substring(1, $val.Length - 2)
|
||||
}
|
||||
|
||||
$pyvenvConfig[$keyval[0]] = $val
|
||||
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
|
||||
}
|
||||
}
|
||||
}
|
||||
return $pyvenvConfig
|
||||
}
|
||||
|
||||
|
||||
<# Begin Activate script --------------------------------------------------- #>
|
||||
|
||||
# Determine the containing directory of this script
|
||||
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
|
||||
$VenvExecDir = Get-Item -Path $VenvExecPath
|
||||
|
||||
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
|
||||
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
|
||||
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
|
||||
|
||||
# Set values required in priority: CmdLine, ConfigFile, Default
|
||||
# First, get the location of the virtual environment, it might not be
|
||||
# VenvExecDir if specified on the command line.
|
||||
if ($VenvDir) {
|
||||
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
|
||||
}
|
||||
else {
|
||||
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
|
||||
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
|
||||
Write-Verbose "VenvDir=$VenvDir"
|
||||
}
|
||||
|
||||
# Next, read the `pyvenv.cfg` file to determine any required value such
|
||||
# as `prompt`.
|
||||
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
|
||||
|
||||
# Next, set the prompt from the command line, or the config file, or
|
||||
# just use the name of the virtual environment folder.
|
||||
if ($Prompt) {
|
||||
Write-Verbose "Prompt specified as argument, using '$Prompt'"
|
||||
}
|
||||
else {
|
||||
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
|
||||
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
|
||||
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
|
||||
$Prompt = $pyvenvCfg['prompt'];
|
||||
}
|
||||
else {
|
||||
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
|
||||
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
|
||||
$Prompt = Split-Path -Path $venvDir -Leaf
|
||||
}
|
||||
}
|
||||
|
||||
Write-Verbose "Prompt = '$Prompt'"
|
||||
Write-Verbose "VenvDir='$VenvDir'"
|
||||
|
||||
# Deactivate any currently active virtual environment, but leave the
|
||||
# deactivate function in place.
|
||||
deactivate -nondestructive
|
||||
|
||||
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
|
||||
# that there is an activated venv.
|
||||
$env:VIRTUAL_ENV = $VenvDir
|
||||
|
||||
$env:VIRTUAL_ENV_PROMPT = $Prompt
|
||||
|
||||
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
|
||||
|
||||
Write-Verbose "Setting prompt to '$Prompt'"
|
||||
|
||||
# Set the prompt to include the env name
|
||||
# Make sure _OLD_VIRTUAL_PROMPT is global
|
||||
function global:_OLD_VIRTUAL_PROMPT { "" }
|
||||
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
|
||||
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
|
||||
|
||||
function global:prompt {
|
||||
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
|
||||
_OLD_VIRTUAL_PROMPT
|
||||
}
|
||||
}
|
||||
|
||||
# Clear PYTHONHOME
|
||||
if (Test-Path -Path Env:PYTHONHOME) {
|
||||
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
|
||||
Remove-Item -Path Env:PYTHONHOME
|
||||
}
|
||||
|
||||
# Add the venv to the PATH
|
||||
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
|
||||
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
|
||||
76
Code/venv/bin/activate
Normal file
76
Code/venv/bin/activate
Normal file
@ -0,0 +1,76 @@
|
||||
# This file must be used with "source bin/activate" *from bash*
|
||||
# You cannot run it directly
|
||||
|
||||
deactivate () {
|
||||
# reset old environment variables
|
||||
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
|
||||
PATH="${_OLD_VIRTUAL_PATH:-}"
|
||||
export PATH
|
||||
unset _OLD_VIRTUAL_PATH
|
||||
fi
|
||||
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
|
||||
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
|
||||
export PYTHONHOME
|
||||
unset _OLD_VIRTUAL_PYTHONHOME
|
||||
fi
|
||||
|
||||
# Call hash to forget past locations. Without forgetting
|
||||
# past locations the $PATH changes we made may not be respected.
|
||||
# See "man bash" for more details. hash is usually a builtin of your shell
|
||||
hash -r 2> /dev/null
|
||||
|
||||
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
|
||||
PS1="${_OLD_VIRTUAL_PS1:-}"
|
||||
export PS1
|
||||
unset _OLD_VIRTUAL_PS1
|
||||
fi
|
||||
|
||||
unset VIRTUAL_ENV
|
||||
unset VIRTUAL_ENV_PROMPT
|
||||
if [ ! "${1:-}" = "nondestructive" ] ; then
|
||||
# Self destruct!
|
||||
unset -f deactivate
|
||||
fi
|
||||
}
|
||||
|
||||
# unset irrelevant variables
|
||||
deactivate nondestructive
|
||||
|
||||
# on Windows, a path can contain colons and backslashes and has to be converted:
|
||||
case "$(uname)" in
|
||||
CYGWIN*|MSYS*|MINGW*)
|
||||
# transform D:\path\to\venv to /d/path/to/venv on MSYS and MINGW
|
||||
# and to /cygdrive/d/path/to/venv on Cygwin
|
||||
VIRTUAL_ENV=$(cygpath /home/mofixx/Documents/Praxisarbeit-1/Code/venv)
|
||||
export VIRTUAL_ENV
|
||||
;;
|
||||
*)
|
||||
# use the path as-is
|
||||
export VIRTUAL_ENV=/home/mofixx/Documents/Praxisarbeit-1/Code/venv
|
||||
;;
|
||||
esac
|
||||
|
||||
_OLD_VIRTUAL_PATH="$PATH"
|
||||
PATH="$VIRTUAL_ENV/"bin":$PATH"
|
||||
export PATH
|
||||
|
||||
VIRTUAL_ENV_PROMPT=venv
|
||||
export VIRTUAL_ENV_PROMPT
|
||||
|
||||
# unset PYTHONHOME if set
|
||||
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
|
||||
# could use `if (set -u; : $PYTHONHOME) ;` in bash
|
||||
if [ -n "${PYTHONHOME:-}" ] ; then
|
||||
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
|
||||
unset PYTHONHOME
|
||||
fi
|
||||
|
||||
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
|
||||
_OLD_VIRTUAL_PS1="${PS1:-}"
|
||||
PS1="("venv") ${PS1:-}"
|
||||
export PS1
|
||||
fi
|
||||
|
||||
# Call hash to forget past commands. Without forgetting
|
||||
# past commands the $PATH changes we made may not be respected
|
||||
hash -r 2> /dev/null
|
||||
27
Code/venv/bin/activate.csh
Normal file
27
Code/venv/bin/activate.csh
Normal file
@ -0,0 +1,27 @@
|
||||
# This file must be used with "source bin/activate.csh" *from csh*.
|
||||
# You cannot run it directly.
|
||||
|
||||
# Created by Davide Di Blasi <davidedb@gmail.com>.
|
||||
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
|
||||
|
||||
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
|
||||
|
||||
# Unset irrelevant variables.
|
||||
deactivate nondestructive
|
||||
|
||||
setenv VIRTUAL_ENV /home/mofixx/Documents/Praxisarbeit-1/Code/venv
|
||||
|
||||
set _OLD_VIRTUAL_PATH="$PATH"
|
||||
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
|
||||
setenv VIRTUAL_ENV_PROMPT venv
|
||||
|
||||
|
||||
set _OLD_VIRTUAL_PROMPT="$prompt"
|
||||
|
||||
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
|
||||
set prompt = "("venv") $prompt:q"
|
||||
endif
|
||||
|
||||
alias pydoc python -m pydoc
|
||||
|
||||
rehash
|
||||
69
Code/venv/bin/activate.fish
Normal file
69
Code/venv/bin/activate.fish
Normal file
@ -0,0 +1,69 @@
|
||||
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
|
||||
# (https://fishshell.com/). You cannot run it directly.
|
||||
|
||||
function deactivate -d "Exit virtual environment and return to normal shell environment"
|
||||
# reset old environment variables
|
||||
if test -n "$_OLD_VIRTUAL_PATH"
|
||||
set -gx PATH $_OLD_VIRTUAL_PATH
|
||||
set -e _OLD_VIRTUAL_PATH
|
||||
end
|
||||
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
|
||||
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
|
||||
set -e _OLD_VIRTUAL_PYTHONHOME
|
||||
end
|
||||
|
||||
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
|
||||
set -e _OLD_FISH_PROMPT_OVERRIDE
|
||||
# prevents error when using nested fish instances (Issue #93858)
|
||||
if functions -q _old_fish_prompt
|
||||
functions -e fish_prompt
|
||||
functions -c _old_fish_prompt fish_prompt
|
||||
functions -e _old_fish_prompt
|
||||
end
|
||||
end
|
||||
|
||||
set -e VIRTUAL_ENV
|
||||
set -e VIRTUAL_ENV_PROMPT
|
||||
if test "$argv[1]" != "nondestructive"
|
||||
# Self-destruct!
|
||||
functions -e deactivate
|
||||
end
|
||||
end
|
||||
|
||||
# Unset irrelevant variables.
|
||||
deactivate nondestructive
|
||||
|
||||
set -gx VIRTUAL_ENV /home/mofixx/Documents/Praxisarbeit-1/Code/venv
|
||||
|
||||
set -gx _OLD_VIRTUAL_PATH $PATH
|
||||
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
|
||||
set -gx VIRTUAL_ENV_PROMPT venv
|
||||
|
||||
# Unset PYTHONHOME if set.
|
||||
if set -q PYTHONHOME
|
||||
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
|
||||
set -e PYTHONHOME
|
||||
end
|
||||
|
||||
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
|
||||
# fish uses a function instead of an env var to generate the prompt.
|
||||
|
||||
# Save the current fish_prompt function as the function _old_fish_prompt.
|
||||
functions -c fish_prompt _old_fish_prompt
|
||||
|
||||
# With the original prompt function renamed, we can override with our own.
|
||||
function fish_prompt
|
||||
# Save the return status of the last command.
|
||||
set -l old_status $status
|
||||
|
||||
# Output the venv prompt; color taken from the blue of the Python logo.
|
||||
printf "%s(%s)%s " (set_color 4B8BBE) venv (set_color normal)
|
||||
|
||||
# Restore the return status of the previous command.
|
||||
echo "exit $old_status" | .
|
||||
# Output the original/"old" prompt.
|
||||
_old_fish_prompt
|
||||
end
|
||||
|
||||
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
|
||||
end
|
||||
8
Code/venv/bin/pip
Executable file
8
Code/venv/bin/pip
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/mofixx/Documents/Praxisarbeit-1/Code/venv/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pip._internal.cli.main import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
Code/venv/bin/pip3
Executable file
8
Code/venv/bin/pip3
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/mofixx/Documents/Praxisarbeit-1/Code/venv/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pip._internal.cli.main import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
8
Code/venv/bin/pip3.13
Executable file
8
Code/venv/bin/pip3.13
Executable file
@ -0,0 +1,8 @@
|
||||
#!/home/mofixx/Documents/Praxisarbeit-1/Code/venv/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pip._internal.cli.main import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
1
Code/venv/bin/python
Symbolic link
1
Code/venv/bin/python
Symbolic link
@ -0,0 +1 @@
|
||||
/usr/bin/python
|
||||
1
Code/venv/bin/python3
Symbolic link
1
Code/venv/bin/python3
Symbolic link
@ -0,0 +1 @@
|
||||
python
|
||||
1
Code/venv/bin/python3.13
Symbolic link
1
Code/venv/bin/python3.13
Symbolic link
@ -0,0 +1 @@
|
||||
python
|
||||
19
Code/venv/cert.pem
Normal file
19
Code/venv/cert.pem
Normal file
@ -0,0 +1,19 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDCTCCAfGgAwIBAgIULzwRuiF/868ehrfyhf8Jh9j9rpgwDQYJKoZIhvcNAQEL
|
||||
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI1MDYyNjA3NDExNloXDTI1MDYy
|
||||
OTA3NDExNlowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
|
||||
AAOCAQ8AMIIBCgKCAQEAkRqKYNLYX6tfEv6MbEOxuoS960N70ec1PejLSdkWYxPP
|
||||
AojOJ7a00y3nLaVA+CcZ+eLVf1iK9VOEvJbs/0sOyOzxj8dh2lpwEKvRdqELtSUc
|
||||
1+xOeAzdlS8kAV1ZQJl6S+AD6GG9lOLUwG8eSioWDuSd3xG2uUzKR87V3dCsrMf9
|
||||
jzeh4yhypflqsogLJsnavZ8bYZ7i5W3wD/whauaSDDn4VjEWpBtPH5r6roXWHSHf
|
||||
NpdcR0i2PoKTO0I/eO+5A7fwB2Z3jp8wQ1KzRtb4Ms6spXfOAIQfoG0PAZJ7PCCu
|
||||
FLmix0ukFXX8WclCfUwtVpFBEMLlzW447+hSFHU7FwIDAQABo1MwUTAdBgNVHQ4E
|
||||
FgQUpDwcfdDkcD0XJWeHZG/OK/PzCMYwHwYDVR0jBBgwFoAUpDwcfdDkcD0XJWeH
|
||||
ZG/OK/PzCMYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAL2bg
|
||||
n6jnuIuIXu2h8aMbB5bOpTscl59TBSohNok6qi/YYIrwX+MiaqXNTQESdOOScvgW
|
||||
3fqEQvOaMz0uZ45Fbp2ncYDT3TcWicd0u4PPOAbpT3IkG/oeHD81ufliT6CSER6d
|
||||
esgQsWXHoSNcLnAqLxUc4pS6W3EOEaKpTFK9SnfmvyAcKCOjIRzJNVr8nJqj+Ua/
|
||||
EcqRLFrOhBfOwl4N39bDxIMATO3Sgu6J35eD6cXZeIV1y4J8RSmHSFBfAasGOcRz
|
||||
hfN9NbJMeD3SWHaSVyd/Zr0FgLJ0LoCxUiD+aMUGmYNr3YO7zJrL5vUuiRYDrsd5
|
||||
UxMrnnTKNiPxsuCmDQ==
|
||||
-----END CERTIFICATE-----
|
||||
318
Code/venv/cert.py
Normal file
318
Code/venv/cert.py
Normal file
@ -0,0 +1,318 @@
|
||||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime
|
||||
import requests
|
||||
from aioquic.asyncio import connect, QuicConnectionProtocol
|
||||
from aioquic.quic.configuration import QuicConfiguration
|
||||
from aioquic.quic.events import HandshakeCompleted
|
||||
import OpenSSL
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
|
||||
# Helper-Funktionen zur Formatierung von Erweiterungen
|
||||
def format_san(san):
|
||||
try:
|
||||
dns_names = san.get_values_for_type(x509.DNSName)
|
||||
return ", ".join(dns_names)
|
||||
except Exception:
|
||||
return str(san)
|
||||
|
||||
def format_key_usage(key_usage):
|
||||
fields = []
|
||||
if key_usage.digital_signature:
|
||||
fields.append("digitalSignature")
|
||||
if key_usage.content_commitment:
|
||||
fields.append("nonRepudiation")
|
||||
if key_usage.key_encipherment:
|
||||
fields.append("keyEncipherment")
|
||||
if key_usage.data_encipherment:
|
||||
fields.append("dataEncipherment")
|
||||
if key_usage.key_agreement:
|
||||
fields.append("keyAgreement")
|
||||
if key_usage.key_cert_sign:
|
||||
fields.append("keyCertSign")
|
||||
if key_usage.crl_sign:
|
||||
fields.append("cRLSign")
|
||||
if key_usage.encipher_only:
|
||||
fields.append("encipherOnly")
|
||||
if key_usage.decipher_only:
|
||||
fields.append("decipherOnly")
|
||||
return ", ".join(fields)
|
||||
|
||||
def format_extended_key_usage(ext_key_usage):
|
||||
usages = []
|
||||
for oid in ext_key_usage:
|
||||
try:
|
||||
name = oid._name # Falls vorhanden
|
||||
except Exception:
|
||||
name = oid.dotted_string
|
||||
usages.append(name)
|
||||
return ", ".join(usages)
|
||||
|
||||
def format_crl_distribution_points(crl_dp):
|
||||
urls = []
|
||||
for dp in crl_dp:
|
||||
if dp.full_name:
|
||||
for gn in dp.full_name:
|
||||
try:
|
||||
urls.append(gn.value)
|
||||
except Exception:
|
||||
pass
|
||||
return ", ".join(urls)
|
||||
|
||||
def format_authority_information_access(aia):
|
||||
lines = []
|
||||
for ad in aia:
|
||||
try:
|
||||
method = ad.access_method._name
|
||||
except Exception:
|
||||
method = ad.access_method.dotted_string
|
||||
location = ad.access_location.value
|
||||
lines.append(f"{method}: {location}")
|
||||
return "; ".join(lines)
|
||||
|
||||
def format_certificate_policies(cp):
|
||||
policies = []
|
||||
for policy_info in cp:
|
||||
try:
|
||||
policy_name = policy_info.policy_identifier._name
|
||||
except Exception:
|
||||
policy_name = policy_info.policy_identifier.dotted_string
|
||||
if policy_info.policy_qualifiers:
|
||||
qualifiers = ", ".join(str(q) for q in policy_info.policy_qualifiers)
|
||||
policies.append(f"{policy_name} ({qualifiers})")
|
||||
else:
|
||||
policies.append(policy_name)
|
||||
return "; ".join(policies)
|
||||
|
||||
def format_sct(sct_value):
|
||||
try:
|
||||
if hasattr(sct_value, "scts"):
|
||||
return f"{len(sct_value.scts)} SCTs"
|
||||
else:
|
||||
return str(sct_value)
|
||||
except Exception:
|
||||
return str(sct_value)
|
||||
|
||||
# Helper-Funktion, die alle Detailinformationen eines Zertifikats formatiert zurückgibt.
|
||||
# Hier wird die Version um 1 erhöht, sodass v1, v2 und v3 als 1, 2 bzw. 3 angezeigt werden.
|
||||
def format_certificate_details(cert, label="Zertifikat"):
|
||||
details = []
|
||||
details.append(f"========== {label} ==========")
|
||||
|
||||
details.append("Subject:")
|
||||
for key, value in cert.get_subject().get_components():
|
||||
details.append(f" {key.decode('utf-8')}: {value.decode('utf-8')}")
|
||||
details.append("Issuer:")
|
||||
for key, value in cert.get_issuer().get_components():
|
||||
details.append(f" {key.decode('utf-8')}: {value.decode('utf-8')}")
|
||||
|
||||
details.append(f"Serial Number: {cert.get_serial_number()}")
|
||||
# Hier wird der von OpenSSL zurückgegebene Wert (0-indexiert) um 1 erhöht.
|
||||
details.append(f"Version: {cert.get_version() + 1}")
|
||||
|
||||
try:
|
||||
sig_algo = cert.get_signature_algorithm().decode("utf-8")
|
||||
details.append(f"Signaturalgorithmus: {sig_algo}")
|
||||
except Exception as e:
|
||||
details.append(f"Fehler beim Auslesen des Signaturalgorithmus: {e}")
|
||||
|
||||
try:
|
||||
pubkey = cert.get_pubkey()
|
||||
key_type = pubkey.type()
|
||||
if key_type == OpenSSL.crypto.TYPE_RSA:
|
||||
key_algo = "RSA"
|
||||
elif key_type == OpenSSL.crypto.TYPE_DSA:
|
||||
key_algo = "DSA"
|
||||
elif hasattr(OpenSSL.crypto, "TYPE_EC") and key_type == OpenSSL.crypto.TYPE_EC:
|
||||
key_algo = "EC"
|
||||
elif key_type == OpenSSL.crypto.TYPE_DH:
|
||||
key_algo = "DH"
|
||||
else:
|
||||
key_algo = f"Unbekannt (Type: {key_type})"
|
||||
details.append(f"Public Key Algorithmus: {key_algo}")
|
||||
except Exception as e:
|
||||
details.append(f"Fehler beim Auslesen des Public Key Algorithmus: {e}")
|
||||
|
||||
try:
|
||||
date_format = "%Y%m%d%H%M%SZ"
|
||||
not_before_str = cert.get_notBefore().decode('utf-8')
|
||||
not_after_str = cert.get_notAfter().decode('utf-8')
|
||||
not_before_dt = datetime.strptime(not_before_str, date_format)
|
||||
not_after_dt = datetime.strptime(not_after_str, date_format)
|
||||
details.append(f"Gültig von: {not_before_dt.strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
details.append(f"Gültig bis: {not_after_dt.strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
except Exception as e:
|
||||
details.append(f"Fehler beim Formatieren der Gültigkeitsdaten: {e}")
|
||||
|
||||
try:
|
||||
cert_bytes_for_san = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert)
|
||||
cert_crypto = x509.load_der_x509_certificate(cert_bytes_for_san, default_backend())
|
||||
san_extension = cert_crypto.extensions.get_extension_for_class(x509.SubjectAlternativeName)
|
||||
details.append(f"Subject Alternative Names: {format_san(san_extension.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine SAN-Erweiterung gefunden: {e}")
|
||||
|
||||
try:
|
||||
cert_bytes_for_ext = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert)
|
||||
cert_crypto = x509.load_der_x509_certificate(cert_bytes_for_ext, default_backend())
|
||||
|
||||
try:
|
||||
key_usage_ext = cert_crypto.extensions.get_extension_for_class(x509.KeyUsage)
|
||||
details.append(f"Key Usage: {format_key_usage(key_usage_ext.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine Key Usage gefunden: {e}")
|
||||
|
||||
try:
|
||||
ext_key_usage_ext = cert_crypto.extensions.get_extension_for_class(x509.ExtendedKeyUsage)
|
||||
details.append(f"Extended Key Usage: {format_extended_key_usage(ext_key_usage_ext.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine Extended Key Usage gefunden: {e}")
|
||||
|
||||
try:
|
||||
crl_dp_ext = cert_crypto.extensions.get_extension_for_class(x509.CRLDistributionPoints)
|
||||
details.append(f"CRL Distribution Points: {format_crl_distribution_points(crl_dp_ext.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine CRL Distribution Points gefunden: {e}")
|
||||
|
||||
try:
|
||||
aia_ext = cert_crypto.extensions.get_extension_for_class(x509.AuthorityInformationAccess)
|
||||
details.append(f"Authority Information Access: {format_authority_information_access(aia_ext.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine Authority Information Access gefunden: {e}")
|
||||
|
||||
try:
|
||||
cp_ext = cert_crypto.extensions.get_extension_for_class(x509.CertificatePolicies)
|
||||
details.append(f"Certificate Policies: {format_certificate_policies(cp_ext.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine Certificate Policies gefunden: {e}")
|
||||
|
||||
try:
|
||||
ski_ext = cert_crypto.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
|
||||
details.append(f"Subject Key Identifier: {ski_ext.value.digest.hex()}")
|
||||
except Exception as e:
|
||||
details.append(f"Kein Subject Key Identifier gefunden: {e}")
|
||||
|
||||
try:
|
||||
aki_ext = cert_crypto.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
|
||||
key_id = aki_ext.value.key_identifier
|
||||
if key_id:
|
||||
details.append(f"Authority Key Identifier: {key_id.hex()}")
|
||||
else:
|
||||
details.append("Authority Key Identifier: N/A")
|
||||
except Exception as e:
|
||||
details.append(f"Keine Authority Key Identifier gefunden: {e}")
|
||||
|
||||
try:
|
||||
fingerprint = cert.digest("sha256")
|
||||
details.append(f"SHA256-Fingerprint: {fingerprint.decode()}")
|
||||
except Exception as e:
|
||||
details.append(f"Fehler beim Auslesen des Fingerprints: {e}")
|
||||
|
||||
try:
|
||||
sct_ext = cert_crypto.extensions.get_extension_for_oid(x509.ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2"))
|
||||
details.append(f"SCT (Certificate Transparency): {format_sct(sct_ext.value)}")
|
||||
except Exception as e:
|
||||
details.append(f"Keine SCT-Erweiterung gefunden: {e}")
|
||||
except Exception as e:
|
||||
details.append(f"Fehler beim Auslesen weiterer Zertifikatsinformationen: {e}")
|
||||
details.append(" ")
|
||||
details.append("=================================================================== ENDE ===================================================================")
|
||||
details.append(" ")
|
||||
|
||||
return "\n".join(details)
|
||||
|
||||
# Hilfsfunktion zum rekursiven Herunterladen der Aussteller-Kette (Intermediate + Root CA)
|
||||
def download_issuer_chain(cert_crypto):
|
||||
chain = []
|
||||
current_cert = cert_crypto
|
||||
while True:
|
||||
try:
|
||||
aia_ext = current_cert.extensions.get_extension_for_class(x509.AuthorityInformationAccess)
|
||||
ca_issuers = [ad.access_location.value for ad in aia_ext.value if ad.access_method.dotted_string == "1.3.6.1.5.5.7.48.2"]
|
||||
if not ca_issuers:
|
||||
break
|
||||
issuer_url = ca_issuers[0]
|
||||
issuer_cert_data = requests.get(issuer_url, timeout=5).content
|
||||
issuer_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, issuer_cert_data)
|
||||
chain.append(issuer_cert)
|
||||
issuer_cert_bytes = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, issuer_cert)
|
||||
issuer_cert_crypto = x509.load_der_x509_certificate(issuer_cert_bytes, default_backend())
|
||||
# Wenn self-signed, haben wir den Root erreicht
|
||||
if issuer_cert_crypto.subject == issuer_cert_crypto.issuer:
|
||||
break
|
||||
current_cert = issuer_cert_crypto
|
||||
except Exception:
|
||||
break
|
||||
return chain
|
||||
|
||||
class HTTP3Client(QuicConnectionProtocol):
|
||||
def quic_event_received(self, event):
|
||||
if isinstance(event, HandshakeCompleted):
|
||||
output_lines = []
|
||||
output_lines.append("TLS-Handshake abgeschlossen!")
|
||||
output_lines.append("Zertifikatsinformationen:")
|
||||
|
||||
# Versuche, Zertifikate aus dem Event zu holen
|
||||
certs = getattr(event, "peer_certificates", None)
|
||||
if certs is None:
|
||||
if hasattr(self._quic.tls, "peer_certificate"):
|
||||
cert = self._quic.tls.peer_certificate
|
||||
else:
|
||||
cert = getattr(self._quic.tls, "_peer_certificate", None)
|
||||
if cert:
|
||||
certs = [cert]
|
||||
|
||||
provided_chain = []
|
||||
if certs:
|
||||
for idx, cert_obj in enumerate(certs, start=1):
|
||||
try:
|
||||
if not isinstance(cert_obj, bytes):
|
||||
from cryptography.hazmat.primitives import serialization
|
||||
cert_bytes = cert_obj.public_bytes(encoding=serialization.Encoding.DER)
|
||||
else:
|
||||
cert_bytes = cert_obj
|
||||
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert_bytes)
|
||||
provided_chain.append(cert)
|
||||
output_lines.append(format_certificate_details(cert, label=f"Zertifikat {idx}"))
|
||||
except Exception as e:
|
||||
output_lines.append(f"Zertifikat {idx}: Fehler beim Laden des Zertifikats: {e}")
|
||||
|
||||
# Rekursives Herunterladen der Aussteller-Kette, falls das letzte übermittelte Zertifikat nicht self-signed ist
|
||||
if provided_chain:
|
||||
last_cert_bytes = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, provided_chain[-1])
|
||||
last_cert_crypto = x509.load_der_x509_certificate(last_cert_bytes, default_backend())
|
||||
if last_cert_crypto.subject != last_cert_crypto.issuer:
|
||||
chain_downloaded = download_issuer_chain(last_cert_crypto)
|
||||
for i, issuer_cert in enumerate(chain_downloaded):
|
||||
if i == len(chain_downloaded) - 1:
|
||||
label = "Root CA Zertifikat"
|
||||
else:
|
||||
label = "Intermediate Zertifikat"
|
||||
output_lines.append(format_certificate_details(issuer_cert, label=label))
|
||||
|
||||
final_output = "\n\n".join(output_lines)
|
||||
logging.info(final_output)
|
||||
|
||||
async def run(host, port):
    """Open a QUIC/HTTP3 connection to host:port and let the protocol
    handler inspect the handshake.

    The connection object itself is never used: ``HTTP3Client`` logs the
    certificate details from its event callbacks, so we only keep the
    connection open briefly before tearing it down.

    :param host: hostname or IP address to connect to
    :param port: UDP port of the QUIC endpoint
    """
    configuration = QuicConfiguration(is_client=True, alpn_protocols=["h3"])
    try:
        # No ``as client`` binding: the return value was unused; all the work
        # happens inside HTTP3Client's event handlers.
        async with connect(host, port, configuration=configuration, create_protocol=HTTP3Client):
            # Give the handshake (and certificate logging) time to finish.
            await asyncio.sleep(2)
    except Exception as e:
        # Best-effort CLI tool: report the failure instead of crashing.
        logging.error("Fehler beim Verbindungsaufbau: %s", e)
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point: parse arguments, configure logging, connect.
    arg_parser = argparse.ArgumentParser(description="HTTP/3 Zertifikats-Info Tool")
    arg_parser.add_argument("host", help="Hostname oder IP, zu dem verbunden werden soll")
    arg_parser.add_argument("--port", type=int, default=443, help="Port, der verwendet werden soll (default: 443)")
    arg_parser.add_argument("-o", "--output", help="Dateipfad, in den der Output geschrieben wird", default=None)
    cli = arg_parser.parse_args()

    # Log either to the requested file or to the console, messages only.
    log_kwargs = {"level": logging.INFO, "format": "%(message)s"}
    if cli.output:
        log_kwargs.update(filename=cli.output, filemode="w")
    logging.basicConfig(**log_kwargs)

    asyncio.run(run(cli.host, cli.port))
|
||||
58
Code/venv/cipher.py
Normal file
58
Code/venv/cipher.py
Normal file
@ -0,0 +1,58 @@
|
||||
import sys

# Suppress "Exception ignored in ..." noise that async/QUIC teardown can emit
# after failed connection attempts; only the cipher results should be printed.
sys.unraisablehook = lambda unraisable: None
|
||||
|
||||
import asyncio
|
||||
import ssl
|
||||
from argparse import ArgumentParser
|
||||
|
||||
from aioquic.asyncio import connect
|
||||
from aioquic.quic.configuration import QuicConfiguration
|
||||
|
||||
# The TLS 1.3 cipher suites this tool knows how to probe.
TLS13_CIPHERS = [
    'TLS_AES_128_GCM_SHA256',
    'TLS_AES_256_GCM_SHA384',
    'TLS_CHACHA20_POLY1305_SHA256',
    'TLS_AES_128_CCM_SHA256'
]


def get_client_supported_tls13_ciphers():
    """Return the TLS 1.3 cipher suites supported by the local client."""
    # Ask a default client-side SSL context which ciphers it offers and keep
    # only the TLS 1.3 suites we probe for, preserving the context's order.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    return [
        entry['name']
        for entry in context.get_ciphers()
        if entry['protocol'] == 'TLSv1.3' and entry['name'] in TLS13_CIPHERS
    ]
|
||||
|
||||
async def check_cipher(host, port, cipher):
    """Try a QUIC/HTTP3 handshake restricted to a single cipher suite.

    Returns True when the handshake succeeds within five seconds, False on
    any failure (timeout, refusal, handshake error).
    """
    quic_config = QuicConfiguration(is_client=True)
    quic_config.ciphers = [cipher]           # offer exactly one suite
    quic_config.verify_mode = ssl.CERT_NONE  # probing ciphers, not trust
    quic_config.alpn_protocols = ['h3']      # HTTP/3 ALPN

    try:
        # Bound the whole connection attempt to five seconds.
        async with asyncio.timeout(5), connect(host, port, configuration=quic_config):
            return True
    except Exception:
        # Any error means this cipher was rejected (or host is unreachable).
        return False
|
||||
|
||||
async def main(host, port):
    """Probe the server with every locally supported TLS 1.3 cipher suite
    and print, one per line, the suites the server accepts."""
    accepted = [
        suite
        for suite in get_client_supported_tls13_ciphers()
        if await check_cipher(host, port, suite)
    ]
    for suite in accepted:
        print(suite)
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: host is positional, the port is optional (default 443).
    cli_parser = ArgumentParser(description="Ermittelt die unterstützten TLS 1.3 Cipher-Suiten eines HTTP/3-Servers.")
    cli_parser.add_argument("host", help="Hostname des Servers")
    cli_parser.add_argument("-p", "--port", type=int, default=443, help="Port des Servers (Standard: 443)")
    options = cli_parser.parse_args()

    asyncio.run(main(options.host, options.port))
|
||||
35
Code/venv/cipher_new.py
Normal file
35
Code/venv/cipher_new.py
Normal file
@ -0,0 +1,35 @@
|
||||
import subprocess
|
||||
import json
|
||||
|
||||
def analyze_with_external_tools(host, port, skip_cert_verification=False):
    """Run ``curl --http3-only -v`` against ``https://host:port`` and return
    curl's verbose stderr output, which carries the TLS handshake details.

    Returns None (after printing a short diagnostic) when curl could not be
    executed at all.
    """
    # Build the curl command; -v writes the TLS/handshake trace to stderr.
    command = ['curl', '--http3-only', '-v', f'https://{host}:{port}']
    if skip_cert_verification:
        # '-k' is the short form of '--insecure'.
        command.append('-k')

    try:
        completed = subprocess.run(command, capture_output=True, text=True)
    except Exception as e:
        print(f"Externe Tool-Analyse fehlgeschlagen: {e}")
        return None
    # The verbose TLS information lives on stderr, not stdout.
    return completed.stderr
|
||||
|
||||
# Demo run: analyze two public HTTP/3 hosts with certificate verification
# enabled (the default).
banner = 60 * "="
divider = 200 * "="

result = analyze_with_external_tools("cloudflare.com", 443)
print(banner + "cloudflare.com" + banner)
print(result)
print(divider)
print()
print()

result = analyze_with_external_tools("nextcloud.mofixx.net", 443)
print(banner + "nextcloud.mofixx.net" + banner)
print(result)
print(divider)

# Usage example without certificate verification:
#result_insecure = analyze_with_external_tools("localhost", 4433, skip_cert_verification=True)
#print(result_insecure)
|
||||
28
Code/venv/key.pem
Normal file
28
Code/venv/key.pem
Normal file
@ -0,0 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCRGopg0thfq18S
|
||||
/oxsQ7G6hL3rQ3vR5zU96MtJ2RZjE88CiM4ntrTTLectpUD4Jxn54tV/WIr1U4S8
|
||||
luz/Sw7I7PGPx2HaWnAQq9F2oQu1JRzX7E54DN2VLyQBXVlAmXpL4APoYb2U4tTA
|
||||
bx5KKhYO5J3fEba5TMpHztXd0Kysx/2PN6HjKHKl+WqyiAsmydq9nxthnuLlbfAP
|
||||
/CFq5pIMOfhWMRakG08fmvquhdYdId82l1xHSLY+gpM7Qj9477kDt/AHZneOnzBD
|
||||
UrNG1vgyzqyld84AhB+gbQ8Bkns8IK4UuaLHS6QVdfxZyUJ9TC1WkUEQwuXNbjjv
|
||||
6FIUdTsXAgMBAAECggEABJLLMBW6g9AWq28z6Nc1l63jDd0Sl7ogyWnp5e8+a+R1
|
||||
N5WfDAYsI93SdckfOWFKkOpvapCcetY6qvZbDHtNwUUjd8AK9+a2YL14vJb091e1
|
||||
8fcur/ej15lIYWdMFc5SWhLeSuJ4bisto831rNShiCExTxVQAxqUVVugKloddH8T
|
||||
7G7ev8+fWMe+1TufT2+wzPI5BhIfuGk1AV3MsACbAMHO+4U5lxkJy7D3H7mal96v
|
||||
s7J+o1r7I4ulmYJAudEafaO4Npi40UuOWMK6JOGifPFt6L042wx5rgL8zlSlY9Pf
|
||||
/7Mn5BZUkaBP4GtMJlUqMY+WcEdxGnVqYLS+vjlFXQKBgQDDddjBW60uAUu4/3N6
|
||||
NfmXSK5T1NN1zDguHYFQmxEYf55YFTiBnj1jyJKem0fjQVXMhprxWf/4UmOE0vhb
|
||||
B8zwvJftIG5LmUXgmSBEiFGshtZQU8+N13e6Gfu4g+1MKYl2Y1+jV8i46+kq6Guw
|
||||
+aaE8P18L8s7B/d0siHWQA8d8wKBgQC+C+MpQ1jrNY9bhs6Sh+HlEFutMPt7xD+P
|
||||
DJ6gQRENVlWUkC49JByPhH22wffL/qLgaX+E7GiikGNooEGg5yR3LI5vMgNEgIyy
|
||||
49oJt6t0u4i94CwZGev+byLZHM2u2+cg4vDF8rr4/kU9JcH+rWUF18EyqqhiZjDI
|
||||
3eyrLSsjTQKBgQC3ZXe/1Xkk1WprZqS/Yq2R2UxYtDTFLpz4CA8xxo2/4t57301p
|
||||
sUQAwE1yLFjDTqZmhVYUZ1HZedSO6DTHlQYiS5JxmCBlJqd+Ga9BHncj3lBwhnMU
|
||||
r7tTDtY1RvP8pmyofI1cAh4ABLp+3B1PrK8lxcjSITYbaVoyUIGXOfv1KQKBgEvE
|
||||
UXxR7VFYuR60UAb7RYQdUW9q59ggFXbigRtC1ZmLofLoyIZ3rytIlxYVnsUlb6oo
|
||||
ZT6JQe4NwpQj0AZmIJOin+aU+diFp5JqZLMbFrAjVBDfHrWjMALwK+SDJz1fTnY+
|
||||
A9/nNcOvG8uVEhKG7o+1xMB+aUNRmoF6eLCGAXzBAoGAXeJZMiFJQ6X7F8HHRM7P
|
||||
wuOWhQBFDci2jOb72u+yzryCzlQ5IyqNaZOWnSiusawyJgZkN+fX2Gh9iZ5j3+XP
|
||||
ogIpMp6CGzW+iFnCCZ16odgvlQNjck2OZssn7WmA6xO3qeEA9D2j1U9aEegXNi3X
|
||||
SijyRz9vOkjh96QEVWkVkVA=
|
||||
-----END PRIVATE KEY-----
|
||||
58
Code/venv/launch.json
Normal file
58
Code/venv/launch.json
Normal file
@ -0,0 +1,58 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "cert",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "cert.py",
|
||||
"console": "integratedTerminal",
|
||||
"args": [
|
||||
"cloudflare.com",
|
||||
"-o", "output.txt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "cipher",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "cipher.py",
|
||||
"console": "integratedTerminal",
|
||||
"args": [
|
||||
"localhost",
|
||||
"-p", "4433"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "cipher localhost",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "cipher_new.py",
|
||||
"console": "integratedTerminal",
|
||||
"args": [
|
||||
"localhost",
|
||||
"-p", "4433"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "cipher cloudflare",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "cipher_new.py",
|
||||
"console": "integratedTerminal",
|
||||
"args": [
|
||||
"cloudflare.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "server",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "server.py",
|
||||
"console": "integratedTerminal"
|
||||
}
|
||||
]
|
||||
}
|
||||
3197
Code/venv/lib/python3.13/site-packages/OpenSSL/SSL.py
Normal file
3197
Code/venv/lib/python3.13/site-packages/OpenSSL/SSL.py
Normal file
File diff suppressed because it is too large
Load Diff
31
Code/venv/lib/python3.13/site-packages/OpenSSL/__init__.py
Normal file
31
Code/venv/lib/python3.13/site-packages/OpenSSL/__init__.py
Normal file
@ -0,0 +1,31 @@
|
||||
# Copyright (C) AB Strakt
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
pyOpenSSL - A simple wrapper around the OpenSSL library
|
||||
"""
|
||||
|
||||
from OpenSSL import SSL, crypto
|
||||
from OpenSSL.version import (
|
||||
__author__,
|
||||
__copyright__,
|
||||
__email__,
|
||||
__license__,
|
||||
__summary__,
|
||||
__title__,
|
||||
__uri__,
|
||||
__version__,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"SSL",
|
||||
"__author__",
|
||||
"__copyright__",
|
||||
"__email__",
|
||||
"__license__",
|
||||
"__summary__",
|
||||
"__title__",
|
||||
"__uri__",
|
||||
"__version__",
|
||||
"crypto",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
129
Code/venv/lib/python3.13/site-packages/OpenSSL/_util.py
Normal file
129
Code/venv/lib/python3.13/site-packages/OpenSSL/_util.py
Normal file
@ -0,0 +1,129 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from typing import Any, Callable, NoReturn, Union
|
||||
|
||||
from cryptography.hazmat.bindings.openssl.binding import Binding
|
||||
|
||||
if sys.version_info >= (3, 9):
|
||||
StrOrBytesPath = Union[str, bytes, os.PathLike[str], os.PathLike[bytes]]
|
||||
else:
|
||||
StrOrBytesPath = Union[str, bytes, os.PathLike]
|
||||
|
||||
binding = Binding()
|
||||
ffi = binding.ffi
|
||||
lib = binding.lib
|
||||
|
||||
|
||||
# This is a special CFFI allocator that does not bother to zero its memory
|
||||
# after allocation. This has vastly better performance on large allocations and
|
||||
# so should be used whenever we don't need the memory zeroed out.
|
||||
no_zero_allocator = ffi.new_allocator(should_clear_after_alloc=False)
|
||||
|
||||
|
||||
def text(charp: Any) -> str:
    """
    Get a native string type representing of the given CFFI ``char*`` object.

    :param charp: A C-style string represented using CFFI.

    :return: :class:`str`
    """
    # A CFFI NULL pointer is falsy; map it to the empty string instead of
    # letting ffi.string() operate on NULL.
    if not charp:
        return ""
    return ffi.string(charp).decode("utf-8")
|
||||
|
||||
|
||||
def exception_from_error_queue(exception_type: type[Exception]) -> NoReturn:
    """
    Convert an OpenSSL library failure into a Python exception.

    When a call to the native OpenSSL library fails, this is usually signalled
    by the return value, and an error code is stored in an error queue
    associated with the current thread. The err library provides functions to
    obtain these error codes and textual error messages.
    """
    errors = []

    # Drain the thread-local OpenSSL error queue; ERR_get_error() returns 0
    # once the queue is empty.
    while True:
        error = lib.ERR_get_error()
        if error == 0:
            break
        errors.append(
            (
                # (library, function, reason) message strings for this code.
                text(lib.ERR_lib_error_string(error)),
                text(lib.ERR_func_error_string(error)),
                text(lib.ERR_reason_error_string(error)),
            )
        )

    # Always raises: the whole drained queue is attached to the exception.
    raise exception_type(errors)
|
||||
|
||||
|
||||
def make_assert(error: type[Exception]) -> Callable[[bool], Any]:
    """
    Create an assert function that uses :func:`exception_from_error_queue` to
    raise an exception wrapped by *error*.
    """

    def openssl_assert(ok: bool) -> None:
        """
        If *ok* is not True, retrieve the error from OpenSSL and raise it.
        """
        # Identity check against True is deliberate: only the exact boolean
        # True passes; any other (even truthy) value raises.
        if ok is not True:
            exception_from_error_queue(error)

    return openssl_assert
|
||||
|
||||
|
||||
def path_bytes(s: StrOrBytesPath) -> bytes:
    """
    Convert a Python path to a :py:class:`bytes` for the path which can be
    passed into an OpenSSL API accepting a filename.

    :param s: A path (valid for os.fspath).

    :return: An instance of :py:class:`bytes`.
    """
    # os.fspath yields str or bytes depending on the input; only the str
    # case needs encoding (using the filesystem encoding, since OpenSSL
    # expects raw filename bytes).
    fspath = os.fspath(s)
    if isinstance(fspath, bytes):
        return fspath
    return fspath.encode(sys.getfilesystemencoding())
|
||||
|
||||
|
||||
def byte_string(s: str) -> bytes:
    """Encode *s* with the "charmap" (Latin-1) codec: one byte per char."""
    encoded = s.encode("charmap")
    return encoded
|
||||
|
||||
|
||||
# Sentinel used to detect whether optional arguments were passed explicitly.
UNSPECIFIED = object()

_TEXT_WARNING = "str for {0} is no longer accepted, use bytes"


def text_to_bytes_and_warn(label: str, obj: Any) -> Any:
    """
    If ``obj`` is text, emit a warning that it should be bytes instead and try
    to convert it to bytes automatically.

    :param str label: The name of the parameter from which ``obj`` was taken
        (so a developer can easily find the source of the problem and correct
        it).

    :return: If ``obj`` is the text string type, a ``bytes`` object giving the
        UTF-8 encoding of that text is returned.  Otherwise, ``obj`` itself is
        returned.
    """
    # Fast path: non-text objects pass through untouched.
    if not isinstance(obj, str):
        return obj
    warnings.warn(
        _TEXT_WARNING.format(label),
        category=DeprecationWarning,
        stacklevel=3,
    )
    return obj.encode("utf-8")
|
||||
2450
Code/venv/lib/python3.13/site-packages/OpenSSL/crypto.py
Normal file
2450
Code/venv/lib/python3.13/site-packages/OpenSSL/crypto.py
Normal file
File diff suppressed because it is too large
Load Diff
40
Code/venv/lib/python3.13/site-packages/OpenSSL/debug.py
Normal file
40
Code/venv/lib/python3.13/site-packages/OpenSSL/debug.py
Normal file
@ -0,0 +1,40 @@
|
||||
import ssl
|
||||
import sys
|
||||
|
||||
import cffi
|
||||
import cryptography
|
||||
|
||||
import OpenSSL.SSL
|
||||
|
||||
from . import version
|
||||
|
||||
# Human-readable summary of the pyOpenSSL environment: versions of pyOpenSSL,
# cryptography and cffi, the OpenSSL builds involved, and the Python runtime.
# NOTE(review): OpenSSL._util is reached without an explicit import here --
# presumably made importable as a side effect of ``import OpenSSL.SSL`` above;
# confirm before relying on it elsewhere.
_env_info = """\
pyOpenSSL: {pyopenssl}
cryptography: {cryptography}
cffi: {cffi}
cryptography's compiled against OpenSSL: {crypto_openssl_compile}
cryptography's linked OpenSSL: {crypto_openssl_link}
Python's OpenSSL: {python_openssl}
Python executable: {python}
Python version: {python_version}
Platform: {platform}
sys.path: {sys_path}""".format(
    pyopenssl=version.__version__,
    crypto_openssl_compile=OpenSSL._util.ffi.string(
        OpenSSL._util.lib.OPENSSL_VERSION_TEXT,
    ).decode("ascii"),
    crypto_openssl_link=OpenSSL.SSL.SSLeay_version(
        OpenSSL.SSL.SSLEAY_VERSION
    ).decode("ascii"),
    python_openssl=getattr(ssl, "OPENSSL_VERSION", "n/a"),
    cryptography=cryptography.__version__,
    cffi=cffi.__version__,
    python=sys.executable,
    python_version=sys.version,
    platform=sys.platform,
    sys_path=sys.path,
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow ``python -m OpenSSL.debug`` to print the environment report.
    print(_env_info)
|
||||
50
Code/venv/lib/python3.13/site-packages/OpenSSL/rand.py
Normal file
50
Code/venv/lib/python3.13/site-packages/OpenSSL/rand.py
Normal file
@ -0,0 +1,50 @@
|
||||
"""
|
||||
PRNG management routines, thin wrappers.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import warnings
|
||||
|
||||
from OpenSSL._util import lib as _lib
|
||||
|
||||
warnings.warn(
|
||||
"OpenSSL.rand is deprecated - you should use os.urandom instead",
|
||||
DeprecationWarning,
|
||||
stacklevel=3,
|
||||
)
|
||||
|
||||
|
||||
def add(buffer: bytes, entropy: int) -> None:
    """
    Mix bytes from *buffer* into the PRNG state.

    The *entropy* argument is (the lower bound of) an estimate of how much
    randomness is contained in *buffer*, measured in bytes.

    For more information, see e.g. :rfc:`1750`.

    This function is only relevant if you are forking Python processes and
    need to reseed the CSPRNG after fork.

    :param buffer: Buffer with random data.
    :param entropy: The entropy (in bytes) measurement of the buffer.

    :return: :obj:`None`
    """
    # Validate argument types up front so the C call never sees bad input.
    if not isinstance(buffer, bytes):
        raise TypeError("buffer must be a byte string")

    if not isinstance(entropy, int):
        raise TypeError("entropy must be an integer")

    _lib.RAND_add(buffer, len(buffer), entropy)
|
||||
|
||||
|
||||
def status() -> int:
    """
    Check whether the PRNG has been seeded with enough data.

    :return: 1 if the PRNG is seeded enough, 0 otherwise.
    """
    # Thin wrapper over OpenSSL's RAND_status().
    return _lib.RAND_status()
|
||||
28
Code/venv/lib/python3.13/site-packages/OpenSSL/version.py
Normal file
28
Code/venv/lib/python3.13/site-packages/OpenSSL/version.py
Normal file
@ -0,0 +1,28 @@
|
||||
# Copyright (C) AB Strakt
|
||||
# Copyright (C) Jean-Paul Calderone
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
pyOpenSSL - A simple wrapper around the OpenSSL library
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
"__author__",
|
||||
"__copyright__",
|
||||
"__email__",
|
||||
"__license__",
|
||||
"__summary__",
|
||||
"__title__",
|
||||
"__uri__",
|
||||
"__version__",
|
||||
]
|
||||
|
||||
__version__ = "25.1.0"
|
||||
|
||||
__title__ = "pyOpenSSL"
|
||||
__uri__ = "https://pyopenssl.org/"
|
||||
__summary__ = "Python wrapper module around the OpenSSL library"
|
||||
__author__ = "The pyOpenSSL developers"
|
||||
__email__ = "cryptography-dev@python.org"
|
||||
__license__ = "Apache License, Version 2.0"
|
||||
__copyright__ = f"Copyright 2001-2025 {__author__}"
|
||||
Binary file not shown.
@ -0,0 +1 @@
|
||||
pip
|
||||
@ -0,0 +1,25 @@
|
||||
Copyright (c) Jeremy Lainé.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of aioquic nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@ -0,0 +1,194 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: aioquic
|
||||
Version: 1.2.0
|
||||
Summary: An implementation of QUIC and HTTP/3
|
||||
Author-email: Jeremy Lainé <jeremy.laine@m4x.org>
|
||||
License: BSD-3-Clause
|
||||
Project-URL: Homepage, https://github.com/aiortc/aioquic
|
||||
Project-URL: Changelog, https://aioquic.readthedocs.io/en/stable/changelog.html
|
||||
Project-URL: Documentation, https://aioquic.readthedocs.io/
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Web Environment
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: BSD License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Classifier: Topic :: Internet :: WWW/HTTP
|
||||
Requires-Python: >=3.8
|
||||
Description-Content-Type: text/x-rst
|
||||
License-File: LICENSE
|
||||
Requires-Dist: certifi
|
||||
Requires-Dist: cryptography >=42.0.0
|
||||
Requires-Dist: pylsqpack <0.4.0,>=0.3.3
|
||||
Requires-Dist: pyopenssl >=24
|
||||
Requires-Dist: service-identity >=24.1.0
|
||||
Provides-Extra: dev
|
||||
Requires-Dist: coverage[toml] >=7.2.2 ; extra == 'dev'
|
||||
|
||||
aioquic
|
||||
=======
|
||||
|
||||
.. image:: https://img.shields.io/pypi/l/aioquic.svg
|
||||
:target: https://pypi.python.org/pypi/aioquic
|
||||
:alt: License
|
||||
|
||||
.. image:: https://img.shields.io/pypi/v/aioquic.svg
|
||||
:target: https://pypi.python.org/pypi/aioquic
|
||||
:alt: Version
|
||||
|
||||
.. image:: https://img.shields.io/pypi/pyversions/aioquic.svg
|
||||
:target: https://pypi.python.org/pypi/aioquic
|
||||
:alt: Python versions
|
||||
|
||||
.. image:: https://github.com/aiortc/aioquic/workflows/tests/badge.svg
|
||||
:target: https://github.com/aiortc/aioquic/actions
|
||||
:alt: Tests
|
||||
|
||||
.. image:: https://img.shields.io/codecov/c/github/aiortc/aioquic.svg
|
||||
:target: https://codecov.io/gh/aiortc/aioquic
|
||||
:alt: Coverage
|
||||
|
||||
.. image:: https://readthedocs.org/projects/aioquic/badge/?version=latest
|
||||
:target: https://aioquic.readthedocs.io/
|
||||
:alt: Documentation
|
||||
|
||||
What is ``aioquic``?
|
||||
--------------------
|
||||
|
||||
``aioquic`` is a library for the QUIC network protocol in Python. It features
|
||||
a minimal TLS 1.3 implementation, a QUIC stack and an HTTP/3 stack.
|
||||
|
||||
``aioquic`` is used by Python opensource projects such as `dnspython`_,
|
||||
`hypercorn`_, `mitmproxy`_ and the `Web Platform Tests`_ cross-browser test
|
||||
suite. It has also been used extensively in research papers about QUIC.
|
||||
|
||||
To learn more about ``aioquic`` please `read the documentation`_.
|
||||
|
||||
Why should I use ``aioquic``?
|
||||
-----------------------------
|
||||
|
||||
``aioquic`` has been designed to be embedded into Python client and server
|
||||
libraries wishing to support QUIC and / or HTTP/3. The goal is to provide a
|
||||
common codebase for Python libraries in the hope of avoiding duplicated effort.
|
||||
|
||||
Both the QUIC and the HTTP/3 APIs follow the "bring your own I/O" pattern,
|
||||
leaving actual I/O operations to the API user. This approach has a number of
|
||||
advantages including making the code testable and allowing integration with
|
||||
different concurrency models.
|
||||
|
||||
A lot of effort has gone into writing an extensive test suite for the
|
||||
``aioquic`` code to ensure best-in-class code quality, and it is regularly
|
||||
`tested for interoperability`_ against other `QUIC implementations`_.
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- minimal TLS 1.3 implementation conforming with `RFC 8446`_
|
||||
- QUIC stack conforming with `RFC 9000`_ (QUIC v1) and `RFC 9369`_ (QUIC v2)
|
||||
* IPv4 and IPv6 support
|
||||
* connection migration and NAT rebinding
|
||||
* logging TLS traffic secrets
|
||||
* logging QUIC events in QLOG format
|
||||
* version negotiation conforming with `RFC 9368`_
|
||||
- HTTP/3 stack conforming with `RFC 9114`_
|
||||
* server push support
|
||||
* WebSocket bootstrapping conforming with `RFC 9220`_
|
||||
* datagram support conforming with `RFC 9297`_
|
||||
|
||||
Installing
|
||||
----------
|
||||
|
||||
The easiest way to install ``aioquic`` is to run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
pip install aioquic
|
||||
|
||||
Building from source
|
||||
--------------------
|
||||
|
||||
If there are no wheels for your system or if you wish to build ``aioquic``
|
||||
from source you will need the OpenSSL development headers.
|
||||
|
||||
Linux
|
||||
.....
|
||||
|
||||
On Debian/Ubuntu run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
sudo apt install libssl-dev python3-dev
|
||||
|
||||
On Alpine Linux run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
sudo apk add openssl-dev python3-dev bsd-compat-headers libffi-dev
|
||||
|
||||
OS X
|
||||
....
|
||||
|
||||
On OS X run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
brew install openssl
|
||||
|
||||
You will need to set some environment variables to link against OpenSSL:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
export CFLAGS=-I$(brew --prefix openssl)/include
|
||||
export LDFLAGS=-L$(brew --prefix openssl)/lib
|
||||
|
||||
Windows
|
||||
.......
|
||||
|
||||
On Windows the easiest way to install OpenSSL is to use `Chocolatey`_.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
choco install openssl
|
||||
|
||||
You will need to set some environment variables to link against OpenSSL:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$Env:INCLUDE = "C:\Progra~1\OpenSSL\include"
|
||||
$Env:LIB = "C:\Progra~1\OpenSSL\lib"
|
||||
|
||||
Running the examples
|
||||
--------------------
|
||||
|
||||
`aioquic` comes with a number of examples illustrating various QUIC usecases.
|
||||
|
||||
You can browse these examples here: https://github.com/aiortc/aioquic/tree/main/examples
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
``aioquic`` is released under the `BSD license`_.
|
||||
|
||||
.. _read the documentation: https://aioquic.readthedocs.io/en/latest/
|
||||
.. _dnspython: https://github.com/rthalley/dnspython
|
||||
.. _hypercorn: https://github.com/pgjones/hypercorn
|
||||
.. _mitmproxy: https://github.com/mitmproxy/mitmproxy
|
||||
.. _Web Platform Tests: https://github.com/web-platform-tests/wpt
|
||||
.. _tested for interoperability: https://interop.seemann.io/
|
||||
.. _QUIC implementations: https://github.com/quicwg/base-drafts/wiki/Implementations
|
||||
.. _cryptography: https://cryptography.io/
|
||||
.. _Chocolatey: https://chocolatey.org/
|
||||
.. _BSD license: https://aioquic.readthedocs.io/en/latest/license.html
|
||||
.. _RFC 8446: https://datatracker.ietf.org/doc/html/rfc8446
|
||||
.. _RFC 9000: https://datatracker.ietf.org/doc/html/rfc9000
|
||||
.. _RFC 9114: https://datatracker.ietf.org/doc/html/rfc9114
|
||||
.. _RFC 9220: https://datatracker.ietf.org/doc/html/rfc9220
|
||||
.. _RFC 9297: https://datatracker.ietf.org/doc/html/rfc9297
|
||||
.. _RFC 9368: https://datatracker.ietf.org/doc/html/rfc9368
|
||||
.. _RFC 9369: https://datatracker.ietf.org/doc/html/rfc9369
|
||||
@ -0,0 +1,72 @@
|
||||
aioquic-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
aioquic-1.2.0.dist-info/LICENSE,sha256=jX4xQ89ui69Ofw7l_ArFknyeC83E6Mgyn8Pf5049HPU,1503
|
||||
aioquic-1.2.0.dist-info/METADATA,sha256=UrtLGwqhHVJVVJ2fvMSaxF-4Qm-o1CAir0odgG7f6wg,6327
|
||||
aioquic-1.2.0.dist-info/RECORD,,
|
||||
aioquic-1.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
aioquic-1.2.0.dist-info/WHEEL,sha256=YU-_AmJaQHVuj2oV7rp4R8w--gLra7_KJbMZX5zWGNo,148
|
||||
aioquic-1.2.0.dist-info/top_level.txt,sha256=wIoYXYSiS7-Q4mTEPQ437O21BliXaBRsqhRDaDp_SeY,8
|
||||
aioquic/__init__.py,sha256=MpAT5hgNoHnTtG1XRD_GV_A7QrHVU6vJjGSw_8qMGA4,22
|
||||
aioquic/__pycache__/__init__.cpython-313.pyc,,
|
||||
aioquic/__pycache__/buffer.cpython-313.pyc,,
|
||||
aioquic/__pycache__/tls.cpython-313.pyc,,
|
||||
aioquic/_buffer.abi3.so,sha256=r0vwkOCh3BUlZbf7li5oOWFwH0Igy6e8dUJwCuHI_cQ,42656
|
||||
aioquic/_buffer.c,sha256=sFIXTidQJ7XkDxB8-Wg5kbiAamo1KjaJMI1rG1uJroc,12282
|
||||
aioquic/_buffer.pyi,sha256=e34UHSxy8Wp2E_bqVEpNLOyKAtByJ5-ff73TUrD4G7I,1014
|
||||
aioquic/_crypto.abi3.so,sha256=1lixAF_xGPdhvNtzswclhTjvvClMN9KDmK9_GrJlewk,5967120
|
||||
aioquic/_crypto.c,sha256=Jl1Ncmqva4hrOG8mn5C-d40qqo1MenXUq0KWrbt080Y,12089
|
||||
aioquic/_crypto.pyi,sha256=nvDbJk0cTD5EAllb8wvCfUxMBOE_-DeFxgndvuDEPik,610
|
||||
aioquic/asyncio/__init__.py,sha256=JsklCTY12r8P2FyM8bfbdq-eM-gDcxonnleLxqzduxg,123
|
||||
aioquic/asyncio/__pycache__/__init__.cpython-313.pyc,,
|
||||
aioquic/asyncio/__pycache__/client.cpython-313.pyc,,
|
||||
aioquic/asyncio/__pycache__/protocol.cpython-313.pyc,,
|
||||
aioquic/asyncio/__pycache__/server.cpython-313.pyc,,
|
||||
aioquic/asyncio/client.py,sha256=9hhKRR7ctGxVok2sEAHlfWbSw7Kjyf6CMRfo8NRCyH8,3791
|
||||
aioquic/asyncio/protocol.py,sha256=Z7afi6sQV72Hi4SmxVq0mrAoZEvmaSo8uQRYkMkTVjs,9843
|
||||
aioquic/asyncio/server.py,sha256=uXbNRrE1GlPN_WbyzgTUNc4r11yH7nFh3di9EBQgbPQ,8432
|
||||
aioquic/buffer.py,sha256=JjhIHbmSU5kcSKUJhM2ojhCaEErviauPTKBJEcMcdSg,770
|
||||
aioquic/h0/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
aioquic/h0/__pycache__/__init__.cpython-313.pyc,,
|
||||
aioquic/h0/__pycache__/connection.cpython-313.pyc,,
|
||||
aioquic/h0/connection.py,sha256=0DiPj3_JmqQoDaeqdOanRLghNFLpao3PWgNi_GANBTg,2558
|
||||
aioquic/h3/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
aioquic/h3/__pycache__/__init__.cpython-313.pyc,,
|
||||
aioquic/h3/__pycache__/connection.cpython-313.pyc,,
|
||||
aioquic/h3/__pycache__/events.cpython-313.pyc,,
|
||||
aioquic/h3/__pycache__/exceptions.cpython-313.pyc,,
|
||||
aioquic/h3/connection.py,sha256=5-idx097NTdxEyp_360Q_4EginECy1j_M7jg4JCI190,43238
|
||||
aioquic/h3/events.py,sha256=fAsT4AEZR5PWCYdcj6fq6HhNvDQrSoJPINiloq1y5oo,2147
|
||||
aioquic/h3/exceptions.py,sha256=ZwnJ3kKbSzG31gJY7SWN7G4rDFx0VJZ6aSDmn2o5uwg,341
|
||||
aioquic/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7
|
||||
aioquic/quic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
aioquic/quic/__pycache__/__init__.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/configuration.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/connection.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/crypto.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/events.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/logger.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/packet.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/packet_builder.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/rangeset.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/recovery.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/retry.cpython-313.pyc,,
|
||||
aioquic/quic/__pycache__/stream.cpython-313.pyc,,
|
||||
aioquic/quic/configuration.py,sha256=6jtrVGIoXEGuuca0h7k9cjd-uVm9kX8mM-8MSKlenTw,4549
|
||||
aioquic/quic/congestion/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
aioquic/quic/congestion/__pycache__/__init__.cpython-313.pyc,,
|
||||
aioquic/quic/congestion/__pycache__/base.cpython-313.pyc,,
|
||||
aioquic/quic/congestion/__pycache__/cubic.cpython-313.pyc,,
|
||||
aioquic/quic/congestion/__pycache__/reno.cpython-313.pyc,,
|
||||
aioquic/quic/congestion/base.py,sha256=oqARpP2Yabdptn23FDKnkByAIHHVWGEetpxtJhp19gY,3858
|
||||
aioquic/quic/congestion/cubic.py,sha256=R2tU8M7SujU0twzokgVBZblD3UQj9DZkBaD_myCviIQ,8018
|
||||
aioquic/quic/congestion/reno.py,sha256=fJi0LkrtRsTgDVF__77WCEfPA0e_oQ8f1C7Z_imIg7M,2855
|
||||
aioquic/quic/connection.py,sha256=fBRZQHO7370EhRVCRvxShD0Wa26wwSd53frBZ9pZTXQ,139259
|
||||
aioquic/quic/crypto.py,sha256=rBQmqSp__SIt6KkHNpO_hH9lPkyI8yR4Am9a_V0xNW4,8150
|
||||
aioquic/quic/events.py,sha256=QH1jzrBUTsFDu3qs_Vj_3tda7IhnDd0IbIJL9tBq8hM,2728
|
||||
aioquic/quic/logger.py,sha256=yn1NiuCGjMBN3cAH0L1Fa3uygDkyArx1gVBI7fotG8A,10336
|
||||
aioquic/quic/packet.py,sha256=SqHOLoWIkIis5_rkUcFjJekx3A2gK83U7UpbguqBJVE,20076
|
||||
aioquic/quic/packet_builder.py,sha256=_S7zCBh-899MP7kp2CO1jt-1nB0sq1HBOfBb77IhtU8,13072
|
||||
aioquic/quic/rangeset.py,sha256=34Slw9RFXp4FNBQutOWqWrtm1-u4B2FOYtMFJHpqHdA,3133
|
||||
aioquic/quic/recovery.py,sha256=vUwW3dW67xkEFSTPMqmPtHJBdQTQBdyCIm2O1-Exb7E,13534
|
||||
aioquic/quic/retry.py,sha256=VKgwH65NZkVF7qLNxjiSMytyodWYsKWXqw8YSvKvcqs,1882
|
||||
aioquic/quic/stream.py,sha256=9HpaX7Ffa9SJiG6xpbsrl0dqfElwV2nUR606GlKLYok,12081
|
||||
aioquic/tls.py,sha256=oZsI5LwGfF-dV_9VJV7Vhr61oY3r7Wh34n1WsTk_48Q,76410
|
||||
@ -0,0 +1,6 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.43.0)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp38-abi3-manylinux_2_17_x86_64
|
||||
Tag: cp38-abi3-manylinux2014_x86_64
|
||||
|
||||
@ -0,0 +1 @@
|
||||
aioquic
|
||||
@ -0,0 +1 @@
|
||||
__version__ = "1.2.0"
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
Code/venv/lib/python3.13/site-packages/aioquic/_buffer.abi3.so
Executable file
BIN
Code/venv/lib/python3.13/site-packages/aioquic/_buffer.abi3.so
Executable file
Binary file not shown.
422
Code/venv/lib/python3.13/site-packages/aioquic/_buffer.c
Normal file
422
Code/venv/lib/python3.13/site-packages/aioquic/_buffer.c
Normal file
@ -0,0 +1,422 @@
|
||||
#define PY_SSIZE_T_CLEAN
|
||||
|
||||
#include <Python.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define MODULE_NAME "aioquic._buffer"
|
||||
|
||||
static PyObject *BufferReadError;
|
||||
static PyObject *BufferWriteError;
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
uint8_t *base;
|
||||
uint8_t *end;
|
||||
uint8_t *pos;
|
||||
} BufferObject;
|
||||
|
||||
static PyObject *BufferType;
|
||||
|
||||
#define CHECK_READ_BOUNDS(self, len) \
|
||||
if (len < 0 || self->pos + len > self->end) { \
|
||||
PyErr_SetString(BufferReadError, "Read out of bounds"); \
|
||||
return NULL; \
|
||||
}
|
||||
|
||||
#define CHECK_WRITE_BOUNDS(self, len) \
|
||||
if (self->pos + len > self->end) { \
|
||||
PyErr_SetString(BufferWriteError, "Write out of bounds"); \
|
||||
return NULL; \
|
||||
}
|
||||
|
||||
static int
|
||||
Buffer_init(BufferObject *self, PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
const char *kwlist[] = {"capacity", "data", NULL};
|
||||
Py_ssize_t capacity = 0;
|
||||
const unsigned char *data = NULL;
|
||||
Py_ssize_t data_len = 0;
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ny#", (char**)kwlist, &capacity, &data, &data_len))
|
||||
return -1;
|
||||
|
||||
if (data != NULL) {
|
||||
self->base = malloc(data_len);
|
||||
self->end = self->base + data_len;
|
||||
memcpy(self->base, data, data_len);
|
||||
} else {
|
||||
self->base = malloc(capacity);
|
||||
self->end = self->base + capacity;
|
||||
}
|
||||
self->pos = self->base;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
Buffer_dealloc(BufferObject *self)
|
||||
{
|
||||
free(self->base);
|
||||
PyTypeObject *tp = Py_TYPE(self);
|
||||
freefunc free = PyType_GetSlot(tp, Py_tp_free);
|
||||
free(self);
|
||||
Py_DECREF(tp);
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_data_slice(BufferObject *self, PyObject *args)
|
||||
{
|
||||
Py_ssize_t start, stop;
|
||||
if (!PyArg_ParseTuple(args, "nn", &start, &stop))
|
||||
return NULL;
|
||||
|
||||
if (start < 0 || self->base + start > self->end ||
|
||||
stop < 0 || self->base + stop > self->end ||
|
||||
stop < start) {
|
||||
PyErr_SetString(BufferReadError, "Read out of bounds");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return PyBytes_FromStringAndSize((const char*)(self->base + start), (stop - start));
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_eof(BufferObject *self, PyObject *args)
|
||||
{
|
||||
if (self->pos == self->end)
|
||||
Py_RETURN_TRUE;
|
||||
Py_RETURN_FALSE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_pull_bytes(BufferObject *self, PyObject *args)
|
||||
{
|
||||
Py_ssize_t len;
|
||||
if (!PyArg_ParseTuple(args, "n", &len))
|
||||
return NULL;
|
||||
|
||||
CHECK_READ_BOUNDS(self, len);
|
||||
|
||||
PyObject *o = PyBytes_FromStringAndSize((const char*)self->pos, len);
|
||||
self->pos += len;
|
||||
return o;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_pull_uint8(BufferObject *self, PyObject *args)
|
||||
{
|
||||
CHECK_READ_BOUNDS(self, 1)
|
||||
|
||||
return PyLong_FromUnsignedLong(
|
||||
(uint8_t)(*(self->pos++))
|
||||
);
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_pull_uint16(BufferObject *self, PyObject *args)
|
||||
{
|
||||
CHECK_READ_BOUNDS(self, 2)
|
||||
|
||||
uint16_t value = (uint16_t)(*(self->pos)) << 8 |
|
||||
(uint16_t)(*(self->pos + 1));
|
||||
self->pos += 2;
|
||||
return PyLong_FromUnsignedLong(value);
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_pull_uint32(BufferObject *self, PyObject *args)
|
||||
{
|
||||
CHECK_READ_BOUNDS(self, 4)
|
||||
|
||||
uint32_t value = (uint32_t)(*(self->pos)) << 24 |
|
||||
(uint32_t)(*(self->pos + 1)) << 16 |
|
||||
(uint32_t)(*(self->pos + 2)) << 8 |
|
||||
(uint32_t)(*(self->pos + 3));
|
||||
self->pos += 4;
|
||||
return PyLong_FromUnsignedLong(value);
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_pull_uint64(BufferObject *self, PyObject *args)
|
||||
{
|
||||
CHECK_READ_BOUNDS(self, 8)
|
||||
|
||||
uint64_t value = (uint64_t)(*(self->pos)) << 56 |
|
||||
(uint64_t)(*(self->pos + 1)) << 48 |
|
||||
(uint64_t)(*(self->pos + 2)) << 40 |
|
||||
(uint64_t)(*(self->pos + 3)) << 32 |
|
||||
(uint64_t)(*(self->pos + 4)) << 24 |
|
||||
(uint64_t)(*(self->pos + 5)) << 16 |
|
||||
(uint64_t)(*(self->pos + 6)) << 8 |
|
||||
(uint64_t)(*(self->pos + 7));
|
||||
self->pos += 8;
|
||||
return PyLong_FromUnsignedLongLong(value);
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_pull_uint_var(BufferObject *self, PyObject *args)
|
||||
{
|
||||
uint64_t value;
|
||||
CHECK_READ_BOUNDS(self, 1)
|
||||
switch (*(self->pos) >> 6) {
|
||||
case 0:
|
||||
value = *(self->pos++) & 0x3F;
|
||||
break;
|
||||
case 1:
|
||||
CHECK_READ_BOUNDS(self, 2)
|
||||
value = (uint16_t)(*(self->pos) & 0x3F) << 8 |
|
||||
(uint16_t)(*(self->pos + 1));
|
||||
self->pos += 2;
|
||||
break;
|
||||
case 2:
|
||||
CHECK_READ_BOUNDS(self, 4)
|
||||
value = (uint32_t)(*(self->pos) & 0x3F) << 24 |
|
||||
(uint32_t)(*(self->pos + 1)) << 16 |
|
||||
(uint32_t)(*(self->pos + 2)) << 8 |
|
||||
(uint32_t)(*(self->pos + 3));
|
||||
self->pos += 4;
|
||||
break;
|
||||
default:
|
||||
CHECK_READ_BOUNDS(self, 8)
|
||||
value = (uint64_t)(*(self->pos) & 0x3F) << 56 |
|
||||
(uint64_t)(*(self->pos + 1)) << 48 |
|
||||
(uint64_t)(*(self->pos + 2)) << 40 |
|
||||
(uint64_t)(*(self->pos + 3)) << 32 |
|
||||
(uint64_t)(*(self->pos + 4)) << 24 |
|
||||
(uint64_t)(*(self->pos + 5)) << 16 |
|
||||
(uint64_t)(*(self->pos + 6)) << 8 |
|
||||
(uint64_t)(*(self->pos + 7));
|
||||
self->pos += 8;
|
||||
break;
|
||||
}
|
||||
return PyLong_FromUnsignedLongLong(value);
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_push_bytes(BufferObject *self, PyObject *args)
|
||||
{
|
||||
const unsigned char *data;
|
||||
Py_ssize_t data_len;
|
||||
if (!PyArg_ParseTuple(args, "y#", &data, &data_len))
|
||||
return NULL;
|
||||
|
||||
CHECK_WRITE_BOUNDS(self, data_len)
|
||||
|
||||
memcpy(self->pos, data, data_len);
|
||||
self->pos += data_len;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_push_uint8(BufferObject *self, PyObject *args)
|
||||
{
|
||||
uint8_t value;
|
||||
if (!PyArg_ParseTuple(args, "B", &value))
|
||||
return NULL;
|
||||
|
||||
CHECK_WRITE_BOUNDS(self, 1)
|
||||
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_push_uint16(BufferObject *self, PyObject *args)
|
||||
{
|
||||
uint16_t value;
|
||||
if (!PyArg_ParseTuple(args, "H", &value))
|
||||
return NULL;
|
||||
|
||||
CHECK_WRITE_BOUNDS(self, 2)
|
||||
|
||||
*(self->pos++) = (value >> 8);
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_push_uint32(BufferObject *self, PyObject *args)
|
||||
{
|
||||
uint32_t value;
|
||||
if (!PyArg_ParseTuple(args, "I", &value))
|
||||
return NULL;
|
||||
|
||||
CHECK_WRITE_BOUNDS(self, 4)
|
||||
*(self->pos++) = (value >> 24);
|
||||
*(self->pos++) = (value >> 16);
|
||||
*(self->pos++) = (value >> 8);
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_push_uint64(BufferObject *self, PyObject *args)
|
||||
{
|
||||
uint64_t value;
|
||||
if (!PyArg_ParseTuple(args, "K", &value))
|
||||
return NULL;
|
||||
|
||||
CHECK_WRITE_BOUNDS(self, 8)
|
||||
*(self->pos++) = (value >> 56);
|
||||
*(self->pos++) = (value >> 48);
|
||||
*(self->pos++) = (value >> 40);
|
||||
*(self->pos++) = (value >> 32);
|
||||
*(self->pos++) = (value >> 24);
|
||||
*(self->pos++) = (value >> 16);
|
||||
*(self->pos++) = (value >> 8);
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_push_uint_var(BufferObject *self, PyObject *args)
|
||||
{
|
||||
uint64_t value;
|
||||
if (!PyArg_ParseTuple(args, "K", &value))
|
||||
return NULL;
|
||||
|
||||
if (value <= 0x3F) {
|
||||
CHECK_WRITE_BOUNDS(self, 1)
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
} else if (value <= 0x3FFF) {
|
||||
CHECK_WRITE_BOUNDS(self, 2)
|
||||
*(self->pos++) = (value >> 8) | 0x40;
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
} else if (value <= 0x3FFFFFFF) {
|
||||
CHECK_WRITE_BOUNDS(self, 4)
|
||||
*(self->pos++) = (value >> 24) | 0x80;
|
||||
*(self->pos++) = (value >> 16);
|
||||
*(self->pos++) = (value >> 8);
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
} else if (value <= 0x3FFFFFFFFFFFFFFF) {
|
||||
CHECK_WRITE_BOUNDS(self, 8)
|
||||
*(self->pos++) = (value >> 56) | 0xC0;
|
||||
*(self->pos++) = (value >> 48);
|
||||
*(self->pos++) = (value >> 40);
|
||||
*(self->pos++) = (value >> 32);
|
||||
*(self->pos++) = (value >> 24);
|
||||
*(self->pos++) = (value >> 16);
|
||||
*(self->pos++) = (value >> 8);
|
||||
*(self->pos++) = value;
|
||||
Py_RETURN_NONE;
|
||||
} else {
|
||||
PyErr_SetString(PyExc_ValueError, "Integer is too big for a variable-length integer");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_seek(BufferObject *self, PyObject *args)
|
||||
{
|
||||
Py_ssize_t pos;
|
||||
if (!PyArg_ParseTuple(args, "n", &pos))
|
||||
return NULL;
|
||||
|
||||
if (pos < 0 || self->base + pos > self->end) {
|
||||
PyErr_SetString(BufferReadError, "Seek out of bounds");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
self->pos = self->base + pos;
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
static PyObject *
|
||||
Buffer_tell(BufferObject *self, PyObject *args)
|
||||
{
|
||||
return PyLong_FromSsize_t(self->pos - self->base);
|
||||
}
|
||||
|
||||
static PyMethodDef Buffer_methods[] = {
|
||||
{"data_slice", (PyCFunction)Buffer_data_slice, METH_VARARGS, ""},
|
||||
{"eof", (PyCFunction)Buffer_eof, METH_VARARGS, ""},
|
||||
{"pull_bytes", (PyCFunction)Buffer_pull_bytes, METH_VARARGS, "Pull bytes."},
|
||||
{"pull_uint8", (PyCFunction)Buffer_pull_uint8, METH_VARARGS, "Pull an 8-bit unsigned integer."},
|
||||
{"pull_uint16", (PyCFunction)Buffer_pull_uint16, METH_VARARGS, "Pull a 16-bit unsigned integer."},
|
||||
{"pull_uint32", (PyCFunction)Buffer_pull_uint32, METH_VARARGS, "Pull a 32-bit unsigned integer."},
|
||||
{"pull_uint64", (PyCFunction)Buffer_pull_uint64, METH_VARARGS, "Pull a 64-bit unsigned integer."},
|
||||
{"pull_uint_var", (PyCFunction)Buffer_pull_uint_var, METH_VARARGS, "Pull a QUIC variable-length unsigned integer."},
|
||||
{"push_bytes", (PyCFunction)Buffer_push_bytes, METH_VARARGS, "Push bytes."},
|
||||
{"push_uint8", (PyCFunction)Buffer_push_uint8, METH_VARARGS, "Push an 8-bit unsigned integer."},
|
||||
{"push_uint16", (PyCFunction)Buffer_push_uint16, METH_VARARGS, "Push a 16-bit unsigned integer."},
|
||||
{"push_uint32", (PyCFunction)Buffer_push_uint32, METH_VARARGS, "Push a 32-bit unsigned integer."},
|
||||
{"push_uint64", (PyCFunction)Buffer_push_uint64, METH_VARARGS, "Push a 64-bit unsigned integer."},
|
||||
{"push_uint_var", (PyCFunction)Buffer_push_uint_var, METH_VARARGS, "Push a QUIC variable-length unsigned integer."},
|
||||
{"seek", (PyCFunction)Buffer_seek, METH_VARARGS, ""},
|
||||
{"tell", (PyCFunction)Buffer_tell, METH_VARARGS, ""},
|
||||
{NULL}
|
||||
};
|
||||
|
||||
static PyObject*
|
||||
Buffer_capacity_getter(BufferObject* self, void *closure) {
|
||||
return PyLong_FromSsize_t(self->end - self->base);
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
Buffer_data_getter(BufferObject* self, void *closure) {
|
||||
return PyBytes_FromStringAndSize((const char*)self->base, self->pos - self->base);
|
||||
}
|
||||
|
||||
static PyGetSetDef Buffer_getset[] = {
|
||||
{"capacity", (getter) Buffer_capacity_getter, NULL, "", NULL },
|
||||
{"data", (getter) Buffer_data_getter, NULL, "", NULL },
|
||||
{NULL}
|
||||
};
|
||||
|
||||
static PyType_Slot BufferType_slots[] = {
|
||||
{Py_tp_dealloc, Buffer_dealloc},
|
||||
{Py_tp_methods, Buffer_methods},
|
||||
{Py_tp_doc, "Buffer objects"},
|
||||
{Py_tp_getset, Buffer_getset},
|
||||
{Py_tp_init, Buffer_init},
|
||||
{0, 0},
|
||||
};
|
||||
|
||||
static PyType_Spec BufferType_spec = {
|
||||
MODULE_NAME ".Buffer",
|
||||
sizeof(BufferObject),
|
||||
0,
|
||||
Py_TPFLAGS_DEFAULT,
|
||||
BufferType_slots
|
||||
};
|
||||
|
||||
|
||||
static struct PyModuleDef moduledef = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
MODULE_NAME, /* m_name */
|
||||
"Serialization utilities.", /* m_doc */
|
||||
-1, /* m_size */
|
||||
NULL, /* m_methods */
|
||||
NULL, /* m_reload */
|
||||
NULL, /* m_traverse */
|
||||
NULL, /* m_clear */
|
||||
NULL, /* m_free */
|
||||
};
|
||||
|
||||
|
||||
PyMODINIT_FUNC
|
||||
PyInit__buffer(void)
|
||||
{
|
||||
PyObject* m;
|
||||
|
||||
m = PyModule_Create(&moduledef);
|
||||
if (m == NULL)
|
||||
return NULL;
|
||||
|
||||
BufferReadError = PyErr_NewException(MODULE_NAME ".BufferReadError", PyExc_ValueError, NULL);
|
||||
Py_INCREF(BufferReadError);
|
||||
PyModule_AddObject(m, "BufferReadError", BufferReadError);
|
||||
|
||||
BufferWriteError = PyErr_NewException(MODULE_NAME ".BufferWriteError", PyExc_ValueError, NULL);
|
||||
Py_INCREF(BufferWriteError);
|
||||
PyModule_AddObject(m, "BufferWriteError", BufferWriteError);
|
||||
|
||||
BufferType = PyType_FromSpec(&BufferType_spec);
|
||||
if (BufferType == NULL)
|
||||
return NULL;
|
||||
PyModule_AddObject(m, "Buffer", BufferType);
|
||||
|
||||
return m;
|
||||
}
|
||||
27
Code/venv/lib/python3.13/site-packages/aioquic/_buffer.pyi
Normal file
27
Code/venv/lib/python3.13/site-packages/aioquic/_buffer.pyi
Normal file
@ -0,0 +1,27 @@
|
||||
from typing import Optional
|
||||
|
||||
class BufferReadError(ValueError): ...
|
||||
class BufferWriteError(ValueError): ...
|
||||
|
||||
class Buffer:
|
||||
def __init__(self, capacity: Optional[int] = 0, data: Optional[bytes] = None): ...
|
||||
@property
|
||||
def capacity(self) -> int: ...
|
||||
@property
|
||||
def data(self) -> bytes: ...
|
||||
def data_slice(self, start: int, end: int) -> bytes: ...
|
||||
def eof(self) -> bool: ...
|
||||
def seek(self, pos: int) -> None: ...
|
||||
def tell(self) -> int: ...
|
||||
def pull_bytes(self, length: int) -> bytes: ...
|
||||
def pull_uint8(self) -> int: ...
|
||||
def pull_uint16(self) -> int: ...
|
||||
def pull_uint32(self) -> int: ...
|
||||
def pull_uint64(self) -> int: ...
|
||||
def pull_uint_var(self) -> int: ...
|
||||
def push_bytes(self, value: bytes) -> None: ...
|
||||
def push_uint8(self, value: int) -> None: ...
|
||||
def push_uint16(self, value: int) -> None: ...
|
||||
def push_uint32(self, v: int) -> None: ...
|
||||
def push_uint64(self, v: int) -> None: ...
|
||||
def push_uint_var(self, value: int) -> None: ...
|
||||
BIN
Code/venv/lib/python3.13/site-packages/aioquic/_crypto.abi3.so
Executable file
BIN
Code/venv/lib/python3.13/site-packages/aioquic/_crypto.abi3.so
Executable file
Binary file not shown.
416
Code/venv/lib/python3.13/site-packages/aioquic/_crypto.c
Normal file
416
Code/venv/lib/python3.13/site-packages/aioquic/_crypto.c
Normal file
@ -0,0 +1,416 @@
|
||||
#define PY_SSIZE_T_CLEAN
|
||||
|
||||
#include <Python.h>
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/evp.h>
|
||||
|
||||
#define MODULE_NAME "aioquic._crypto"
|
||||
|
||||
#define AEAD_KEY_LENGTH_MAX 32
|
||||
#define AEAD_NONCE_LENGTH 12
|
||||
#define AEAD_TAG_LENGTH 16
|
||||
|
||||
#define PACKET_LENGTH_MAX 1500
|
||||
#define PACKET_NUMBER_LENGTH_MAX 4
|
||||
#define SAMPLE_LENGTH 16
|
||||
|
||||
#define CHECK_RESULT(expr) \
|
||||
if (!(expr)) { \
|
||||
ERR_clear_error(); \
|
||||
PyErr_SetString(CryptoError, "OpenSSL call failed"); \
|
||||
return NULL; \
|
||||
}
|
||||
|
||||
#define CHECK_RESULT_CTOR(expr) \
|
||||
if (!(expr)) { \
|
||||
ERR_clear_error(); \
|
||||
PyErr_SetString(CryptoError, "OpenSSL call failed"); \
|
||||
return -1; \
|
||||
}
|
||||
|
||||
static PyObject *CryptoError;
|
||||
|
||||
/* AEAD */
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
EVP_CIPHER_CTX *decrypt_ctx;
|
||||
EVP_CIPHER_CTX *encrypt_ctx;
|
||||
unsigned char buffer[PACKET_LENGTH_MAX];
|
||||
unsigned char key[AEAD_KEY_LENGTH_MAX];
|
||||
unsigned char iv[AEAD_NONCE_LENGTH];
|
||||
unsigned char nonce[AEAD_NONCE_LENGTH];
|
||||
} AEADObject;
|
||||
|
||||
static PyObject *AEADType;
|
||||
|
||||
static EVP_CIPHER_CTX *
|
||||
create_ctx(const EVP_CIPHER *cipher, int key_length, int operation)
|
||||
{
|
||||
EVP_CIPHER_CTX *ctx;
|
||||
int res;
|
||||
|
||||
ctx = EVP_CIPHER_CTX_new();
|
||||
CHECK_RESULT(ctx != 0);
|
||||
|
||||
res = EVP_CipherInit_ex(ctx, cipher, NULL, NULL, NULL, operation);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CIPHER_CTX_set_key_length(ctx, key_length);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN, AEAD_NONCE_LENGTH, NULL);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
static int
|
||||
AEAD_init(AEADObject *self, PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
const char *cipher_name;
|
||||
const unsigned char *key, *iv;
|
||||
Py_ssize_t cipher_name_len, key_len, iv_len;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "y#y#y#", &cipher_name, &cipher_name_len, &key, &key_len, &iv, &iv_len))
|
||||
return -1;
|
||||
|
||||
const EVP_CIPHER *evp_cipher = EVP_get_cipherbyname(cipher_name);
|
||||
if (evp_cipher == 0) {
|
||||
PyErr_Format(CryptoError, "Invalid cipher name: %s", cipher_name);
|
||||
return -1;
|
||||
}
|
||||
if (key_len > AEAD_KEY_LENGTH_MAX) {
|
||||
PyErr_SetString(CryptoError, "Invalid key length");
|
||||
return -1;
|
||||
}
|
||||
if (iv_len > AEAD_NONCE_LENGTH) {
|
||||
PyErr_SetString(CryptoError, "Invalid iv length");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(self->key, key, key_len);
|
||||
memcpy(self->iv, iv, iv_len);
|
||||
|
||||
self->decrypt_ctx = create_ctx(evp_cipher, key_len, 0);
|
||||
CHECK_RESULT_CTOR(self->decrypt_ctx != 0);
|
||||
|
||||
self->encrypt_ctx = create_ctx(evp_cipher, key_len, 1);
|
||||
CHECK_RESULT_CTOR(self->encrypt_ctx != 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
AEAD_dealloc(AEADObject *self)
|
||||
{
|
||||
EVP_CIPHER_CTX_free(self->decrypt_ctx);
|
||||
EVP_CIPHER_CTX_free(self->encrypt_ctx);
|
||||
PyTypeObject *tp = Py_TYPE(self);
|
||||
freefunc free = PyType_GetSlot(tp, Py_tp_free);
|
||||
free(self);
|
||||
Py_DECREF(tp);
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
AEAD_decrypt(AEADObject *self, PyObject *args)
|
||||
{
|
||||
const unsigned char *data, *associated;
|
||||
Py_ssize_t data_len, associated_len;
|
||||
int outlen, outlen2, res;
|
||||
uint64_t pn;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "y#y#K", &data, &data_len, &associated, &associated_len, &pn))
|
||||
return NULL;
|
||||
|
||||
if (data_len < AEAD_TAG_LENGTH || data_len > PACKET_LENGTH_MAX) {
|
||||
PyErr_SetString(CryptoError, "Invalid payload length");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(self->nonce, self->iv, AEAD_NONCE_LENGTH);
|
||||
for (int i = 0; i < 8; ++i) {
|
||||
self->nonce[AEAD_NONCE_LENGTH - 1 - i] ^= (uint8_t)(pn >> 8 * i);
|
||||
}
|
||||
|
||||
res = EVP_CIPHER_CTX_ctrl(self->decrypt_ctx, EVP_CTRL_CCM_SET_TAG, AEAD_TAG_LENGTH, (void*)(data + (data_len - AEAD_TAG_LENGTH)));
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherInit_ex(self->decrypt_ctx, NULL, NULL, self->key, self->nonce, 0);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherUpdate(self->decrypt_ctx, NULL, &outlen, associated, associated_len);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherUpdate(self->decrypt_ctx, self->buffer, &outlen, data, data_len - AEAD_TAG_LENGTH);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherFinal_ex(self->decrypt_ctx, NULL, &outlen2);
|
||||
if (res == 0) {
|
||||
PyErr_SetString(CryptoError, "Payload decryption failed");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return PyBytes_FromStringAndSize((const char*)self->buffer, outlen);
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
AEAD_encrypt(AEADObject *self, PyObject *args)
|
||||
{
|
||||
const unsigned char *data, *associated;
|
||||
Py_ssize_t data_len, associated_len;
|
||||
int outlen, outlen2, res;
|
||||
uint64_t pn;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "y#y#K", &data, &data_len, &associated, &associated_len, &pn))
|
||||
return NULL;
|
||||
|
||||
if (data_len > PACKET_LENGTH_MAX) {
|
||||
PyErr_SetString(CryptoError, "Invalid payload length");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(self->nonce, self->iv, AEAD_NONCE_LENGTH);
|
||||
for (int i = 0; i < 8; ++i) {
|
||||
self->nonce[AEAD_NONCE_LENGTH - 1 - i] ^= (uint8_t)(pn >> 8 * i);
|
||||
}
|
||||
|
||||
res = EVP_CipherInit_ex(self->encrypt_ctx, NULL, NULL, self->key, self->nonce, 1);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherUpdate(self->encrypt_ctx, NULL, &outlen, associated, associated_len);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherUpdate(self->encrypt_ctx, self->buffer, &outlen, data, data_len);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
res = EVP_CipherFinal_ex(self->encrypt_ctx, NULL, &outlen2);
|
||||
CHECK_RESULT(res != 0 && outlen2 == 0);
|
||||
|
||||
res = EVP_CIPHER_CTX_ctrl(self->encrypt_ctx, EVP_CTRL_CCM_GET_TAG, AEAD_TAG_LENGTH, self->buffer + outlen);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
return PyBytes_FromStringAndSize((const char*)self->buffer, outlen + AEAD_TAG_LENGTH);
|
||||
}
|
||||
|
||||
static PyMethodDef AEAD_methods[] = {
|
||||
{"decrypt", (PyCFunction)AEAD_decrypt, METH_VARARGS, ""},
|
||||
{"encrypt", (PyCFunction)AEAD_encrypt, METH_VARARGS, ""},
|
||||
|
||||
{NULL}
|
||||
};
|
||||
|
||||
static PyType_Slot AEADType_slots[] = {
|
||||
{Py_tp_dealloc, AEAD_dealloc},
|
||||
{Py_tp_methods, AEAD_methods},
|
||||
{Py_tp_doc, "AEAD objects"},
|
||||
{Py_tp_init, AEAD_init},
|
||||
{0, 0},
|
||||
};
|
||||
|
||||
static PyType_Spec AEADType_spec = {
|
||||
MODULE_NAME ".AEADType",
|
||||
sizeof(AEADObject),
|
||||
0,
|
||||
Py_TPFLAGS_DEFAULT,
|
||||
AEADType_slots
|
||||
};
|
||||
|
||||
/* HeaderProtection */
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
EVP_CIPHER_CTX *ctx;
|
||||
int is_chacha20;
|
||||
unsigned char buffer[PACKET_LENGTH_MAX];
|
||||
unsigned char mask[31];
|
||||
unsigned char zero[5];
|
||||
} HeaderProtectionObject;
|
||||
|
||||
static PyObject *HeaderProtectionType;
|
||||
|
||||
static int
|
||||
HeaderProtection_init(HeaderProtectionObject *self, PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
const char *cipher_name;
|
||||
const unsigned char *key;
|
||||
Py_ssize_t cipher_name_len, key_len;
|
||||
int res;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "y#y#", &cipher_name, &cipher_name_len, &key, &key_len))
|
||||
return -1;
|
||||
|
||||
const EVP_CIPHER *evp_cipher = EVP_get_cipherbyname(cipher_name);
|
||||
if (evp_cipher == 0) {
|
||||
PyErr_Format(CryptoError, "Invalid cipher name: %s", cipher_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
memset(self->mask, 0, sizeof(self->mask));
|
||||
memset(self->zero, 0, sizeof(self->zero));
|
||||
self->is_chacha20 = cipher_name_len == 8 && memcmp(cipher_name, "chacha20", 8) == 0;
|
||||
|
||||
self->ctx = EVP_CIPHER_CTX_new();
|
||||
CHECK_RESULT_CTOR(self->ctx != 0);
|
||||
|
||||
res = EVP_CipherInit_ex(self->ctx, evp_cipher, NULL, NULL, NULL, 1);
|
||||
CHECK_RESULT_CTOR(res != 0);
|
||||
|
||||
res = EVP_CIPHER_CTX_set_key_length(self->ctx, key_len);
|
||||
CHECK_RESULT_CTOR(res != 0);
|
||||
|
||||
res = EVP_CipherInit_ex(self->ctx, NULL, NULL, key, NULL, 1);
|
||||
CHECK_RESULT_CTOR(res != 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
HeaderProtection_dealloc(HeaderProtectionObject *self)
|
||||
{
|
||||
EVP_CIPHER_CTX_free(self->ctx);
|
||||
PyTypeObject *tp = Py_TYPE(self);
|
||||
freefunc free = PyType_GetSlot(tp, Py_tp_free);
|
||||
free(self);
|
||||
Py_DECREF(tp);
|
||||
}
|
||||
|
||||
static int HeaderProtection_mask(HeaderProtectionObject *self, const unsigned char* sample)
|
||||
{
|
||||
int outlen;
|
||||
if (self->is_chacha20) {
|
||||
return EVP_CipherInit_ex(self->ctx, NULL, NULL, NULL, sample, 1) &&
|
||||
EVP_CipherUpdate(self->ctx, self->mask, &outlen, self->zero, sizeof(self->zero));
|
||||
} else {
|
||||
return EVP_CipherUpdate(self->ctx, self->mask, &outlen, sample, SAMPLE_LENGTH);
|
||||
}
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
HeaderProtection_apply(HeaderProtectionObject *self, PyObject *args)
|
||||
{
|
||||
const unsigned char *header, *payload;
|
||||
Py_ssize_t header_len, payload_len;
|
||||
int res;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "y#y#", &header, &header_len, &payload, &payload_len))
|
||||
return NULL;
|
||||
|
||||
int pn_length = (header[0] & 0x03) + 1;
|
||||
int pn_offset = header_len - pn_length;
|
||||
|
||||
res = HeaderProtection_mask(self, payload + PACKET_NUMBER_LENGTH_MAX - pn_length);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
memcpy(self->buffer, header, header_len);
|
||||
memcpy(self->buffer + header_len, payload, payload_len);
|
||||
|
||||
if (self->buffer[0] & 0x80) {
|
||||
self->buffer[0] ^= self->mask[0] & 0x0F;
|
||||
} else {
|
||||
self->buffer[0] ^= self->mask[0] & 0x1F;
|
||||
}
|
||||
|
||||
for (int i = 0; i < pn_length; ++i) {
|
||||
self->buffer[pn_offset + i] ^= self->mask[1 + i];
|
||||
}
|
||||
|
||||
return PyBytes_FromStringAndSize((const char*)self->buffer, header_len + payload_len);
|
||||
}
|
||||
|
||||
static PyObject*
|
||||
HeaderProtection_remove(HeaderProtectionObject *self, PyObject *args)
|
||||
{
|
||||
const unsigned char *packet;
|
||||
Py_ssize_t packet_len;
|
||||
int pn_offset, res;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "y#I", &packet, &packet_len, &pn_offset))
|
||||
return NULL;
|
||||
|
||||
res = HeaderProtection_mask(self, packet + pn_offset + PACKET_NUMBER_LENGTH_MAX);
|
||||
CHECK_RESULT(res != 0);
|
||||
|
||||
memcpy(self->buffer, packet, pn_offset + PACKET_NUMBER_LENGTH_MAX);
|
||||
|
||||
if (self->buffer[0] & 0x80) {
|
||||
self->buffer[0] ^= self->mask[0] & 0x0F;
|
||||
} else {
|
||||
self->buffer[0] ^= self->mask[0] & 0x1F;
|
||||
}
|
||||
|
||||
int pn_length = (self->buffer[0] & 0x03) + 1;
|
||||
uint32_t pn_truncated = 0;
|
||||
for (int i = 0; i < pn_length; ++i) {
|
||||
self->buffer[pn_offset + i] ^= self->mask[1 + i];
|
||||
pn_truncated = self->buffer[pn_offset + i] | (pn_truncated << 8);
|
||||
}
|
||||
|
||||
return Py_BuildValue("y#i", self->buffer, pn_offset + pn_length, pn_truncated);
|
||||
}
|
||||
|
||||
static PyMethodDef HeaderProtection_methods[] = {
|
||||
{"apply", (PyCFunction)HeaderProtection_apply, METH_VARARGS, ""},
|
||||
{"remove", (PyCFunction)HeaderProtection_remove, METH_VARARGS, ""},
|
||||
{NULL}
|
||||
};
|
||||
|
||||
static PyType_Slot HeaderProtectionType_slots[] = {
|
||||
{Py_tp_dealloc, HeaderProtection_dealloc},
|
||||
{Py_tp_methods, HeaderProtection_methods},
|
||||
{Py_tp_doc, "HeaderProtection objects"},
|
||||
{Py_tp_init, HeaderProtection_init},
|
||||
{0, 0},
|
||||
};
|
||||
|
||||
static PyType_Spec HeaderProtectionType_spec = {
|
||||
MODULE_NAME ".HeaderProtectionType",
|
||||
sizeof(HeaderProtectionObject),
|
||||
0,
|
||||
Py_TPFLAGS_DEFAULT,
|
||||
HeaderProtectionType_slots
|
||||
};
|
||||
|
||||
static struct PyModuleDef moduledef = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
MODULE_NAME, /* m_name */
|
||||
"Cryptography utilities.", /* m_doc */
|
||||
-1, /* m_size */
|
||||
NULL, /* m_methods */
|
||||
NULL, /* m_reload */
|
||||
NULL, /* m_traverse */
|
||||
NULL, /* m_clear */
|
||||
NULL, /* m_free */
|
||||
};
|
||||
|
||||
PyMODINIT_FUNC
|
||||
PyInit__crypto(void)
|
||||
{
|
||||
PyObject* m;
|
||||
|
||||
m = PyModule_Create(&moduledef);
|
||||
if (m == NULL)
|
||||
return NULL;
|
||||
|
||||
CryptoError = PyErr_NewException(MODULE_NAME ".CryptoError", PyExc_ValueError, NULL);
|
||||
Py_INCREF(CryptoError);
|
||||
PyModule_AddObject(m, "CryptoError", CryptoError);
|
||||
|
||||
AEADType = PyType_FromSpec(&AEADType_spec);
|
||||
if (AEADType == NULL)
|
||||
return NULL;
|
||||
PyModule_AddObject(m, "AEAD", AEADType);
|
||||
|
||||
HeaderProtectionType = PyType_FromSpec(&HeaderProtectionType_spec);
|
||||
if (HeaderProtectionType == NULL)
|
||||
return NULL;
|
||||
PyModule_AddObject(m, "HeaderProtection", HeaderProtectionType);
|
||||
|
||||
// ensure required ciphers are initialised
|
||||
EVP_add_cipher(EVP_aes_128_ecb());
|
||||
EVP_add_cipher(EVP_aes_128_gcm());
|
||||
EVP_add_cipher(EVP_aes_256_ecb());
|
||||
EVP_add_cipher(EVP_aes_256_gcm());
|
||||
|
||||
return m;
|
||||
}
|
||||
17
Code/venv/lib/python3.13/site-packages/aioquic/_crypto.pyi
Normal file
17
Code/venv/lib/python3.13/site-packages/aioquic/_crypto.pyi
Normal file
@ -0,0 +1,17 @@
|
||||
from typing import Tuple
|
||||
|
||||
class AEAD:
|
||||
def __init__(self, cipher_name: bytes, key: bytes, iv: bytes): ...
|
||||
def decrypt(
|
||||
self, data: bytes, associated_data: bytes, packet_number: int
|
||||
) -> bytes: ...
|
||||
def encrypt(
|
||||
self, data: bytes, associated_data: bytes, packet_number: int
|
||||
) -> bytes: ...
|
||||
|
||||
class CryptoError(ValueError): ...
|
||||
|
||||
class HeaderProtection:
|
||||
def __init__(self, cipher_name: bytes, key: bytes): ...
|
||||
def apply(self, plain_header: bytes, protected_payload: bytes) -> bytes: ...
|
||||
def remove(self, packet: bytes, encrypted_offset: int) -> Tuple[bytes, int]: ...
|
||||
@ -0,0 +1,3 @@
|
||||
from .client import connect # noqa
|
||||
from .protocol import QuicConnectionProtocol # noqa
|
||||
from .server import serve # noqa
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,98 @@
|
||||
import asyncio
|
||||
import socket
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import AsyncGenerator, Callable, Optional, cast
|
||||
|
||||
from ..quic.configuration import QuicConfiguration
|
||||
from ..quic.connection import QuicConnection, QuicTokenHandler
|
||||
from ..tls import SessionTicketHandler
|
||||
from .protocol import QuicConnectionProtocol, QuicStreamHandler
|
||||
|
||||
__all__ = ["connect"]
|
||||
|
||||
|
||||
@asynccontextmanager
async def connect(
    host: str,
    port: int,
    *,
    configuration: Optional[QuicConfiguration] = None,
    create_protocol: Optional[Callable] = QuicConnectionProtocol,
    session_ticket_handler: Optional[SessionTicketHandler] = None,
    stream_handler: Optional[QuicStreamHandler] = None,
    token_handler: Optional[QuicTokenHandler] = None,
    wait_connected: bool = True,
    local_port: int = 0,
) -> AsyncGenerator[QuicConnectionProtocol, None]:
    """
    Connect to a QUIC server at the given `host` and `port`.

    :meth:`connect()` returns an awaitable. Awaiting it yields a
    :class:`~aioquic.asyncio.QuicConnectionProtocol` which can be used to
    create streams.

    :func:`connect` also accepts the following optional arguments:

    * ``configuration`` is a :class:`~aioquic.quic.configuration.QuicConfiguration`
      configuration object.
    * ``create_protocol`` allows customizing the :class:`~asyncio.Protocol` that
      manages the connection. It should be a callable or class accepting the same
      arguments as :class:`~aioquic.asyncio.QuicConnectionProtocol` and returning
      an instance of :class:`~aioquic.asyncio.QuicConnectionProtocol` or a subclass.
    * ``session_ticket_handler`` is a callback which is invoked by the TLS
      engine when a new session ticket is received.
    * ``stream_handler`` is a callback which is invoked whenever a stream is
      created. It must accept two arguments: a :class:`asyncio.StreamReader`
      and a :class:`asyncio.StreamWriter`.
    * ``wait_connected`` indicates whether the context manager should wait for the
      connection to be established before yielding the
      :class:`~aioquic.asyncio.QuicConnectionProtocol`. By default this is `True` but
      you can set it to `False` if you want to immediately start sending data using
      0-RTT.
    * ``local_port`` is the UDP port number that this client wants to bind.
    """
    loop = asyncio.get_event_loop()
    local_host = "::"

    # lookup remote address
    infos = await loop.getaddrinfo(host, port, type=socket.SOCK_DGRAM)
    addr = infos[0][4]
    if len(addr) == 2:
        # a 2-tuple is an IPv4 address; map it into IPv6 space so it can be
        # reached through the dual-stack IPv6 socket created below
        addr = ("::ffff:" + addr[0], addr[1], 0, 0)

    # prepare QUIC connection
    if configuration is None:
        configuration = QuicConfiguration(is_client=True)
    if configuration.server_name is None:
        # default the TLS server name to the host we are connecting to
        configuration.server_name = host
    connection = QuicConnection(
        configuration=configuration,
        session_ticket_handler=session_ticket_handler,
        token_handler=token_handler,
    )

    # explicitly enable IPv4/IPv6 dual stack
    sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    completed = False
    try:
        sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
        sock.bind((local_host, local_port, 0, 0))
        completed = True
    finally:
        # do not leak the socket if option setting or binding failed
        if not completed:
            sock.close()
    # connect
    transport, protocol = await loop.create_datagram_endpoint(
        lambda: create_protocol(connection, stream_handler=stream_handler),
        sock=sock,
    )
    protocol = cast(QuicConnectionProtocol, protocol)
    try:
        protocol.connect(addr, transmit=wait_connected)
        if wait_connected:
            await protocol.wait_connected()
        yield protocol
    finally:
        # always shut the connection down cleanly when the context exits
        protocol.close()
        await protocol.wait_closed()
        transport.close()
|
||||
@ -0,0 +1,272 @@
|
||||
import asyncio
|
||||
from typing import Any, Callable, Dict, Optional, Text, Tuple, Union, cast
|
||||
|
||||
from ..quic import events
|
||||
from ..quic.connection import NetworkAddress, QuicConnection
|
||||
from ..quic.packet import QuicErrorCode
|
||||
|
||||
QuicConnectionIdHandler = Callable[[bytes], None]
|
||||
QuicStreamHandler = Callable[[asyncio.StreamReader, asyncio.StreamWriter], None]
|
||||
|
||||
|
||||
class QuicConnectionProtocol(asyncio.DatagramProtocol):
    """
    An :class:`asyncio.DatagramProtocol` that drives a
    :class:`~aioquic.quic.connection.QuicConnection`: it feeds received
    datagrams into the connection, relays QUIC events, and manages the
    retransmission timer.
    """

    def __init__(
        self, quic: QuicConnection, stream_handler: Optional[QuicStreamHandler] = None
    ):
        loop = asyncio.get_event_loop()

        self._closed = asyncio.Event()
        self._connected = False
        self._connected_waiter: Optional[asyncio.Future[None]] = None
        self._loop = loop
        self._ping_waiters: Dict[int, asyncio.Future[None]] = {}
        self._quic = quic
        self._stream_readers: Dict[int, asyncio.StreamReader] = {}
        self._timer: Optional[asyncio.TimerHandle] = None
        self._timer_at: Optional[float] = None
        self._transmit_task: Optional[asyncio.Handle] = None
        self._transport: Optional[asyncio.DatagramTransport] = None

        # callbacks; the server replaces these to track connection IDs
        self._connection_id_issued_handler: QuicConnectionIdHandler = lambda c: None
        self._connection_id_retired_handler: QuicConnectionIdHandler = lambda c: None
        self._connection_terminated_handler: Callable[[], None] = lambda: None
        if stream_handler is not None:
            self._stream_handler = stream_handler
        else:
            self._stream_handler = lambda r, w: None

    def change_connection_id(self) -> None:
        """
        Change the connection ID used to communicate with the peer.

        The previous connection ID will be retired.
        """
        self._quic.change_connection_id()
        self.transmit()

    def close(
        self,
        error_code: int = QuicErrorCode.NO_ERROR,
        reason_phrase: str = "",
    ) -> None:
        """
        Close the connection.

        :param error_code: An error code indicating why the connection is
                           being closed.
        :param reason_phrase: A human-readable explanation of why the
                              connection is being closed.
        """
        self._quic.close(
            error_code=error_code,
            reason_phrase=reason_phrase,
        )
        self.transmit()

    def connect(self, addr: NetworkAddress, transmit=True) -> None:
        """
        Initiate the TLS handshake.

        This method can only be called for clients and a single time.
        """
        self._quic.connect(addr, now=self._loop.time())
        if transmit:
            self.transmit()

    async def create_stream(
        self, is_unidirectional: bool = False
    ) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
        """
        Create a QUIC stream and return a pair of (reader, writer) objects.

        The returned reader and writer objects are instances of
        :class:`asyncio.StreamReader` and :class:`asyncio.StreamWriter` classes.
        """
        stream_id = self._quic.get_next_available_stream_id(
            is_unidirectional=is_unidirectional
        )
        return self._create_stream(stream_id)

    def request_key_update(self) -> None:
        """
        Request an update of the encryption keys.
        """
        self._quic.request_key_update()
        self.transmit()

    async def ping(self) -> None:
        """
        Ping the peer and wait for the response.
        """
        waiter = self._loop.create_future()
        # the future's id() is used as a unique identifier to correlate the
        # PING acknowledgement with this waiter
        uid = id(waiter)
        self._ping_waiters[uid] = waiter
        self._quic.send_ping(uid)
        self.transmit()
        await asyncio.shield(waiter)

    def transmit(self) -> None:
        """
        Send pending datagrams to the peer and arm the timer if needed.

        This method is called automatically when data is received from the peer
        or when a timer goes off. If you interact directly with the underlying
        :class:`~aioquic.quic.connection.QuicConnection`, make sure you call this
        method whenever data needs to be sent out to the network.
        """
        self._transmit_task = None

        # send datagrams
        for data, addr in self._quic.datagrams_to_send(now=self._loop.time()):
            self._transport.sendto(data, addr)

        # re-arm timer: cancel a pending timer whose deadline changed, then
        # schedule at the connection's current deadline if it has one
        timer_at = self._quic.get_timer()
        if self._timer is not None and self._timer_at != timer_at:
            self._timer.cancel()
            self._timer = None
        if self._timer is None and timer_at is not None:
            self._timer = self._loop.call_at(timer_at, self._handle_timer)
        self._timer_at = timer_at

    async def wait_closed(self) -> None:
        """
        Wait for the connection to be closed.
        """
        await self._closed.wait()

    async def wait_connected(self) -> None:
        """
        Wait for the TLS handshake to complete.
        """
        assert self._connected_waiter is None, "already awaiting connected"
        if not self._connected:
            self._connected_waiter = self._loop.create_future()
            await asyncio.shield(self._connected_waiter)

    # asyncio.Transport

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        """:meta private:"""
        self._transport = cast(asyncio.DatagramTransport, transport)

    def datagram_received(self, data: Union[bytes, Text], addr: NetworkAddress) -> None:
        """:meta private:"""
        self._quic.receive_datagram(cast(bytes, data), addr, now=self._loop.time())
        self._process_events()
        self.transmit()

    # overridable

    def quic_event_received(self, event: events.QuicEvent) -> None:
        """
        Called when a QUIC event is received.

        Reimplement this in your subclass to handle the events.
        """
        # FIXME: move this to a subclass
        if isinstance(event, events.ConnectionTerminated):
            for reader in self._stream_readers.values():
                reader.feed_eof()
        elif isinstance(event, events.StreamDataReceived):
            reader = self._stream_readers.get(event.stream_id, None)
            if reader is None:
                # first data on an unknown stream: create the reader/writer
                # pair and hand it to the application's stream handler
                reader, writer = self._create_stream(event.stream_id)
                self._stream_handler(reader, writer)
            reader.feed_data(event.data)
            if event.end_stream:
                reader.feed_eof()

    # private

    def _create_stream(
        self, stream_id: int
    ) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
        # wrap the QUIC stream in asyncio's stream abstractions
        adapter = QuicStreamAdapter(self, stream_id)
        reader = asyncio.StreamReader()
        protocol = asyncio.streams.StreamReaderProtocol(reader)
        writer = asyncio.StreamWriter(adapter, protocol, reader, self._loop)
        self._stream_readers[stream_id] = reader
        return reader, writer

    def _handle_timer(self) -> None:
        # clamp to the scheduled deadline in case the loop fired slightly early
        now = max(self._timer_at, self._loop.time())
        self._timer = None
        self._timer_at = None
        self._quic.handle_timer(now=now)
        self._process_events()
        self.transmit()

    def _process_events(self) -> None:
        # drain all pending events from the QUIC connection
        event = self._quic.next_event()
        while event is not None:
            if isinstance(event, events.ConnectionIdIssued):
                self._connection_id_issued_handler(event.connection_id)
            elif isinstance(event, events.ConnectionIdRetired):
                self._connection_id_retired_handler(event.connection_id)
            elif isinstance(event, events.ConnectionTerminated):
                self._connection_terminated_handler()

                # abort connection waiter
                if self._connected_waiter is not None:
                    waiter = self._connected_waiter
                    self._connected_waiter = None
                    waiter.set_exception(ConnectionError)

                # abort ping waiters
                for waiter in self._ping_waiters.values():
                    waiter.set_exception(ConnectionError)
                self._ping_waiters.clear()

                self._closed.set()
            elif isinstance(event, events.HandshakeCompleted):
                if self._connected_waiter is not None:
                    waiter = self._connected_waiter
                    self._connected = True
                    self._connected_waiter = None
                    waiter.set_result(None)
            elif isinstance(event, events.PingAcknowledged):
                waiter = self._ping_waiters.pop(event.uid, None)
                if waiter is not None:
                    waiter.set_result(None)
            # give subclasses a chance to handle every event
            self.quic_event_received(event)
            event = self._quic.next_event()

    def _transmit_soon(self) -> None:
        # coalesce multiple transmit requests into a single call_soon
        if self._transmit_task is None:
            self._transmit_task = self._loop.call_soon(self.transmit)
|
||||
|
||||
|
||||
class QuicStreamAdapter(asyncio.Transport):
    """
    A minimal :class:`asyncio.Transport` that maps writes made through an
    :class:`asyncio.StreamWriter` onto a single QUIC stream.
    """

    def __init__(self, protocol: "QuicConnectionProtocol", stream_id: int):
        self.protocol = protocol
        self.stream_id = stream_id
        self._closing = False

    def can_write_eof(self) -> bool:
        # QUIC streams support half-close via the FIN bit
        return True

    def get_extra_info(self, name: str, default: Any = None) -> Any:
        """
        Get information about the underlying QUIC stream.
        """
        if name == "stream_id":
            return self.stream_id
        # Bug fix: previously the `default` argument was silently ignored and
        # unknown keys always yielded None, contrary to the Transport contract.
        return default

    def write(self, data):
        # queue the data on the QUIC stream and schedule a transmit
        self.protocol._quic.send_stream_data(self.stream_id, data)
        self.protocol._transmit_soon()

    def write_eof(self):
        if self._closing:
            return
        self._closing = True
        # send an empty STREAM frame with the FIN bit set
        self.protocol._quic.send_stream_data(self.stream_id, b"", end_stream=True)
        self.protocol._transmit_soon()

    def close(self):
        # closing a QUIC stream is equivalent to sending EOF
        self.write_eof()

    def is_closing(self) -> bool:
        return self._closing
|
||||
215
Code/venv/lib/python3.13/site-packages/aioquic/asyncio/server.py
Normal file
215
Code/venv/lib/python3.13/site-packages/aioquic/asyncio/server.py
Normal file
@ -0,0 +1,215 @@
|
||||
import asyncio
|
||||
import os
|
||||
from functools import partial
|
||||
from typing import Callable, Dict, Optional, Text, Union, cast
|
||||
|
||||
from ..buffer import Buffer
|
||||
from ..quic.configuration import SMALLEST_MAX_DATAGRAM_SIZE, QuicConfiguration
|
||||
from ..quic.connection import NetworkAddress, QuicConnection
|
||||
from ..quic.packet import (
|
||||
QuicPacketType,
|
||||
encode_quic_retry,
|
||||
encode_quic_version_negotiation,
|
||||
pull_quic_header,
|
||||
)
|
||||
from ..quic.retry import QuicRetryTokenHandler
|
||||
from ..tls import SessionTicketFetcher, SessionTicketHandler
|
||||
from .protocol import QuicConnectionProtocol, QuicStreamHandler
|
||||
|
||||
__all__ = ["serve"]
|
||||
|
||||
|
||||
class QuicServer(asyncio.DatagramProtocol):
    """
    An :class:`asyncio.DatagramProtocol` that accepts QUIC connections on a
    single UDP socket, dispatching datagrams to per-connection protocol
    instances keyed by connection ID.
    """

    def __init__(
        self,
        *,
        configuration: QuicConfiguration,
        create_protocol: Callable = QuicConnectionProtocol,
        session_ticket_fetcher: Optional[SessionTicketFetcher] = None,
        session_ticket_handler: Optional[SessionTicketHandler] = None,
        retry: bool = False,
        stream_handler: Optional[QuicStreamHandler] = None,
    ) -> None:
        self._configuration = configuration
        self._create_protocol = create_protocol
        self._loop = asyncio.get_event_loop()
        # maps connection IDs to the protocol handling that connection
        self._protocols: Dict[bytes, QuicConnectionProtocol] = {}
        self._session_ticket_fetcher = session_ticket_fetcher
        self._session_ticket_handler = session_ticket_handler
        self._transport: Optional[asyncio.DatagramTransport] = None

        self._stream_handler = stream_handler

        # when enabled, client addresses are validated with retry packets
        if retry:
            self._retry = QuicRetryTokenHandler()
        else:
            self._retry = None

    def close(self):
        # close every connection (a protocol may be registered under several
        # connection IDs, hence the set) and then the listening transport
        for protocol in set(self._protocols.values()):
            protocol.close()
        self._protocols.clear()
        self._transport.close()

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self._transport = cast(asyncio.DatagramTransport, transport)

    def datagram_received(self, data: Union[bytes, Text], addr: NetworkAddress) -> None:
        data = cast(bytes, data)
        buf = Buffer(data=data)

        try:
            header = pull_quic_header(
                buf, host_cid_length=self._configuration.connection_id_length
            )
        except ValueError:
            # not a parseable QUIC packet, drop it
            return

        # version negotiation
        if (
            header.version is not None
            and header.version not in self._configuration.supported_versions
        ):
            self._transport.sendto(
                encode_quic_version_negotiation(
                    source_cid=header.destination_cid,
                    destination_cid=header.source_cid,
                    supported_versions=self._configuration.supported_versions,
                ),
                addr,
            )
            return

        protocol = self._protocols.get(header.destination_cid, None)
        original_destination_connection_id: Optional[bytes] = None
        retry_source_connection_id: Optional[bytes] = None
        if (
            protocol is None
            and len(data) >= SMALLEST_MAX_DATAGRAM_SIZE
            and header.packet_type == QuicPacketType.INITIAL
        ):
            # retry
            if self._retry is not None:
                if not header.token:
                    # create a retry token
                    source_cid = os.urandom(8)
                    self._transport.sendto(
                        encode_quic_retry(
                            version=header.version,
                            source_cid=source_cid,
                            destination_cid=header.source_cid,
                            original_destination_cid=header.destination_cid,
                            retry_token=self._retry.create_token(
                                addr, header.destination_cid, source_cid
                            ),
                        ),
                        addr,
                    )
                    return
                else:
                    # validate retry token
                    try:
                        (
                            original_destination_connection_id,
                            retry_source_connection_id,
                        ) = self._retry.validate_token(addr, header.token)
                    except ValueError:
                        # invalid token, drop the packet
                        return
            else:
                original_destination_connection_id = header.destination_cid

            # create new connection
            connection = QuicConnection(
                configuration=self._configuration,
                original_destination_connection_id=original_destination_connection_id,
                retry_source_connection_id=retry_source_connection_id,
                session_ticket_fetcher=self._session_ticket_fetcher,
                session_ticket_handler=self._session_ticket_handler,
            )
            protocol = self._create_protocol(
                connection, stream_handler=self._stream_handler
            )
            protocol.connection_made(self._transport)

            # register callbacks so connection ID changes keep the routing
            # table in sync
            protocol._connection_id_issued_handler = partial(
                self._connection_id_issued, protocol=protocol
            )
            protocol._connection_id_retired_handler = partial(
                self._connection_id_retired, protocol=protocol
            )
            protocol._connection_terminated_handler = partial(
                self._connection_terminated, protocol=protocol
            )

            # route both the client-chosen and the locally-chosen connection
            # IDs to this protocol
            self._protocols[header.destination_cid] = protocol
            self._protocols[connection.host_cid] = protocol

        if protocol is not None:
            protocol.datagram_received(data, addr)

    def _connection_id_issued(self, cid: bytes, protocol: QuicConnectionProtocol):
        self._protocols[cid] = protocol

    def _connection_id_retired(
        self, cid: bytes, protocol: QuicConnectionProtocol
    ) -> None:
        assert self._protocols[cid] == protocol
        del self._protocols[cid]

    def _connection_terminated(self, protocol: QuicConnectionProtocol):
        # drop every connection ID still routing to this protocol
        for cid, proto in list(self._protocols.items()):
            if proto == protocol:
                del self._protocols[cid]
|
||||
|
||||
|
||||
async def serve(
    host: str,
    port: int,
    *,
    configuration: QuicConfiguration,
    create_protocol: Callable = QuicConnectionProtocol,
    session_ticket_fetcher: Optional[SessionTicketFetcher] = None,
    session_ticket_handler: Optional[SessionTicketHandler] = None,
    retry: bool = False,
    stream_handler: Optional[QuicStreamHandler] = None,
) -> QuicServer:
    """
    Start a QUIC server at the given `host` and `port`.

    :func:`serve` requires a :class:`~aioquic.quic.configuration.QuicConfiguration`
    containing TLS certificate and private key as the ``configuration`` argument.

    :func:`serve` also accepts the following optional arguments:

    * ``create_protocol`` allows customizing the :class:`~asyncio.Protocol` that
      manages the connection. It should be a callable or class accepting the same
      arguments as :class:`~aioquic.asyncio.QuicConnectionProtocol` and returning
      an instance of :class:`~aioquic.asyncio.QuicConnectionProtocol` or a subclass.
    * ``session_ticket_fetcher`` is a callback which is invoked by the TLS
      engine when a session ticket is presented by the peer. It should return
      the session ticket with the specified ID or `None` if it is not found.
    * ``session_ticket_handler`` is a callback which is invoked by the TLS
      engine when a new session ticket is issued. It should store the session
      ticket for future lookup.
    * ``retry`` specifies whether client addresses should be validated prior to
      the cryptographic handshake using a retry packet.
    * ``stream_handler`` is a callback which is invoked whenever a stream is
      created. It must accept two arguments: a :class:`asyncio.StreamReader`
      and a :class:`asyncio.StreamWriter`.
    """

    loop = asyncio.get_event_loop()

    # the transport is owned by the QuicServer protocol, so only the protocol
    # is returned to the caller
    _, protocol = await loop.create_datagram_endpoint(
        lambda: QuicServer(
            configuration=configuration,
            create_protocol=create_protocol,
            session_ticket_fetcher=session_ticket_fetcher,
            session_ticket_handler=session_ticket_handler,
            retry=retry,
            stream_handler=stream_handler,
        ),
        local_addr=(host, port),
    )
    return protocol
|
||||
30
Code/venv/lib/python3.13/site-packages/aioquic/buffer.py
Normal file
30
Code/venv/lib/python3.13/site-packages/aioquic/buffer.py
Normal file
@ -0,0 +1,30 @@
|
||||
from ._buffer import Buffer, BufferReadError, BufferWriteError # noqa
|
||||
|
||||
# Largest value representable as a QUIC variable-length integer (2**62 - 1)
# and the maximum encoded size in bytes.
UINT_VAR_MAX = 0x3FFFFFFFFFFFFFFF
UINT_VAR_MAX_SIZE = 8


def encode_uint_var(value: int) -> bytes:
    """
    Encode a variable-length unsigned integer.

    Returns the QUIC variable-length encoding of *value* (1, 2, 4 or 8 bytes).
    """
    buf = Buffer(capacity=UINT_VAR_MAX_SIZE)
    buf.push_uint_var(value)
    return buf.data
|
||||
|
||||
|
||||
def size_uint_var(value: int) -> int:
    """
    Return the number of bytes required to encode the given value
    as a QUIC variable-length unsigned integer.
    """
    # each encoding size covers values up to (2 ** (8 * size - 2)) - 1
    for size, bound in ((1, 0x3F), (2, 0x3FFF), (4, 0x3FFFFFFF), (8, 0x3FFFFFFFFFFFFFFF)):
        if value <= bound:
            return size
    raise ValueError("Integer is too big for a variable-length integer")
|
||||
Binary file not shown.
Binary file not shown.
@ -0,0 +1,68 @@
|
||||
from typing import Dict, List
|
||||
|
||||
from aioquic.h3.events import DataReceived, H3Event, Headers, HeadersReceived
|
||||
from aioquic.quic.connection import QuicConnection
|
||||
from aioquic.quic.events import QuicEvent, StreamDataReceived
|
||||
|
||||
H0_ALPN = ["hq-interop"]
|
||||
|
||||
|
||||
class H0Connection:
    """
    An HTTP/0.9 connection object.
    """

    def __init__(self, quic: QuicConnection):
        # partial request lines buffered per stream until complete
        self._buffer: Dict[int, bytes] = {}
        # whether pseudo-headers have already been emitted for a stream
        self._headers_received: Dict[int, bool] = {}
        self._is_client = quic.configuration.is_client
        self._quic = quic

    def handle_event(self, event: QuicEvent) -> List[H3Event]:
        """
        Translate a QUIC event into zero or more HTTP events.
        """
        http_events: List[H3Event] = []

        # only client-initiated bidirectional streams (stream_id % 4 == 0)
        # carry HTTP/0.9 requests and responses
        if isinstance(event, StreamDataReceived) and (event.stream_id % 4) == 0:
            data = self._buffer.pop(event.stream_id, b"") + event.data
            if not self._headers_received.get(event.stream_id, False):
                if self._is_client:
                    # HTTP/0.9 responses have no headers; emit an empty set
                    http_events.append(
                        HeadersReceived(
                            headers=[], stream_ended=False, stream_id=event.stream_id
                        )
                    )
                elif data.endswith(b"\r\n") or event.end_stream:
                    # request line is complete: "<method> <path>\r\n"
                    method, path = data.rstrip().split(b" ", 1)
                    http_events.append(
                        HeadersReceived(
                            headers=[(b":method", method), (b":path", path)],
                            stream_ended=False,
                            stream_id=event.stream_id,
                        )
                    )
                    data = b""
                else:
                    # incomplete request, stash the data
                    self._buffer[event.stream_id] = data
                    return http_events
                self._headers_received[event.stream_id] = True

            http_events.append(
                DataReceived(
                    data=data, stream_ended=event.end_stream, stream_id=event.stream_id
                )
            )

        return http_events

    def send_data(self, stream_id: int, data: bytes, end_stream: bool) -> None:
        """
        Send body data on the given stream.
        """
        self._quic.send_stream_data(stream_id, data, end_stream)

    def send_headers(
        self, stream_id: int, headers: Headers, end_stream: bool = False
    ) -> None:
        """
        Send headers: for a client this emits the HTTP/0.9 request line;
        servers send nothing since HTTP/0.9 responses have no headers.
        """
        if self._is_client:
            headers_dict = dict(headers)
            data = headers_dict[b":method"] + b" " + headers_dict[b":path"] + b"\r\n"
        else:
            data = b""
        self._quic.send_stream_data(stream_id, data, end_stream)
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
1218
Code/venv/lib/python3.13/site-packages/aioquic/h3/connection.py
Normal file
1218
Code/venv/lib/python3.13/site-packages/aioquic/h3/connection.py
Normal file
File diff suppressed because it is too large
Load Diff
100
Code/venv/lib/python3.13/site-packages/aioquic/h3/events.py
Normal file
100
Code/venv/lib/python3.13/site-packages/aioquic/h3/events.py
Normal file
@ -0,0 +1,100 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
Headers = List[Tuple[bytes, bytes]]
|
||||
|
||||
|
||||
class H3Event:
    """
    Base class for HTTP/3 events.
    """
|
||||
|
||||
|
||||
@dataclass
class DataReceived(H3Event):
    """
    The DataReceived event is fired whenever data is received on a stream from
    the remote peer.
    """

    data: bytes
    "The data which was received."

    stream_id: int
    "The ID of the stream the data was received for."

    stream_ended: bool
    "Whether the STREAM frame had the FIN bit set."

    push_id: Optional[int] = None
    "The Push ID or `None` if this is not a push."
|
||||
|
||||
|
||||
@dataclass
class DatagramReceived(H3Event):
    """
    The DatagramReceived is fired whenever a datagram is received from the
    remote peer.
    """

    data: bytes
    "The data which was received."

    stream_id: int
    "The ID of the stream the data was received for."
|
||||
|
||||
|
||||
@dataclass
class HeadersReceived(H3Event):
    """
    The HeadersReceived event is fired whenever headers are received.
    """

    headers: Headers
    "The headers."

    stream_id: int
    "The ID of the stream the headers were received for."

    stream_ended: bool
    "Whether the STREAM frame had the FIN bit set."

    push_id: Optional[int] = None
    "The Push ID or `None` if this is not a push."
|
||||
|
||||
|
||||
@dataclass
class PushPromiseReceived(H3Event):
    """
    The PushPromiseReceived event is fired whenever a pushed stream has been
    received from the remote peer.
    """

    headers: Headers
    "The request headers."

    push_id: int
    "The Push ID of the push promise."

    stream_id: int
    "The Stream ID of the stream that the push is related to."
|
||||
|
||||
|
||||
@dataclass
class WebTransportStreamDataReceived(H3Event):
    """
    The WebTransportStreamDataReceived is fired whenever data is received
    for a WebTransport stream.
    """

    data: bytes
    "The data which was received."

    stream_id: int
    "The ID of the stream the data was received for."

    stream_ended: bool
    "Whether the STREAM frame had the FIN bit set."

    session_id: int
    "The ID of the session the data was received for."
|
||||
@ -0,0 +1,17 @@
|
||||
class H3Error(Exception):
    """
    Base class for HTTP/3 exceptions.
    """
|
||||
|
||||
|
||||
class InvalidStreamTypeError(H3Error):
    """
    An action was attempted on an invalid stream type.
    """
|
||||
|
||||
|
||||
class NoAvailablePushIDError(H3Error):
    """
    There are no available push IDs left, or push is not supported
    by the remote party.
    """
|
||||
1
Code/venv/lib/python3.13/site-packages/aioquic/py.typed
Normal file
1
Code/venv/lib/python3.13/site-packages/aioquic/py.typed
Normal file
@ -0,0 +1 @@
|
||||
Marker
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,163 @@
|
||||
from dataclasses import dataclass, field
|
||||
from os import PathLike
|
||||
from re import split
|
||||
from typing import Any, List, Optional, TextIO, Union
|
||||
|
||||
from ..tls import (
|
||||
CipherSuite,
|
||||
SessionTicket,
|
||||
load_pem_private_key,
|
||||
load_pem_x509_certificates,
|
||||
)
|
||||
from .logger import QuicLogger
|
||||
from .packet import QuicProtocolVersion
|
||||
|
||||
SMALLEST_MAX_DATAGRAM_SIZE = 1200
|
||||
|
||||
|
||||
@dataclass
|
||||
class QuicConfiguration:
|
||||
"""
|
||||
A QUIC configuration.
|
||||
"""
|
||||
|
||||
alpn_protocols: Optional[List[str]] = None
|
||||
"""
|
||||
A list of supported ALPN protocols.
|
||||
"""
|
||||
|
||||
congestion_control_algorithm: str = "reno"
|
||||
"""
|
||||
The name of the congestion control algorithm to use.
|
||||
|
||||
Currently supported algorithms: `"reno", `"cubic"`.
|
||||
"""
|
||||
|
||||
connection_id_length: int = 8
|
||||
"""
|
||||
The length in bytes of local connection IDs.
|
||||
"""
|
||||
|
||||
idle_timeout: float = 60.0
|
||||
"""
|
||||
The idle timeout in seconds.
|
||||
|
||||
The connection is terminated if nothing is received for the given duration.
|
||||
"""
|
||||
|
||||
is_client: bool = True
|
||||
"""
|
||||
Whether this is the client side of the QUIC connection.
|
||||
"""
|
||||
|
||||
max_data: int = 1048576
|
||||
"""
|
||||
Connection-wide flow control limit.
|
||||
"""
|
||||
|
||||
max_datagram_size: int = SMALLEST_MAX_DATAGRAM_SIZE
|
||||
"""
|
||||
The maximum QUIC payload size in bytes to send, excluding UDP or IP overhead.
|
||||
"""
|
||||
|
||||
max_stream_data: int = 1048576
|
||||
"""
|
||||
Per-stream flow control limit.
|
||||
"""
|
||||
|
||||
quic_logger: Optional[QuicLogger] = None
|
||||
"""
|
||||
The :class:`~aioquic.quic.logger.QuicLogger` instance to log events to.
|
||||
"""
|
||||
|
||||
secrets_log_file: TextIO = None
|
||||
"""
|
||||
A file-like object in which to log traffic secrets.
|
||||
|
||||
This is useful to analyze traffic captures with Wireshark.
|
||||
"""
|
||||
|
||||
server_name: Optional[str] = None
|
||||
"""
|
||||
The server name to use when verifying the server's TLS certificate, which
|
||||
can either be a DNS name or an IP address.
|
||||
|
||||
If it is a DNS name, it is also sent during the TLS handshake in the
|
||||
Server Name Indication (SNI) extension.
|
||||
|
||||
.. note:: This is only used by clients.
|
||||
"""
|
||||
|
||||
session_ticket: Optional[SessionTicket] = None
|
||||
"""
|
||||
The TLS session ticket which should be used for session resumption.
|
||||
"""
|
||||
|
||||
token: bytes = b""
|
||||
"""
|
||||
The address validation token that can be used to validate future connections.
|
||||
|
||||
.. note:: This is only used by clients.
|
||||
"""
|
||||
|
||||
# For internal purposes, not guaranteed to be stable.
|
||||
cadata: Optional[bytes] = None
|
||||
cafile: Optional[str] = None
|
||||
capath: Optional[str] = None
|
||||
certificate: Any = None
|
||||
certificate_chain: List[Any] = field(default_factory=list)
|
||||
cipher_suites: Optional[List[CipherSuite]] = None
|
||||
initial_rtt: float = 0.1
|
||||
max_datagram_frame_size: Optional[int] = None
|
||||
original_version: Optional[int] = None
|
||||
private_key: Any = None
|
||||
quantum_readiness_test: bool = False
|
||||
supported_versions: List[int] = field(
|
||||
default_factory=lambda: [
|
||||
QuicProtocolVersion.VERSION_1,
|
||||
QuicProtocolVersion.VERSION_2,
|
||||
]
|
||||
)
|
||||
verify_mode: Optional[int] = None
|
||||
|
||||
def load_cert_chain(
    self,
    certfile: PathLike,
    keyfile: Optional[PathLike] = None,
    password: Optional[Union[bytes, str]] = None,
) -> None:
    """
    Load a private key and the corresponding certificate.

    The certificate file may also embed a PEM private key after the
    certificates; a separate ``keyfile`` (optionally encrypted with
    ``password``) takes precedence when given.
    """
    boundary = b"-----BEGIN PRIVATE KEY-----\n"
    with open(certfile, "rb") as reader:
        # Split the file into the certificate section and (optionally)
        # an embedded private key section.
        chunks = split(b"\n" + boundary, reader.read())
    certificates = load_pem_x509_certificates(chunks[0])
    if len(chunks) == 2:
        # Re-attach the PEM boundary that split() consumed.
        self.private_key = load_pem_private_key(boundary + chunks[1])
    self.certificate = certificates[0]
    self.certificate_chain = certificates[1:]

    if keyfile is not None:
        if isinstance(password, str):
            password = password.encode("utf8")
        with open(keyfile, "rb") as reader:
            self.private_key = load_pem_private_key(reader.read(), password=password)
||||
|
||||
def load_verify_locations(
    self,
    cafile: Optional[str] = None,
    capath: Optional[str] = None,
    cadata: Optional[bytes] = None,
) -> None:
    """
    Load a set of "certification authority" (CA) certificates used to
    validate other peers' certificates.

    :param cafile: path to a file of concatenated CA certificates.
    :param capath: path to a directory containing CA certificates.
    :param cadata: CA certificates provided directly as bytes.
    """
    # Simply record the verification sources on the configuration.
    self.cafile, self.capath, self.cadata = cafile, capath, cadata
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,128 @@
|
||||
import abc
|
||||
from typing import Any, Dict, Iterable, Optional, Protocol
|
||||
|
||||
from ..packet_builder import QuicSentPacket
|
||||
|
||||
K_GRANULARITY = 0.001  # seconds; minimum spacing between RTT samples
K_INITIAL_WINDOW = 10  # initial congestion window, in datagrams
K_MINIMUM_WINDOW = 2  # lower bound for the congestion window, in datagrams
|
||||
|
||||
|
||||
class QuicCongestionControl(abc.ABC):
    """
    Abstract base class for congestion control implementations.

    Subclasses receive packet lifecycle notifications and maintain
    `congestion_window`, `bytes_in_flight` and `ssthresh`.
    """

    bytes_in_flight: int = 0
    congestion_window: int = 0
    ssthresh: Optional[int] = None

    def __init__(self, *, max_datagram_size: int) -> None:
        # Start from the standard initial window of K_INITIAL_WINDOW datagrams.
        self.congestion_window = max_datagram_size * K_INITIAL_WINDOW

    @abc.abstractmethod
    def on_packet_acked(self, *, now: float, packet: QuicSentPacket) -> None: ...

    @abc.abstractmethod
    def on_packet_sent(self, *, packet: QuicSentPacket) -> None: ...

    @abc.abstractmethod
    def on_packets_expired(self, *, packets: Iterable[QuicSentPacket]) -> None: ...

    @abc.abstractmethod
    def on_packets_lost(
        self, *, now: float, packets: Iterable[QuicSentPacket]
    ) -> None: ...

    @abc.abstractmethod
    def on_rtt_measurement(self, *, now: float, rtt: float) -> None: ...

    def get_log_data(self) -> Dict[str, Any]:
        """Return a dictionary describing the current congestion state."""
        snapshot: Dict[str, Any] = {
            "cwnd": self.congestion_window,
            "bytes_in_flight": self.bytes_in_flight,
        }
        if self.ssthresh is not None:
            snapshot["ssthresh"] = self.ssthresh
        return snapshot
|
||||
|
||||
|
||||
class QuicCongestionControlFactory(Protocol):
    # Structural type for anything that builds a congestion controller from a
    # maximum datagram size — e.g. a QuicCongestionControl subclass itself.
    def __call__(self, *, max_datagram_size: int) -> QuicCongestionControl: ...
|
||||
|
||||
|
||||
class QuicRttMonitor:
    """
    Roundtrip time monitor for HyStart.

    Keeps a small circular buffer of RTT samples and reports when the
    per-window minimum has risen noticeably above the smallest window
    maximum observed so far, for `_size` consecutive samples.
    """

    def __init__(self) -> None:
        self._increases = 0  # consecutive samples flagged as "increasing"
        self._last_time: Optional[float] = None  # NOTE(review): never read — candidate for removal
        self._ready = False  # True once the sample buffer has filled
        self._size = 5  # number of RTT samples kept

        # Smallest per-window maximum seen so far.
        self._filtered_min: Optional[float] = None

        self._sample_idx = 0  # next write position in the circular buffer
        self._sample_max: Optional[float] = None
        self._sample_min: Optional[float] = None
        self._sample_time = 0.0  # time the most recent sample was recorded
        # Idiomatic pre-sized buffer (was a comprehension with an unused index).
        self._samples = [0.0] * self._size

    def add_rtt(self, *, rtt: float) -> None:
        """Record an RTT sample and refresh the window's min/max."""
        self._samples[self._sample_idx] = rtt
        self._sample_idx += 1

        if self._sample_idx >= self._size:
            self._sample_idx = 0
            self._ready = True

        if self._ready:
            self._sample_max = self._samples[0]
            self._sample_min = self._samples[0]
            for sample in self._samples[1:]:
                if sample < self._sample_min:
                    self._sample_min = sample
                elif sample > self._sample_max:
                    self._sample_max = sample

    def is_rtt_increasing(self, *, now: float, rtt: float) -> bool:
        """
        Feed an RTT measurement; return True once the RTT has been judged
        increasing for `_size` consecutive samples.
        """
        # Rate-limit sampling: at most one sample per K_GRANULARITY seconds.
        if now > self._sample_time + K_GRANULARITY:
            self.add_rtt(rtt=rtt)
            self._sample_time = now

            if self._ready:
                # Track the smallest window maximum ever seen.
                if self._filtered_min is None or self._filtered_min > self._sample_max:
                    self._filtered_min = self._sample_max

                # "Increasing" means the window minimum exceeds the filtered
                # baseline by at least 25% (delta * 4 >= baseline).
                delta = self._sample_min - self._filtered_min
                if delta * 4 >= self._filtered_min:
                    self._increases += 1
                    if self._increases >= self._size:
                        return True
                elif delta > 0:
                    # Mild increase only: restart the consecutive count.
                    self._increases = 0
        return False
|
||||
|
||||
|
||||
# Registry of congestion control algorithms, keyed by name.
_factories: Dict[str, "QuicCongestionControlFactory"] = {}


def create_congestion_control(
    name: str, *, max_datagram_size: int
) -> "QuicCongestionControl":
    """
    Create an instance of the `name` congestion control algorithm.

    :param name: name under which the algorithm was registered.
    :param max_datagram_size: maximum datagram size, forwarded to the factory.
    :raises Exception: if no algorithm was registered under `name`.
    """
    try:
        factory = _factories[name]
    except KeyError:
        # Drop the KeyError context: the message already says what happened.
        raise Exception(f"Unknown congestion control algorithm: {name}") from None
    return factory(max_datagram_size=max_datagram_size)


def register_congestion_control(
    name: str, factory: "QuicCongestionControlFactory"
) -> None:
    """
    Register a congestion control algorithm named `name`.

    Registering under an existing name replaces the previous factory.
    """
    _factories[name] = factory
|
||||
@ -0,0 +1,212 @@
|
||||
from typing import Any, Dict, Iterable
|
||||
|
||||
from ..packet_builder import QuicSentPacket
|
||||
from .base import (
|
||||
K_INITIAL_WINDOW,
|
||||
K_MINIMUM_WINDOW,
|
||||
QuicCongestionControl,
|
||||
QuicRttMonitor,
|
||||
register_congestion_control,
|
||||
)
|
||||
|
||||
# cubic specific variables (see https://www.rfc-editor.org/rfc/rfc9438.html#name-definitions)
K_CUBIC_C = 0.4  # CUBIC's "C" scaling constant
K_CUBIC_LOSS_REDUCTION_FACTOR = 0.7  # beta_cubic: multiplicative decrease on loss
K_CUBIC_MAX_IDLE_TIME = 2  # reset the cwnd after 2 seconds of inactivity
|
||||
|
||||
|
||||
def better_cube_root(x: float) -> float:
    """
    Return the real cube root of *x*.

    A plain ``x ** (1/3)`` on a negative float yields a complex-ish result
    due to precision errors, so the root is taken on the magnitude and the
    sign restored afterwards.
    """
    magnitude_root = abs(x) ** (1.0 / 3.0)
    return -magnitude_root if x < 0 else magnitude_root
|
||||
|
||||
|
||||
class CubicCongestionControl(QuicCongestionControl):
    """
    Cubic congestion control implementation for aioquic
    (RFC 9438), with fast convergence and an RTT-based slow-start exit.
    """

    def __init__(self, max_datagram_size: int) -> None:
        # NOTE(review): `max_datagram_size` is positional here, unlike the
        # keyword-only base-class/Reno signatures; keyword callers still work.
        super().__init__(max_datagram_size=max_datagram_size)
        # increase by one segment
        self.additive_increase_factor: int = max_datagram_size
        self._max_datagram_size: int = max_datagram_size
        self._congestion_recovery_start_time = 0.0

        self._rtt_monitor = QuicRttMonitor()

        self.rtt = 0.02  # starting RTT is considered to be 20ms

        self.reset()

        self.last_ack = 0.0  # sent-time of the most recently acked packet

    def W_cubic(self, t) -> int:
        # W_cubic(t) = C * (t - K)^3 + W_max, computed in segments and
        # converted back to bytes.
        W_max_segments = self._W_max / self._max_datagram_size
        target_segments = K_CUBIC_C * (t - self.K) ** 3 + (W_max_segments)
        return int(target_segments * self._max_datagram_size)

    def is_reno_friendly(self, t) -> bool:
        # While the cubic curve sits below the Reno estimate, grow like Reno.
        return self.W_cubic(t) < self._W_est

    def is_concave(self) -> bool:
        # Concave region: still below the window held at the last reduction.
        return self.congestion_window < self._W_max

    def reset(self) -> None:
        """Restore pristine slow-start state (also used after long idle)."""
        self.congestion_window = K_INITIAL_WINDOW * self._max_datagram_size
        self.ssthresh = None

        self._first_slow_start = True
        self._starting_congestion_avoidance = False
        self.K: float = 0.0
        self._W_est = 0  # Reno-style window estimate, in bytes
        self._cwnd_epoch = 0  # cwnd at the start of the current epoch
        self._t_epoch = 0.0  # time the current epoch started
        self._W_max = self.congestion_window  # window before the last reduction

    def on_packet_acked(self, *, now: float, packet: QuicSentPacket) -> None:
        self.bytes_in_flight -= packet.sent_bytes
        self.last_ack = packet.sent_time

        if self.ssthresh is None or self.congestion_window < self.ssthresh:
            # slow start
            self.congestion_window += packet.sent_bytes
        else:
            # congestion avoidance
            if self._first_slow_start and not self._starting_congestion_avoidance:
                # exiting slow start without having a loss
                self._first_slow_start = False
                self._W_max = self.congestion_window
                self._t_epoch = now
                self._cwnd_epoch = self.congestion_window
                self._W_est = self._cwnd_epoch
                # calculate K
                W_max_segments = self._W_max / self._max_datagram_size
                cwnd_epoch_segments = self._cwnd_epoch / self._max_datagram_size
                self.K = better_cube_root(
                    (W_max_segments - cwnd_epoch_segments) / K_CUBIC_C
                )

            # initialize the variables used at start of congestion avoidance
            if self._starting_congestion_avoidance:
                self._starting_congestion_avoidance = False
                self._first_slow_start = False
                self._t_epoch = now
                self._cwnd_epoch = self.congestion_window
                self._W_est = self._cwnd_epoch
                # calculate K
                W_max_segments = self._W_max / self._max_datagram_size
                cwnd_epoch_segments = self._cwnd_epoch / self._max_datagram_size
                self.K = better_cube_root(
                    (W_max_segments - cwnd_epoch_segments) / K_CUBIC_C
                )

            # Reno-style additive increase of the friendly-region estimate.
            self._W_est = int(
                self._W_est
                + self.additive_increase_factor
                * (packet.sent_bytes / self.congestion_window)
            )

            t = now - self._t_epoch

            # Target window one RTT ahead, clamped to [cwnd, 1.5 * cwnd].
            target: int = 0
            W_cubic = self.W_cubic(t + self.rtt)
            if W_cubic < self.congestion_window:
                target = self.congestion_window
            elif W_cubic > 1.5 * self.congestion_window:
                target = int(self.congestion_window * 1.5)
            else:
                target = W_cubic

            if self.is_reno_friendly(t):
                # reno friendly region of cubic
                # (https://www.rfc-editor.org/rfc/rfc9438.html#name-reno-friendly-region)
                self.congestion_window = self._W_est
            elif self.is_concave():
                # concave region of cubic
                # (https://www.rfc-editor.org/rfc/rfc9438.html#name-concave-region)
                # NOTE(review): this branch and the convex branch below are
                # currently identical — intentional per RFC 9438's shared
                # update formula, but the split adds no behavior.
                self.congestion_window = int(
                    self.congestion_window
                    + (
                        (target - self.congestion_window)
                        * (self._max_datagram_size / self.congestion_window)
                    )
                )
            else:
                # convex region of cubic
                # (https://www.rfc-editor.org/rfc/rfc9438.html#name-convex-region)
                self.congestion_window = int(
                    self.congestion_window
                    + (
                        (target - self.congestion_window)
                        * (self._max_datagram_size / self.congestion_window)
                    )
                )

    def on_packet_sent(self, *, packet: QuicSentPacket) -> None:
        self.bytes_in_flight += packet.sent_bytes
        if self.last_ack == 0.0:
            return
        # Reset the window after a long idle period.
        elapsed_idle = packet.sent_time - self.last_ack
        if elapsed_idle >= K_CUBIC_MAX_IDLE_TIME:
            self.reset()

    def on_packets_expired(self, *, packets: Iterable[QuicSentPacket]) -> None:
        for packet in packets:
            self.bytes_in_flight -= packet.sent_bytes

    def on_packets_lost(self, *, now: float, packets: Iterable[QuicSentPacket]) -> None:
        lost_largest_time = 0.0
        for packet in packets:
            self.bytes_in_flight -= packet.sent_bytes
            lost_largest_time = packet.sent_time

        # start a new congestion event if packet was sent after the
        # start of the previous congestion recovery period.
        if lost_largest_time > self._congestion_recovery_start_time:
            self._congestion_recovery_start_time = now

            # Normal congestion handle, can't be used in same time as fast convergence
            # self._W_max = self.congestion_window

            # fast convergence
            if self._W_max is not None and self.congestion_window < self._W_max:
                self._W_max = int(
                    self.congestion_window * (1 + K_CUBIC_LOSS_REDUCTION_FACTOR) / 2
                )
            else:
                self._W_max = self.congestion_window

            # normal congestion MD
            flight_size = self.bytes_in_flight
            new_ssthresh = max(
                int(flight_size * K_CUBIC_LOSS_REDUCTION_FACTOR),
                K_MINIMUM_WINDOW * self._max_datagram_size,
            )
            self.ssthresh = new_ssthresh
            self.congestion_window = max(
                self.ssthresh, K_MINIMUM_WINDOW * self._max_datagram_size
            )

            # restart a new congestion avoidance phase
            self._starting_congestion_avoidance = True

    def on_rtt_measurement(self, *, now: float, rtt: float) -> None:
        self.rtt = rtt
        # check whether we should exit slow start
        if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing(
            rtt=rtt, now=now
        ):
            self.ssthresh = self.congestion_window

    def get_log_data(self) -> Dict[str, Any]:
        data = super().get_log_data()

        data["cubic-wmax"] = int(self._W_max)

        return data


register_congestion_control("cubic", CubicCongestionControl)
|
||||
@ -0,0 +1,77 @@
|
||||
from typing import Iterable
|
||||
|
||||
from ..packet_builder import QuicSentPacket
|
||||
from .base import (
|
||||
K_MINIMUM_WINDOW,
|
||||
QuicCongestionControl,
|
||||
QuicRttMonitor,
|
||||
register_congestion_control,
|
||||
)
|
||||
|
||||
K_LOSS_REDUCTION_FACTOR = 0.5  # multiplicative decrease applied to cwnd on loss
|
||||
|
||||
|
||||
class RenoCongestionControl(QuicCongestionControl):
    """
    New Reno congestion control.

    Grows the window by acknowledged bytes during slow start and by one
    datagram per window acknowledged during congestion avoidance; halves the
    window (with a floor) on loss.
    """

    def __init__(self, *, max_datagram_size: int) -> None:
        super().__init__(max_datagram_size=max_datagram_size)
        self._max_datagram_size = max_datagram_size
        self._congestion_recovery_start_time = 0.0
        self._congestion_stash = 0
        self._rtt_monitor = QuicRttMonitor()

    def on_packet_acked(self, *, now: float, packet: QuicSentPacket) -> None:
        self.bytes_in_flight -= packet.sent_bytes

        # Packets sent before the current recovery period began do not
        # grow the window.
        if packet.sent_time <= self._congestion_recovery_start_time:
            return

        if self.ssthresh is not None and self.congestion_window >= self.ssthresh:
            # Congestion avoidance: bank acknowledged bytes, and grow the
            # window by one datagram for every full window acknowledged.
            self._congestion_stash += packet.sent_bytes
            windows_acked, self._congestion_stash = divmod(
                self._congestion_stash, self.congestion_window
            )
            self.congestion_window += windows_acked * self._max_datagram_size
        else:
            # Slow start: grow the window by the acknowledged bytes.
            self.congestion_window += packet.sent_bytes

    def on_packet_sent(self, *, packet: QuicSentPacket) -> None:
        self.bytes_in_flight += packet.sent_bytes

    def on_packets_expired(self, *, packets: Iterable[QuicSentPacket]) -> None:
        for expired in packets:
            self.bytes_in_flight -= expired.sent_bytes

    def on_packets_lost(self, *, now: float, packets: Iterable[QuicSentPacket]) -> None:
        latest_loss_sent_time = 0.0
        for lost in packets:
            self.bytes_in_flight -= lost.sent_bytes
            latest_loss_sent_time = lost.sent_time

        # Start a new congestion event only if a lost packet was sent after
        # the start of the previous congestion recovery period.
        if latest_loss_sent_time > self._congestion_recovery_start_time:
            self._congestion_recovery_start_time = now
            self.congestion_window = max(
                int(self.congestion_window * K_LOSS_REDUCTION_FACTOR),
                K_MINIMUM_WINDOW * self._max_datagram_size,
            )
            self.ssthresh = self.congestion_window

        # TODO : collapse congestion window if persistent congestion

    def on_rtt_measurement(self, *, now: float, rtt: float) -> None:
        # Exit slow start once the RTT monitor reports a sustained increase.
        if self.ssthresh is None and self._rtt_monitor.is_rtt_increasing(
            now=now, rtt=rtt
        ):
            self.ssthresh = self.congestion_window


register_congestion_control("reno", RenoCongestionControl)
|
||||
3623
Code/venv/lib/python3.13/site-packages/aioquic/quic/connection.py
Normal file
3623
Code/venv/lib/python3.13/site-packages/aioquic/quic/connection.py
Normal file
File diff suppressed because it is too large
Load Diff
246
Code/venv/lib/python3.13/site-packages/aioquic/quic/crypto.py
Normal file
246
Code/venv/lib/python3.13/site-packages/aioquic/quic/crypto.py
Normal file
@ -0,0 +1,246 @@
|
||||
import binascii
|
||||
from typing import Callable, Optional, Tuple
|
||||
|
||||
from .._crypto import AEAD, CryptoError, HeaderProtection
|
||||
from ..tls import CipherSuite, cipher_suite_hash, hkdf_expand_label, hkdf_extract
|
||||
from .packet import (
|
||||
QuicProtocolVersion,
|
||||
decode_packet_number,
|
||||
is_long_header,
|
||||
)
|
||||
|
||||
# Map each TLS cipher suite to the cipher names used by the native crypto
# module for (header protection, payload protection).
CIPHER_SUITES = {
    CipherSuite.AES_128_GCM_SHA256: (b"aes-128-ecb", b"aes-128-gcm"),
    CipherSuite.AES_256_GCM_SHA384: (b"aes-256-ecb", b"aes-256-gcm"),
    CipherSuite.CHACHA20_POLY1305_SHA256: (b"chacha20", b"chacha20-poly1305"),
}
INITIAL_CIPHER_SUITE = CipherSuite.AES_128_GCM_SHA256
# Salts used to derive Initial secrets for QUIC v1 and v2.
INITIAL_SALT_VERSION_1 = binascii.unhexlify("38762cf7f55934b34d179ae6a4c80cadccbb7f0a")
INITIAL_SALT_VERSION_2 = binascii.unhexlify("0dede3def700a6db819381be6e269dcbf9bd2ed9")
SAMPLE_SIZE = 16  # presumably the header-protection sample length — not used in this view
||||
|
||||
|
||||
# Signature of the key setup/teardown notification callbacks; they receive a
# trigger name such as "tls", "local_update" or "remote_update".
Callback = Callable[[str], None]


def NoCallback(trigger: str) -> None:
    """Default no-op callback used when the caller supplies none."""
    pass


class KeyUnavailableError(CryptoError):
    """Raised when decrypting a packet before the keys have been set up."""

    pass
|
||||
|
||||
|
||||
def derive_key_iv_hp(
    *, cipher_suite: CipherSuite, secret: bytes, version: int
) -> Tuple[bytes, bytes, bytes]:
    """
    Derive the packet protection key, IV and header protection key from a
    TLS secret, using version-specific HKDF labels.
    """
    algorithm = cipher_suite_hash(cipher_suite)

    # 256-bit suites use 32-byte keys; AES-128-GCM uses 16-byte keys.
    if cipher_suite in (
        CipherSuite.AES_256_GCM_SHA384,
        CipherSuite.CHACHA20_POLY1305_SHA256,
    ):
        key_size = 32
    else:
        key_size = 16

    # QUIC v2 uses "quicv2 ..." labels; every other version uses "quic ...".
    label_prefix = b"quicv2 " if version == QuicProtocolVersion.VERSION_2 else b"quic "
    return (
        hkdf_expand_label(algorithm, secret, label_prefix + b"key", b"", key_size),
        hkdf_expand_label(algorithm, secret, label_prefix + b"iv", b"", 12),
        hkdf_expand_label(algorithm, secret, label_prefix + b"hp", b"", key_size),
    )
|
||||
|
||||
|
||||
class CryptoContext:
    """
    Packet protection state for one direction and one key phase: an AEAD for
    payload protection plus a header protection instance.
    """

    def __init__(
        self,
        key_phase: int = 0,
        setup_cb: Callback = NoCallback,
        teardown_cb: Callback = NoCallback,
    ) -> None:
        self.aead: Optional[AEAD] = None
        self.cipher_suite: Optional[CipherSuite] = None
        self.hp: Optional[HeaderProtection] = None
        self.key_phase = key_phase
        self.secret: Optional[bytes] = None
        self.version: Optional[int] = None
        self._setup_cb = setup_cb
        self._teardown_cb = teardown_cb

    def decrypt_packet(
        self, packet: bytes, encrypted_offset: int, expected_packet_number: int
    ) -> Tuple[bytes, bytes, int, bool]:
        """
        Remove header protection and decrypt the payload.

        Returns ``(plain_header, payload, packet_number, key_updated)`` where
        `key_updated` is True when a key phase change was detected and a new
        context was used for decryption.

        :raises KeyUnavailableError: if no decryption key has been set up.
        """
        if self.aead is None:
            raise KeyUnavailableError("Decryption key is not available")

        # header protection
        plain_header, packet_number = self.hp.remove(packet, encrypted_offset)
        first_byte = plain_header[0]

        # packet number: length is encoded in the low 2 bits of the first byte
        pn_length = (first_byte & 0x03) + 1
        packet_number = decode_packet_number(
            packet_number, pn_length * 8, expected_packet_number
        )

        # detect key phase change (short-header packets only carry the bit)
        crypto = self
        if not is_long_header(first_byte):
            key_phase = (first_byte & 4) >> 2
            if key_phase != self.key_phase:
                crypto = next_key_phase(self)

        # payload protection
        payload = crypto.aead.decrypt(
            packet[len(plain_header) :], plain_header, packet_number
        )

        return plain_header, payload, packet_number, crypto != self

    def encrypt_packet(
        self, plain_header: bytes, plain_payload: bytes, packet_number: int
    ) -> bytes:
        """Encrypt the payload and apply header protection."""
        assert self.is_valid(), "Encryption key is not available"

        # payload protection
        protected_payload = self.aead.encrypt(
            plain_payload, plain_header, packet_number
        )

        # header protection
        return self.hp.apply(plain_header, protected_payload)

    def is_valid(self) -> bool:
        # A context is usable once its AEAD has been set up.
        return self.aead is not None

    def setup(self, *, cipher_suite: CipherSuite, secret: bytes, version: int) -> None:
        """Derive and install keys for `secret`, then fire the setup callback."""
        hp_cipher_name, aead_cipher_name = CIPHER_SUITES[cipher_suite]

        key, iv, hp = derive_key_iv_hp(
            cipher_suite=cipher_suite,
            secret=secret,
            version=version,
        )
        self.aead = AEAD(aead_cipher_name, key, iv)
        self.cipher_suite = cipher_suite
        self.hp = HeaderProtection(hp_cipher_name, hp)
        self.secret = secret
        self.version = version

        # trigger callback
        self._setup_cb("tls")

    def teardown(self) -> None:
        """Discard all keys and fire the teardown callback."""
        self.aead = None
        self.cipher_suite = None
        self.hp = None
        self.secret = None

        # trigger callback
        self._teardown_cb("tls")
|
||||
|
||||
|
||||
def apply_key_phase(self: CryptoContext, crypto: CryptoContext, trigger: str) -> None:
    """Adopt `crypto`'s keys into `self` and fire the setup callback with `trigger`."""
    self.aead = crypto.aead
    self.key_phase = crypto.key_phase
    self.secret = crypto.secret

    # trigger callback
    self._setup_cb(trigger)


def next_key_phase(self: CryptoContext) -> CryptoContext:
    """
    Build the context for the next key phase: the key phase bit is flipped
    and the secret is advanced with the "quic ku" HKDF label.
    """
    algorithm = cipher_suite_hash(self.cipher_suite)

    crypto = CryptoContext(key_phase=int(not self.key_phase))
    crypto.setup(
        cipher_suite=self.cipher_suite,
        secret=hkdf_expand_label(
            algorithm, self.secret, b"quic ku", b"", algorithm.digest_size
        ),
        version=self.version,
    )
    return crypto
|
||||
|
||||
|
||||
class CryptoPair:
    """
    A receive/send pair of crypto contexts for one encryption level, with
    support for coordinated key updates.
    """

    def __init__(
        self,
        recv_setup_cb: Callback = NoCallback,
        recv_teardown_cb: Callback = NoCallback,
        send_setup_cb: Callback = NoCallback,
        send_teardown_cb: Callback = NoCallback,
    ) -> None:
        self.aead_tag_size = 16
        self.recv = CryptoContext(setup_cb=recv_setup_cb, teardown_cb=recv_teardown_cb)
        self.send = CryptoContext(setup_cb=send_setup_cb, teardown_cb=send_teardown_cb)
        self._update_key_requested = False

    def decrypt_packet(
        self, packet: bytes, encrypted_offset: int, expected_packet_number: int
    ) -> Tuple[bytes, bytes, int]:
        """Decrypt with the receive context, applying a key update if the peer initiated one."""
        plain_header, payload, packet_number, update_key = self.recv.decrypt_packet(
            packet, encrypted_offset, expected_packet_number
        )
        if update_key:
            self._update_key("remote_update")
        return plain_header, payload, packet_number

    def encrypt_packet(
        self, plain_header: bytes, plain_payload: bytes, packet_number: int
    ) -> bytes:
        """Encrypt with the send context, applying a locally requested key update first."""
        if self._update_key_requested:
            self._update_key("local_update")
        return self.send.encrypt_packet(plain_header, plain_payload, packet_number)

    def setup_initial(self, cid: bytes, is_client: bool, version: int) -> None:
        """Derive Initial keys from the connection ID for the given QUIC version."""
        # Client's send direction uses "client in"; the server mirrors it.
        if is_client:
            recv_label, send_label = b"server in", b"client in"
        else:
            recv_label, send_label = b"client in", b"server in"

        if version == QuicProtocolVersion.VERSION_2:
            initial_salt = INITIAL_SALT_VERSION_2
        else:
            initial_salt = INITIAL_SALT_VERSION_1

        algorithm = cipher_suite_hash(INITIAL_CIPHER_SUITE)
        initial_secret = hkdf_extract(algorithm, initial_salt, cid)
        self.recv.setup(
            cipher_suite=INITIAL_CIPHER_SUITE,
            secret=hkdf_expand_label(
                algorithm, initial_secret, recv_label, b"", algorithm.digest_size
            ),
            version=version,
        )
        self.send.setup(
            cipher_suite=INITIAL_CIPHER_SUITE,
            secret=hkdf_expand_label(
                algorithm, initial_secret, send_label, b"", algorithm.digest_size
            ),
            version=version,
        )

    def teardown(self) -> None:
        """Discard both directions' keys."""
        self.recv.teardown()
        self.send.teardown()

    def update_key(self) -> None:
        # Defer the actual update until the next packet is encrypted.
        self._update_key_requested = True

    @property
    def key_phase(self) -> int:
        # Report the phase that will be in effect once a pending update runs.
        if self._update_key_requested:
            return int(not self.recv.key_phase)
        else:
            return self.recv.key_phase

    def _update_key(self, trigger: str) -> None:
        # Advance both directions to the next key phase in lockstep.
        apply_key_phase(self.recv, next_key_phase(self.recv), trigger=trigger)
        apply_key_phase(self.send, next_key_phase(self.send), trigger=trigger)
        self._update_key_requested = False
|
||||
126
Code/venv/lib/python3.13/site-packages/aioquic/quic/events.py
Normal file
126
Code/venv/lib/python3.13/site-packages/aioquic/quic/events.py
Normal file
@ -0,0 +1,126 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class QuicEvent:
    """
    Base class for QUIC events.
    """

    pass


@dataclass
class ConnectionIdIssued(QuicEvent):
    """The ConnectionIdIssued event is fired when a connection ID is issued."""

    connection_id: bytes
    "The connection ID which was issued."


@dataclass
class ConnectionIdRetired(QuicEvent):
    """The ConnectionIdRetired event is fired when a connection ID is retired."""

    connection_id: bytes
    "The connection ID which was retired."
|
||||
|
||||
|
||||
@dataclass
class ConnectionTerminated(QuicEvent):
    """
    The ConnectionTerminated event is fired when the QUIC connection is terminated.
    """

    error_code: int
    "The error code which was specified when closing the connection."

    frame_type: Optional[int]
    "The frame type which caused the connection to be closed, or `None`."

    reason_phrase: str
    "The human-readable reason for which the connection was closed."


@dataclass
class DatagramFrameReceived(QuicEvent):
    """
    The DatagramFrameReceived event is fired when a DATAGRAM frame is received.
    """

    data: bytes
    "The data which was received."


@dataclass
class HandshakeCompleted(QuicEvent):
    """
    The HandshakeCompleted event is fired when the TLS handshake completes.
    """

    alpn_protocol: Optional[str]
    "The protocol which was negotiated using ALPN, or `None`."

    early_data_accepted: bool
    "Whether early (0-RTT) data was accepted by the remote peer."

    session_resumed: bool
    "Whether a TLS session was resumed."
|
||||
|
||||
|
||||
@dataclass
class PingAcknowledged(QuicEvent):
    """
    The PingAcknowledged event is fired when a PING frame is acknowledged.
    """

    uid: int
    "The unique ID of the PING."


@dataclass
class ProtocolNegotiated(QuicEvent):
    """
    The ProtocolNegotiated event is fired when ALPN negotiation completes.
    """

    alpn_protocol: Optional[str]
    "The protocol which was negotiated using ALPN, or `None`."


@dataclass
class StopSendingReceived(QuicEvent):
    """
    The StopSendingReceived event is fired when the remote peer requests
    stopping data transmission on a stream.
    """

    error_code: int
    "The error code that was sent from the peer."

    stream_id: int
    "The ID of the stream that the peer requested stopping data transmission."
|
||||
|
||||
|
||||
@dataclass
class StreamDataReceived(QuicEvent):
    """
    The StreamDataReceived event is fired whenever data is received on a
    stream.
    """

    data: bytes
    "The data which was received."

    end_stream: bool
    "Whether the STREAM frame had the FIN bit set."

    stream_id: int
    "The ID of the stream the data was received for."


@dataclass
class StreamReset(QuicEvent):
    """
    The StreamReset event is fired when the remote peer resets a stream.
    """

    error_code: int
    "The error code that triggered the reset."

    stream_id: int
    "The ID of the stream that was reset."
|
||||
329
Code/venv/lib/python3.13/site-packages/aioquic/quic/logger.py
Normal file
329
Code/venv/lib/python3.13/site-packages/aioquic/quic/logger.py
Normal file
@ -0,0 +1,329 @@
|
||||
import binascii
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from collections import deque
|
||||
from typing import Any, Deque, Dict, List, Optional
|
||||
|
||||
from ..h3.events import Headers
|
||||
from .packet import (
|
||||
QuicFrameType,
|
||||
QuicPacketType,
|
||||
QuicStreamFrame,
|
||||
QuicTransportParameters,
|
||||
)
|
||||
from .rangeset import RangeSet
|
||||
|
||||
# Human-readable packet type names used in the emitted traces.
PACKET_TYPE_NAMES = {
    QuicPacketType.INITIAL: "initial",
    QuicPacketType.HANDSHAKE: "handshake",
    QuicPacketType.ZERO_RTT: "0RTT",
    QuicPacketType.ONE_RTT: "1RTT",
    QuicPacketType.RETRY: "retry",
    QuicPacketType.VERSION_NEGOTIATION: "version_negotiation",
}
QLOG_VERSION = "0.3"  # qlog schema version declared in the output
|
||||
|
||||
|
||||
def hexdump(data: bytes) -> str:
    """Return *data* rendered as a lowercase hexadecimal string."""
    return data.hex()
|
||||
|
||||
|
||||
class QuicLoggerTrace:
|
||||
"""
|
||||
A QUIC event trace.
|
||||
|
||||
Events are logged in the format defined by qlog.
|
||||
|
||||
See:
|
||||
- https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-02
|
||||
- https://datatracker.ietf.org/doc/html/draft-marx-quic-qlog-quic-events
|
||||
- https://datatracker.ietf.org/doc/html/draft-marx-quic-qlog-h3-events
|
||||
"""
|
||||
|
||||
def __init__(self, *, is_client: bool, odcid: bytes) -> None:
|
||||
self._odcid = odcid
|
||||
self._events: Deque[Dict[str, Any]] = deque()
|
||||
self._vantage_point = {
|
||||
"name": "aioquic",
|
||||
"type": "client" if is_client else "server",
|
||||
}
|
||||
|
||||
# QUIC
|
||||
|
||||
def encode_ack_frame(self, ranges: RangeSet, delay: float) -> Dict:
|
||||
return {
|
||||
"ack_delay": self.encode_time(delay),
|
||||
"acked_ranges": [[x.start, x.stop - 1] for x in ranges],
|
||||
"frame_type": "ack",
|
||||
}
|
||||
|
||||
def encode_connection_close_frame(
|
||||
self, error_code: int, frame_type: Optional[int], reason_phrase: str
|
||||
) -> Dict:
|
||||
attrs = {
|
||||
"error_code": error_code,
|
||||
"error_space": "application" if frame_type is None else "transport",
|
||||
"frame_type": "connection_close",
|
||||
"raw_error_code": error_code,
|
||||
"reason": reason_phrase,
|
||||
}
|
||||
if frame_type is not None:
|
||||
attrs["trigger_frame_type"] = frame_type
|
||||
|
||||
return attrs
|
||||
|
||||
def encode_connection_limit_frame(self, frame_type: int, maximum: int) -> Dict:
|
||||
if frame_type == QuicFrameType.MAX_DATA:
|
||||
return {"frame_type": "max_data", "maximum": maximum}
|
||||
else:
|
||||
return {
|
||||
"frame_type": "max_streams",
|
||||
"maximum": maximum,
|
||||
"stream_type": "unidirectional"
|
||||
if frame_type == QuicFrameType.MAX_STREAMS_UNI
|
||||
else "bidirectional",
|
||||
}
|
||||
|
||||
def encode_crypto_frame(self, frame: QuicStreamFrame) -> Dict:
|
||||
return {
|
||||
"frame_type": "crypto",
|
||||
"length": len(frame.data),
|
||||
"offset": frame.offset,
|
||||
}
|
||||
|
||||
def encode_data_blocked_frame(self, limit: int) -> Dict:
|
||||
return {"frame_type": "data_blocked", "limit": limit}
|
||||
|
||||
def encode_datagram_frame(self, length: int) -> Dict:
|
||||
return {"frame_type": "datagram", "length": length}
|
||||
|
||||
def encode_handshake_done_frame(self) -> Dict:
|
||||
return {"frame_type": "handshake_done"}
|
||||
|
||||
def encode_max_stream_data_frame(self, maximum: int, stream_id: int) -> Dict:
|
||||
return {
|
||||
"frame_type": "max_stream_data",
|
||||
"maximum": maximum,
|
||||
"stream_id": stream_id,
|
||||
}
|
||||
|
||||
def encode_new_connection_id_frame(
    self,
    connection_id: bytes,
    retire_prior_to: int,
    sequence_number: int,
    stateless_reset_token: bytes,
) -> Dict:
    """Build the qlog representation of a NEW_CONNECTION_ID frame."""
    return dict(
        connection_id=hexdump(connection_id),
        frame_type="new_connection_id",
        length=len(connection_id),
        reset_token=hexdump(stateless_reset_token),
        retire_prior_to=retire_prior_to,
        sequence_number=sequence_number,
    )
|
||||
|
||||
def encode_new_token_frame(self, token: bytes) -> Dict:
    """Build the qlog representation of a NEW_TOKEN frame."""
    return dict(
        frame_type="new_token",
        length=len(token),
        token=hexdump(token),
    )
|
||||
|
||||
def encode_padding_frame(self) -> Dict:
    """Build the qlog representation of a PADDING frame."""
    return dict(frame_type="padding")
|
||||
|
||||
def encode_path_challenge_frame(self, data: bytes) -> Dict:
    """Build the qlog representation of a PATH_CHALLENGE frame."""
    return dict(data=hexdump(data), frame_type="path_challenge")
|
||||
|
||||
def encode_path_response_frame(self, data: bytes) -> Dict:
    """Build the qlog representation of a PATH_RESPONSE frame."""
    return dict(data=hexdump(data), frame_type="path_response")
|
||||
|
||||
def encode_ping_frame(self) -> Dict:
    """Build the qlog representation of a PING frame."""
    return dict(frame_type="ping")
|
||||
|
||||
def encode_reset_stream_frame(
    self, error_code: int, final_size: int, stream_id: int
) -> Dict:
    """Build the qlog representation of a RESET_STREAM frame."""
    return dict(
        error_code=error_code,
        final_size=final_size,
        frame_type="reset_stream",
        stream_id=stream_id,
    )
|
||||
|
||||
def encode_retire_connection_id_frame(self, sequence_number: int) -> Dict:
    """Build the qlog representation of a RETIRE_CONNECTION_ID frame."""
    return dict(
        frame_type="retire_connection_id",
        sequence_number=sequence_number,
    )
|
||||
|
||||
def encode_stream_data_blocked_frame(self, limit: int, stream_id: int) -> Dict:
    """Build the qlog representation of a STREAM_DATA_BLOCKED frame."""
    return dict(
        frame_type="stream_data_blocked",
        limit=limit,
        stream_id=stream_id,
    )
|
||||
|
||||
def encode_stop_sending_frame(self, error_code: int, stream_id: int) -> Dict:
    """Build the qlog representation of a STOP_SENDING frame."""
    return dict(
        frame_type="stop_sending",
        error_code=error_code,
        stream_id=stream_id,
    )
|
||||
|
||||
def encode_stream_frame(self, frame: QuicStreamFrame, stream_id: int) -> Dict:
    """Build the qlog representation of a STREAM frame."""
    return dict(
        fin=frame.fin,
        frame_type="stream",
        length=len(frame.data),
        offset=frame.offset,
        stream_id=stream_id,
    )
|
||||
|
||||
def encode_streams_blocked_frame(self, is_unidirectional: bool, limit: int) -> Dict:
    """Build the qlog representation of a STREAMS_BLOCKED frame."""
    if is_unidirectional:
        stream_type = "unidirectional"
    else:
        stream_type = "bidirectional"
    return {
        "frame_type": "streams_blocked",
        "limit": limit,
        "stream_type": stream_type,
    }
|
||||
|
||||
def encode_time(self, seconds: float) -> float:
    """
    Convert a duration in seconds to the milliseconds used by qlog.
    """
    milliseconds = seconds * 1000
    return milliseconds
|
||||
|
||||
def encode_transport_parameters(
    self, owner: str, parameters: QuicTransportParameters
) -> Dict[str, Any]:
    """
    Build the qlog representation of a set of transport parameters.

    Only bool, bytes and int values are encoded; other values (including
    None) are omitted.
    """
    data: Dict[str, Any] = {"owner": owner}
    for name, value in parameters.__dict__.items():
        # bool is checked before int because bool is a subclass of int.
        if isinstance(value, bool):
            data[name] = value
        elif isinstance(value, bytes):
            data[name] = hexdump(value)
        elif isinstance(value, int):
            data[name] = value
    return data
|
||||
|
||||
def packet_type(self, packet_type: QuicPacketType) -> str:
    """Return the qlog name for the given packet type."""
    return PACKET_TYPE_NAMES[packet_type]
|
||||
|
||||
# HTTP/3
|
||||
|
||||
def encode_http3_data_frame(self, length: int, stream_id: int) -> Dict:
    """Build the qlog representation of an HTTP/3 DATA frame."""
    return dict(
        frame=dict(frame_type="data"),
        length=length,
        stream_id=stream_id,
    )
|
||||
|
||||
def encode_http3_headers_frame(
    self, length: int, headers: Headers, stream_id: int
) -> Dict:
    """Build the qlog representation of an HTTP/3 HEADERS frame."""
    frame = {
        "frame_type": "headers",
        "headers": self._encode_http3_headers(headers),
    }
    return {"frame": frame, "length": length, "stream_id": stream_id}
|
||||
|
||||
def encode_http3_push_promise_frame(
    self, length: int, headers: Headers, push_id: int, stream_id: int
) -> Dict:
    """Build the qlog representation of an HTTP/3 PUSH_PROMISE frame."""
    frame = {
        "frame_type": "push_promise",
        "headers": self._encode_http3_headers(headers),
        "push_id": push_id,
    }
    return {"frame": frame, "length": length, "stream_id": stream_id}
|
||||
|
||||
def _encode_http3_headers(self, headers: Headers) -> List[Dict]:
    """Convert (name, value) byte pairs into qlog header dictionaries."""
    encoded: List[Dict] = []
    for name, value in headers:
        encoded.append({"name": name.decode("utf8"), "value": value.decode("utf8")})
    return encoded
|
||||
|
||||
# CORE
|
||||
|
||||
def log_event(self, *, category: str, event: str, data: Dict) -> None:
    """Append a single qlog event, timestamped with the current wall clock."""
    record = {
        "data": data,
        "name": "%s:%s" % (category, event),
        "time": self.encode_time(time.time()),
    }
    self._events.append(record)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
    """
    Return the trace as a dictionary which can be written as JSON.
    """
    common_fields = {"ODCID": hexdump(self._odcid)}
    return {
        "common_fields": common_fields,
        "events": list(self._events),
        "vantage_point": self._vantage_point,
    }
|
||||
|
||||
|
||||
class QuicLogger:
    """
    A QUIC event logger which stores traces in memory.
    """

    def __init__(self) -> None:
        # Traces created by `start_trace`, in creation order.
        self._traces: List[QuicLoggerTrace] = []

    def start_trace(self, is_client: bool, odcid: bytes) -> QuicLoggerTrace:
        """Create a new trace, register it and return it."""
        new_trace = QuicLoggerTrace(is_client=is_client, odcid=odcid)
        self._traces.append(new_trace)
        return new_trace

    def end_trace(self, trace: QuicLoggerTrace) -> None:
        """Mark a trace as finished; it must have been started by this logger."""
        assert trace in self._traces, "QuicLoggerTrace does not belong to QuicLogger"

    def to_dict(self) -> Dict[str, Any]:
        """
        Return the traces as a dictionary which can be written as JSON.
        """
        return {
            "qlog_format": "JSON",
            "qlog_version": QLOG_VERSION,
            "traces": [t.to_dict() for t in self._traces],
        }
|
||||
|
||||
|
||||
class QuicFileLogger(QuicLogger):
    """
    A QUIC event logger which writes one trace per file.
    """

    def __init__(self, path: str) -> None:
        # Fail fast if the output directory is missing.
        if not os.path.isdir(path):
            raise ValueError("QUIC log output directory '%s' does not exist" % path)
        self.path = path
        super().__init__()

    def end_trace(self, trace: QuicLoggerTrace) -> None:
        """Write the finished trace to `<ODCID>.qlog` and drop it from memory."""
        trace_dict = trace.to_dict()
        filename = trace_dict["common_fields"]["ODCID"] + ".qlog"
        document = {
            "qlog_format": "JSON",
            "qlog_version": QLOG_VERSION,
            "traces": [trace_dict],
        }
        with open(os.path.join(self.path, filename), "w") as logger_fp:
            json.dump(document, logger_fp)
        self._traces.remove(trace)
|
||||
640
Code/venv/lib/python3.13/site-packages/aioquic/quic/packet.py
Normal file
640
Code/venv/lib/python3.13/site-packages/aioquic/quic/packet.py
Normal file
@ -0,0 +1,640 @@
|
||||
import binascii
|
||||
import ipaddress
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum, IntEnum
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
|
||||
|
||||
from ..buffer import Buffer
|
||||
from .rangeset import RangeSet
|
||||
|
||||
# Flag bits of the first byte of a QUIC packet.
PACKET_LONG_HEADER = 0x80
PACKET_FIXED_BIT = 0x40
PACKET_SPIN_BIT = 0x20

# Size limits, in bytes.
CONNECTION_ID_MAX_SIZE = 20
PACKET_NUMBER_MAX_SIZE = 4
# Fixed, version-specific key material used to compute Retry integrity tags.
RETRY_AEAD_KEY_VERSION_1 = binascii.unhexlify("be0c690b9f66575a1d766b54e368c84e")
RETRY_AEAD_KEY_VERSION_2 = binascii.unhexlify("8fb4b01b56ac48e260fbcbcead7ccc92")
RETRY_AEAD_NONCE_VERSION_1 = binascii.unhexlify("461599d35d632bf2239825bb")
RETRY_AEAD_NONCE_VERSION_2 = binascii.unhexlify("d86969bc2d7c6d9990efb04a")
RETRY_INTEGRITY_TAG_SIZE = 16
STATELESS_RESET_TOKEN_SIZE = 16
||||
|
||||
|
||||
class QuicErrorCode(IntEnum):
    """QUIC transport error codes carried in CONNECTION_CLOSE frames."""

    NO_ERROR = 0x0
    INTERNAL_ERROR = 0x1
    CONNECTION_REFUSED = 0x2
    FLOW_CONTROL_ERROR = 0x3
    STREAM_LIMIT_ERROR = 0x4
    STREAM_STATE_ERROR = 0x5
    FINAL_SIZE_ERROR = 0x6
    FRAME_ENCODING_ERROR = 0x7
    TRANSPORT_PARAMETER_ERROR = 0x8
    CONNECTION_ID_LIMIT_ERROR = 0x9
    PROTOCOL_VIOLATION = 0xA
    INVALID_TOKEN = 0xB
    APPLICATION_ERROR = 0xC
    CRYPTO_BUFFER_EXCEEDED = 0xD
    KEY_UPDATE_ERROR = 0xE
    AEAD_LIMIT_REACHED = 0xF
    VERSION_NEGOTIATION_ERROR = 0x11
    # Base value; TLS alerts are added to this code.
    CRYPTO_ERROR = 0x100
|
||||
|
||||
|
||||
class QuicPacketType(Enum):
    """
    Internal identifiers for QUIC packet types.

    These are NOT the wire values; long header wire encodings differ per
    protocol version (see the PACKET_LONG_TYPE_* tables below).
    """

    INITIAL = 0
    ZERO_RTT = 1
    HANDSHAKE = 2
    RETRY = 3
    VERSION_NEGOTIATION = 4
    ONE_RTT = 5
|
||||
|
||||
|
||||
# For backwards compatibility only, use `QuicPacketType` in new code.
PACKET_TYPE_INITIAL = QuicPacketType.INITIAL

# QUIC version 1
# https://datatracker.ietf.org/doc/html/rfc9000#section-17.2
PACKET_LONG_TYPE_ENCODE_VERSION_1 = {
    QuicPacketType.INITIAL: 0,
    QuicPacketType.ZERO_RTT: 1,
    QuicPacketType.HANDSHAKE: 2,
    QuicPacketType.RETRY: 3,
}
# Inverse table: wire value -> packet type.
PACKET_LONG_TYPE_DECODE_VERSION_1 = {
    wire_value: packet_type
    for packet_type, wire_value in PACKET_LONG_TYPE_ENCODE_VERSION_1.items()
}

# QUIC version 2
# https://datatracker.ietf.org/doc/html/rfc9369#section-3.2
PACKET_LONG_TYPE_ENCODE_VERSION_2 = {
    QuicPacketType.INITIAL: 1,
    QuicPacketType.ZERO_RTT: 2,
    QuicPacketType.HANDSHAKE: 3,
    QuicPacketType.RETRY: 0,
}
# Inverse table: wire value -> packet type.
PACKET_LONG_TYPE_DECODE_VERSION_2 = {
    wire_value: packet_type
    for packet_type, wire_value in PACKET_LONG_TYPE_ENCODE_VERSION_2.items()
}
|
||||
|
||||
|
||||
class QuicProtocolVersion(IntEnum):
    """Wire values of the QUIC protocol versions known to this module."""

    NEGOTIATION = 0  # reserved value used by Version Negotiation packets
    VERSION_1 = 0x00000001
    VERSION_2 = 0x6B3343CF
|
||||
|
||||
|
||||
@dataclass
class QuicHeader:
    """Parsed representation of a QUIC packet header."""

    version: Optional[int]
    "The protocol version. Only present in long header packets."

    packet_type: QuicPacketType
    "The type of the packet."

    packet_length: int
    "The total length of the packet, in bytes."

    destination_cid: bytes
    "The destination connection ID."

    source_cid: bytes
    # Fixed copy-paste error: this previously read "The destination
    # connection ID."
    "The source connection ID."

    token: bytes
    "The address verification token. Only present in `INITIAL` and `RETRY` packets."

    integrity_tag: bytes
    "The retry integrity tag. Only present in `RETRY` packets."

    supported_versions: List[int]
    "Supported protocol versions. Only present in `VERSION_NEGOTIATION` packets."
|
||||
|
||||
|
||||
def decode_packet_number(truncated: int, num_bits: int, expected: int) -> int:
    """
    Recover a packet number from a truncated packet number.

    See: Appendix A - Sample Packet Number Decoding Algorithm
    """
    window = 1 << num_bits
    half_window = window // 2
    # Replace the low `num_bits` of the expected number with the truncated value.
    candidate = (expected & ~(window - 1)) | truncated
    # Shift by one window if the candidate fell outside the interval centred
    # on the expected number, without leaving the [0, 2**62) range.
    if candidate <= expected - half_window and candidate < (1 << 62) - window:
        candidate += window
    elif candidate > expected + half_window and candidate >= window:
        candidate -= window
    return candidate
|
||||
|
||||
|
||||
def get_retry_integrity_tag(
    packet_without_tag: bytes, original_destination_cid: bytes, version: int
) -> bytes:
    """
    Calculate the integrity tag for a RETRY packet.
    """
    # Build the Retry pseudo-packet: ODCID length, ODCID, then the packet.
    pseudo = Buffer(capacity=1 + len(original_destination_cid) + len(packet_without_tag))
    pseudo.push_uint8(len(original_destination_cid))
    pseudo.push_bytes(original_destination_cid)
    pseudo.push_bytes(packet_without_tag)
    assert pseudo.eof()

    # The key material is fixed but differs per protocol version.
    if version == QuicProtocolVersion.VERSION_2:
        aead_key, aead_nonce = RETRY_AEAD_KEY_VERSION_2, RETRY_AEAD_NONCE_VERSION_2
    else:
        aead_key, aead_nonce = RETRY_AEAD_KEY_VERSION_1, RETRY_AEAD_NONCE_VERSION_1

    # AES-128-GCM over an empty plaintext; the ciphertext is just the tag.
    integrity_tag = AESGCM(aead_key).encrypt(aead_nonce, b"", pseudo.data)
    assert len(integrity_tag) == RETRY_INTEGRITY_TAG_SIZE
    return integrity_tag
|
||||
|
||||
|
||||
def get_spin_bit(first_byte: int) -> bool:
    """Extract the latency spin bit from a packet's first byte."""
    return (first_byte & PACKET_SPIN_BIT) != 0
|
||||
|
||||
|
||||
def is_long_header(first_byte: int) -> bool:
    """Return True if the first byte marks a long header packet."""
    return (first_byte & PACKET_LONG_HEADER) != 0
|
||||
|
||||
|
||||
def pretty_protocol_version(version: int) -> str:
    """
    Return a user-friendly representation of a protocol version.
    """
    try:
        name = QuicProtocolVersion(version).name
    except ValueError:
        # Not one of the versions we know about.
        name = "UNKNOWN"
    return "0x%08x (%s)" % (version, name)
|
||||
|
||||
|
||||
def pull_quic_header(buf: Buffer, host_cid_length: Optional[int] = None) -> QuicHeader:
    """
    Parse a QUIC packet header from `buf`.

    :param buf: a buffer positioned at the start of the packet.
    :param host_cid_length: the length of connection IDs issued by this host,
        required to parse short header packets (their CID length is implicit).
    :raises ValueError: if the header is malformed.
    """
    packet_start = buf.tell()

    # Fields which are only present in some packet types.
    version = None
    integrity_tag = b""
    supported_versions = []
    token = b""

    first_byte = buf.pull_uint8()
    if is_long_header(first_byte):
        # Long Header Packets.
        # https://datatracker.ietf.org/doc/html/rfc9000#section-17.2
        version = buf.pull_uint32()

        destination_cid_length = buf.pull_uint8()
        if destination_cid_length > CONNECTION_ID_MAX_SIZE:
            raise ValueError(
                "Destination CID is too long (%d bytes)" % destination_cid_length
            )
        destination_cid = buf.pull_bytes(destination_cid_length)

        source_cid_length = buf.pull_uint8()
        if source_cid_length > CONNECTION_ID_MAX_SIZE:
            raise ValueError("Source CID is too long (%d bytes)" % source_cid_length)
        source_cid = buf.pull_bytes(source_cid_length)

        if version == QuicProtocolVersion.NEGOTIATION:
            # Version Negotiation Packet.
            # https://datatracker.ietf.org/doc/html/rfc9000#section-17.2.1
            packet_type = QuicPacketType.VERSION_NEGOTIATION
            while not buf.eof():
                supported_versions.append(buf.pull_uint32())
            packet_end = buf.tell()
        else:
            if not (first_byte & PACKET_FIXED_BIT):
                raise ValueError("Packet fixed bit is zero")

            # The long packet type bits are encoded differently in v1 and v2.
            if version == QuicProtocolVersion.VERSION_2:
                packet_type = PACKET_LONG_TYPE_DECODE_VERSION_2[
                    (first_byte & 0x30) >> 4
                ]
            else:
                packet_type = PACKET_LONG_TYPE_DECODE_VERSION_1[
                    (first_byte & 0x30) >> 4
                ]

            if packet_type == QuicPacketType.INITIAL:
                token_length = buf.pull_uint_var()
                token = buf.pull_bytes(token_length)
                rest_length = buf.pull_uint_var()
            elif packet_type == QuicPacketType.ZERO_RTT:
                rest_length = buf.pull_uint_var()
            elif packet_type == QuicPacketType.HANDSHAKE:
                rest_length = buf.pull_uint_var()
            else:
                # RETRY: the token runs to the end of the buffer, minus the
                # trailing integrity tag.
                token_length = buf.capacity - buf.tell() - RETRY_INTEGRITY_TAG_SIZE
                token = buf.pull_bytes(token_length)
                integrity_tag = buf.pull_bytes(RETRY_INTEGRITY_TAG_SIZE)
                rest_length = 0

            # Check remainder length.
            packet_end = buf.tell() + rest_length
            if packet_end > buf.capacity:
                raise ValueError("Packet payload is truncated")

    else:
        # Short Header Packets.
        # https://datatracker.ietf.org/doc/html/rfc9000#section-17.3
        if not (first_byte & PACKET_FIXED_BIT):
            raise ValueError("Packet fixed bit is zero")

        version = None
        packet_type = QuicPacketType.ONE_RTT
        destination_cid = buf.pull_bytes(host_cid_length)
        source_cid = b""
        # A short header packet extends to the end of the buffer.
        packet_end = buf.capacity

    return QuicHeader(
        version=version,
        packet_type=packet_type,
        packet_length=packet_end - packet_start,
        destination_cid=destination_cid,
        source_cid=source_cid,
        token=token,
        integrity_tag=integrity_tag,
        supported_versions=supported_versions,
    )
|
||||
|
||||
|
||||
def encode_long_header_first_byte(
    version: int, packet_type: QuicPacketType, bits: int
) -> int:
    """
    Encode the first byte of a long header packet.
    """
    # The packet type wire encoding depends on the protocol version.
    if version == QuicProtocolVersion.VERSION_2:
        type_bits = PACKET_LONG_TYPE_ENCODE_VERSION_2[packet_type]
    else:
        type_bits = PACKET_LONG_TYPE_ENCODE_VERSION_1[packet_type]
    return PACKET_LONG_HEADER | PACKET_FIXED_BIT | (type_bits << 4) | bits
|
||||
|
||||
|
||||
def encode_quic_retry(
    version: int,
    source_cid: bytes,
    destination_cid: bytes,
    original_destination_cid: bytes,
    retry_token: bytes,
    unused: int = 0,
) -> bytes:
    """
    Build a RETRY packet, including its trailing integrity tag.

    :param unused: value for the four low "unused" bits of the first byte.
    """
    # 7 = first byte + 4-byte version + two CID length bytes.
    buf = Buffer(
        capacity=7
        + len(destination_cid)
        + len(source_cid)
        + len(retry_token)
        + RETRY_INTEGRITY_TAG_SIZE
    )
    buf.push_uint8(encode_long_header_first_byte(version, QuicPacketType.RETRY, unused))
    buf.push_uint32(version)
    buf.push_uint8(len(destination_cid))
    buf.push_bytes(destination_cid)
    buf.push_uint8(len(source_cid))
    buf.push_bytes(source_cid)
    buf.push_bytes(retry_token)
    # The tag is computed over everything pushed so far.
    buf.push_bytes(
        get_retry_integrity_tag(buf.data, original_destination_cid, version=version)
    )
    assert buf.eof()
    return buf.data
|
||||
|
||||
|
||||
def encode_quic_version_negotiation(
    source_cid: bytes, destination_cid: bytes, supported_versions: List[int]
) -> bytes:
    """
    Build a Version Negotiation packet advertising `supported_versions`.
    """
    # 7 = first byte + 4-byte version + two CID length bytes.
    buf = Buffer(
        capacity=7
        + len(destination_cid)
        + len(source_cid)
        + 4 * len(supported_versions)
    )
    # Only the long-header bit of the first byte is meaningful; the rest is
    # randomized.
    buf.push_uint8(os.urandom(1)[0] | PACKET_LONG_HEADER)
    buf.push_uint32(QuicProtocolVersion.NEGOTIATION)
    buf.push_uint8(len(destination_cid))
    buf.push_bytes(destination_cid)
    buf.push_uint8(len(source_cid))
    buf.push_bytes(source_cid)
    for version in supported_versions:
        buf.push_uint32(version)
    return buf.data
|
||||
|
||||
|
||||
# TLS EXTENSION
|
||||
|
||||
|
||||
@dataclass
class QuicPreferredAddress:
    """Value of the preferred_address transport parameter."""

    ipv4_address: Optional[Tuple[str, int]]  # (host, port), None if absent
    ipv6_address: Optional[Tuple[str, int]]  # (host, port), None if absent
    connection_id: bytes
    stateless_reset_token: bytes
|
||||
|
||||
|
||||
@dataclass
class QuicVersionInformation:
    """Value of the version_information transport parameter (RFC 9368)."""

    chosen_version: int
    available_versions: List[int]
|
||||
|
||||
|
||||
@dataclass
class QuicTransportParameters:
    """
    The transport parameters exchanged during the handshake.

    A value of None means the parameter was not sent / received; see the
    PARAMS table for the wire IDs and value types.
    """

    original_destination_connection_id: Optional[bytes] = None
    max_idle_timeout: Optional[int] = None
    stateless_reset_token: Optional[bytes] = None
    max_udp_payload_size: Optional[int] = None
    initial_max_data: Optional[int] = None
    initial_max_stream_data_bidi_local: Optional[int] = None
    initial_max_stream_data_bidi_remote: Optional[int] = None
    initial_max_stream_data_uni: Optional[int] = None
    initial_max_streams_bidi: Optional[int] = None
    initial_max_streams_uni: Optional[int] = None
    ack_delay_exponent: Optional[int] = None
    max_ack_delay: Optional[int] = None
    disable_active_migration: Optional[bool] = False
    preferred_address: Optional[QuicPreferredAddress] = None
    active_connection_id_limit: Optional[int] = None
    initial_source_connection_id: Optional[bytes] = None
    retry_source_connection_id: Optional[bytes] = None
    version_information: Optional[QuicVersionInformation] = None
    max_datagram_frame_size: Optional[int] = None
    quantum_readiness: Optional[bytes] = None
|
||||
|
||||
|
||||
# Mapping of transport parameter wire ID -> (attribute name, value type).
# The type drives how pull/push_quic_transport_parameters (de)serialize it.
PARAMS = {
    0x00: ("original_destination_connection_id", bytes),
    0x01: ("max_idle_timeout", int),
    0x02: ("stateless_reset_token", bytes),
    0x03: ("max_udp_payload_size", int),
    0x04: ("initial_max_data", int),
    0x05: ("initial_max_stream_data_bidi_local", int),
    0x06: ("initial_max_stream_data_bidi_remote", int),
    0x07: ("initial_max_stream_data_uni", int),
    0x08: ("initial_max_streams_bidi", int),
    0x09: ("initial_max_streams_uni", int),
    0x0A: ("ack_delay_exponent", int),
    0x0B: ("max_ack_delay", int),
    0x0C: ("disable_active_migration", bool),
    0x0D: ("preferred_address", QuicPreferredAddress),
    0x0E: ("active_connection_id_limit", int),
    0x0F: ("initial_source_connection_id", bytes),
    0x10: ("retry_source_connection_id", bytes),
    # https://datatracker.ietf.org/doc/html/rfc9368#section-3
    0x11: ("version_information", QuicVersionInformation),
    # extensions
    0x0020: ("max_datagram_frame_size", int),
    0x0C37: ("quantum_readiness", bytes),
}
|
||||
|
||||
|
||||
def pull_quic_preferred_address(buf: Buffer) -> QuicPreferredAddress:
    """
    Parse a preferred_address transport parameter value.

    An all-zero host address means the corresponding address family is absent.
    """
    ipv4_address = None
    ipv4_host = buf.pull_bytes(4)
    ipv4_port = buf.pull_uint16()
    if ipv4_host != bytes(4):
        ipv4_address = (str(ipaddress.IPv4Address(ipv4_host)), ipv4_port)

    ipv6_address = None
    ipv6_host = buf.pull_bytes(16)
    ipv6_port = buf.pull_uint16()
    if ipv6_host != bytes(16):
        ipv6_address = (str(ipaddress.IPv6Address(ipv6_host)), ipv6_port)

    connection_id_length = buf.pull_uint8()
    connection_id = buf.pull_bytes(connection_id_length)
    stateless_reset_token = buf.pull_bytes(16)

    return QuicPreferredAddress(
        ipv4_address=ipv4_address,
        ipv6_address=ipv6_address,
        connection_id=connection_id,
        stateless_reset_token=stateless_reset_token,
    )
|
||||
|
||||
|
||||
def push_quic_preferred_address(
    buf: Buffer, preferred_address: QuicPreferredAddress
) -> None:
    """
    Serialize a preferred_address transport parameter value into `buf`.

    An absent address family is written as an all-zero host and port.
    """
    if preferred_address.ipv4_address is not None:
        buf.push_bytes(ipaddress.IPv4Address(preferred_address.ipv4_address[0]).packed)
        buf.push_uint16(preferred_address.ipv4_address[1])
    else:
        # 4-byte address + 2-byte port, all zero.
        buf.push_bytes(bytes(6))

    if preferred_address.ipv6_address is not None:
        buf.push_bytes(ipaddress.IPv6Address(preferred_address.ipv6_address[0]).packed)
        buf.push_uint16(preferred_address.ipv6_address[1])
    else:
        # 16-byte address + 2-byte port, all zero.
        buf.push_bytes(bytes(18))

    buf.push_uint8(len(preferred_address.connection_id))
    buf.push_bytes(preferred_address.connection_id)
    buf.push_bytes(preferred_address.stateless_reset_token)
|
||||
|
||||
|
||||
def pull_quic_version_information(buf: Buffer, length: int) -> QuicVersionInformation:
    """
    Parse a version_information transport parameter of `length` bytes.

    :raises ValueError: if any version equals zero.
    """
    chosen_version = buf.pull_uint32()
    # The remaining (length // 4 - 1) uint32 values are the available versions.
    available_versions = [buf.pull_uint32() for _ in range(length // 4 - 1)]

    # If an endpoint receives a Chosen Version equal to zero, or any Available
    # Version equal to zero, it MUST treat it as a parsing failure.
    #
    # https://datatracker.ietf.org/doc/html/rfc9368#section-4
    if chosen_version == 0 or 0 in available_versions:
        raise ValueError("Version Information must not contain version 0")

    return QuicVersionInformation(
        chosen_version=chosen_version,
        available_versions=available_versions,
    )
|
||||
|
||||
|
||||
def push_quic_version_information(
    buf: Buffer, version_information: QuicVersionInformation
) -> None:
    """Serialize a version_information transport parameter value into `buf`."""
    # The chosen version comes first, then the available versions.
    versions = [version_information.chosen_version]
    versions.extend(version_information.available_versions)
    for value in versions:
        buf.push_uint32(value)
|
||||
|
||||
|
||||
def pull_quic_transport_parameters(buf: Buffer) -> QuicTransportParameters:
    """
    Parse a serialized sequence of transport parameters.

    Unknown parameter IDs are skipped; known ones are decoded according to
    the value type declared in PARAMS.

    :raises ValueError: if a parameter's declared length does not match the
        number of bytes actually consumed.
    """
    params = QuicTransportParameters()
    while not buf.eof():
        param_id = buf.pull_uint_var()
        param_len = buf.pull_uint_var()
        param_start = buf.tell()
        if param_id in PARAMS:
            # Parse known parameter.
            param_name, param_type = PARAMS[param_id]
            if param_type is int:
                setattr(params, param_name, buf.pull_uint_var())
            elif param_type is bytes:
                setattr(params, param_name, buf.pull_bytes(param_len))
            elif param_type is QuicPreferredAddress:
                setattr(params, param_name, pull_quic_preferred_address(buf))
            elif param_type is QuicVersionInformation:
                setattr(
                    params,
                    param_name,
                    pull_quic_version_information(buf, param_len),
                )
            else:
                # bool parameters are zero-length flags; presence means True.
                setattr(params, param_name, True)
        else:
            # Skip unknown parameter.
            buf.pull_bytes(param_len)

        if buf.tell() != param_start + param_len:
            raise ValueError("Transport parameter length does not match")

    return params
|
||||
|
||||
|
||||
def push_quic_transport_parameters(
    buf: Buffer, params: QuicTransportParameters
) -> None:
    """
    Serialize the set transport parameters into `buf`.

    Parameters whose value is None (unset) or False are omitted; each value
    is first written to a scratch buffer so its length can be prefixed.
    """
    for param_id, (param_name, param_type) in PARAMS.items():
        param_value = getattr(params, param_name)
        if param_value is not None and param_value is not False:
            param_buf = Buffer(capacity=65536)
            if param_type is int:
                param_buf.push_uint_var(param_value)
            elif param_type is bytes:
                param_buf.push_bytes(param_value)
            elif param_type is QuicPreferredAddress:
                push_quic_preferred_address(param_buf, param_value)
            elif param_type is QuicVersionInformation:
                push_quic_version_information(param_buf, param_value)
            # bool parameters (True) are encoded as a zero-length value.
            buf.push_uint_var(param_id)
            buf.push_uint_var(param_buf.tell())
            buf.push_bytes(param_buf.data)
|
||||
|
||||
|
||||
# FRAMES
|
||||
|
||||
|
||||
class QuicFrameType(IntEnum):
    """Wire values of the QUIC frame types."""

    PADDING = 0x00
    PING = 0x01
    ACK = 0x02
    ACK_ECN = 0x03
    RESET_STREAM = 0x04
    STOP_SENDING = 0x05
    CRYPTO = 0x06
    NEW_TOKEN = 0x07
    # STREAM frames occupy 0x08-0x0F; low bits carry OFF/LEN/FIN flags.
    STREAM_BASE = 0x08
    MAX_DATA = 0x10
    MAX_STREAM_DATA = 0x11
    MAX_STREAMS_BIDI = 0x12
    MAX_STREAMS_UNI = 0x13
    DATA_BLOCKED = 0x14
    STREAM_DATA_BLOCKED = 0x15
    STREAMS_BLOCKED_BIDI = 0x16
    STREAMS_BLOCKED_UNI = 0x17
    NEW_CONNECTION_ID = 0x18
    RETIRE_CONNECTION_ID = 0x19
    PATH_CHALLENGE = 0x1A
    PATH_RESPONSE = 0x1B
    TRANSPORT_CLOSE = 0x1C
    APPLICATION_CLOSE = 0x1D
    HANDSHAKE_DONE = 0x1E
    DATAGRAM = 0x30
    DATAGRAM_WITH_LENGTH = 0x31
|
||||
|
||||
|
||||
# Frame types which do not elicit an acknowledgement from the peer.
NON_ACK_ELICITING_FRAME_TYPES = frozenset(
    [
        QuicFrameType.ACK,
        QuicFrameType.ACK_ECN,
        QuicFrameType.PADDING,
        QuicFrameType.TRANSPORT_CLOSE,
        QuicFrameType.APPLICATION_CLOSE,
    ]
)
# Frame types which do not count towards bytes in flight.
NON_IN_FLIGHT_FRAME_TYPES = frozenset(
    [
        QuicFrameType.ACK,
        QuicFrameType.ACK_ECN,
        QuicFrameType.TRANSPORT_CLOSE,
        QuicFrameType.APPLICATION_CLOSE,
    ]
)

# Frame types allowed in path-probing packets.
PROBING_FRAME_TYPES = frozenset(
    [
        QuicFrameType.PATH_CHALLENGE,
        QuicFrameType.PATH_RESPONSE,
        QuicFrameType.PADDING,
        QuicFrameType.NEW_CONNECTION_ID,
    ]
)
|
||||
|
||||
|
||||
@dataclass
class QuicResetStreamFrame:
    """Parsed RESET_STREAM frame."""

    error_code: int  # application error code for the reset
    final_size: int  # final size of the stream, in bytes
    stream_id: int
|
||||
|
||||
|
||||
@dataclass
class QuicStopSendingFrame:
    """Parsed STOP_SENDING frame."""

    error_code: int  # application error code
    stream_id: int
|
||||
|
||||
|
||||
@dataclass
class QuicStreamFrame:
    """Payload of a STREAM or CRYPTO frame."""

    data: bytes = b""
    fin: bool = False  # whether this frame ends the stream
    offset: int = 0  # byte offset of `data` within the stream
|
||||
|
||||
|
||||
def pull_ack_frame(buf: Buffer) -> Tuple[RangeSet, int]:
    """
    Parse an ACK frame body and return (acked ranges, ack delay).

    The wire format encodes ranges as gap / length pairs descending from the
    largest acknowledged packet number; they are reassembled into a RangeSet.
    """
    rangeset = RangeSet()
    end = buf.pull_uint_var()  # largest acknowledged
    delay = buf.pull_uint_var()
    ack_range_count = buf.pull_uint_var()
    ack_count = buf.pull_uint_var()  # first ack range
    rangeset.add(end - ack_count, end + 1)
    end -= ack_count
    for _ in range(ack_range_count):
        # Each further range is (gap, length); +2 accounts for the wire
        # encoding's off-by-one in both values.
        end -= buf.pull_uint_var() + 2
        ack_count = buf.pull_uint_var()
        rangeset.add(end - ack_count, end + 1)
        end -= ack_count
    return rangeset, delay
|
||||
|
||||
|
||||
def push_ack_frame(buf: Buffer, rangeset: RangeSet, delay: int) -> int:
    """
    Serialize an ACK frame body for `rangeset` and return the range count.

    Ranges are written from the highest packet numbers downwards, as gap /
    length pairs, mirroring pull_ack_frame.
    """
    ranges = len(rangeset)
    index = ranges - 1
    r = rangeset[index]
    buf.push_uint_var(r.stop - 1)  # largest acknowledged
    buf.push_uint_var(delay)
    buf.push_uint_var(index)  # ack range count
    buf.push_uint_var(r.stop - 1 - r.start)  # first ack range
    start = r.start
    while index > 0:
        index -= 1
        r = rangeset[index]
        buf.push_uint_var(start - r.stop - 1)  # gap
        buf.push_uint_var(r.stop - r.start - 1)  # ack range length
        start = r.start
    return ranges
|
||||
@ -0,0 +1,384 @@
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
|
||||
|
||||
from ..buffer import Buffer, size_uint_var
|
||||
from ..tls import Epoch
|
||||
from .crypto import CryptoPair
|
||||
from .logger import QuicLoggerTrace
|
||||
from .packet import (
|
||||
NON_ACK_ELICITING_FRAME_TYPES,
|
||||
NON_IN_FLIGHT_FRAME_TYPES,
|
||||
PACKET_FIXED_BIT,
|
||||
PACKET_NUMBER_MAX_SIZE,
|
||||
QuicFrameType,
|
||||
QuicPacketType,
|
||||
encode_long_header_first_byte,
|
||||
)
|
||||
|
||||
# Number of bytes reserved when sending for the length and packet number
# fields of a long header packet.
PACKET_LENGTH_SEND_SIZE = 2
PACKET_NUMBER_SEND_SIZE = 2


# Callback invoked when a sent frame is acknowledged or lost.
QuicDeliveryHandler = Callable[..., None]
|
||||
|
||||
|
||||
class QuicDeliveryState(Enum):
    """Final delivery outcome of a sent frame."""

    ACKED = 0
    LOST = 1
|
||||
|
||||
|
||||
@dataclass
class QuicSentPacket:
    """Book-keeping for a packet that has been built and (possibly) sent."""

    epoch: Epoch
    in_flight: bool  # counts towards bytes in flight
    is_ack_eliciting: bool
    is_crypto_packet: bool
    packet_number: int
    packet_type: QuicPacketType
    sent_time: Optional[float] = None  # set when the datagram is sent
    sent_bytes: int = 0

    # (handler, args) pairs invoked when the packet is acked or lost.
    delivery_handlers: List[Tuple[QuicDeliveryHandler, Any]] = field(
        default_factory=list
    )
    # qlog representations of the frames the packet carries.
    quic_logger_frames: List[Dict] = field(default_factory=list)
|
||||
|
||||
|
||||
class QuicPacketBuilderStop(Exception):
    """Raised by the builder when there is no room left for more frames."""

    pass
|
||||
|
||||
|
||||
class QuicPacketBuilder:
|
||||
"""
|
||||
Helper for building QUIC packets.
|
||||
"""
|
||||
|
||||
def __init__(
    self,
    *,
    host_cid: bytes,
    peer_cid: bytes,
    version: int,
    is_client: bool,
    max_datagram_size: int,
    packet_number: int = 0,
    peer_token: bytes = b"",
    quic_logger: Optional[QuicLoggerTrace] = None,
    spin_bit: bool = False,
) -> None:
    """
    :param max_datagram_size: upper bound, in bytes, for each datagram built.
    :param packet_number: first packet number to use.
    """
    # Optional limits the caller may set after construction.
    self.max_flight_bytes: Optional[int] = None
    self.max_total_bytes: Optional[int] = None
    self.quic_logger_frames: Optional[List[Dict]] = None

    self._host_cid = host_cid
    self._is_client = is_client
    self._peer_cid = peer_cid
    self._peer_token = peer_token
    self._quic_logger = quic_logger
    self._spin_bit = spin_bit
    self._version = version

    # assembled datagrams and packets
    self._datagrams: List[bytes] = []
    self._datagram_flight_bytes = 0
    self._datagram_init = True
    self._datagram_needs_padding = False
    self._packets: List[QuicSentPacket] = []
    self._flight_bytes = 0
    self._total_bytes = 0

    # current packet
    self._header_size = 0
    self._packet: Optional[QuicSentPacket] = None
    self._packet_crypto: Optional[CryptoPair] = None
    self._packet_number = packet_number
    self._packet_start = 0
    self._packet_type: Optional[QuicPacketType] = None

    self._buffer = Buffer(max_datagram_size)
    self._buffer_capacity = max_datagram_size
    self._flight_capacity = max_datagram_size
|
||||
|
||||
@property
def packet_is_empty(self) -> bool:
    """
    Returns `True` if the current packet does not contain any frames yet.
    """
    assert self._packet is not None
    bytes_written = self._buffer.tell() - self._packet_start
    return bytes_written <= self._header_size
|
||||
|
||||
@property
def packet_number(self) -> int:
    """
    Returns the packet number for the next packet.
    """
    return self._packet_number
|
||||
|
||||
@property
def remaining_buffer_space(self) -> int:
    """
    Returns the number of bytes still available in the current packet,
    reserving room for the AEAD authentication tag.
    """
    return (
        self._buffer_capacity
        - self._buffer.tell()
        - self._packet_crypto.aead_tag_size
    )
|
||||
|
||||
@property
def remaining_flight_space(self) -> int:
    """
    Number of bytes still writable in the current packet without
    exceeding the in-flight byte budget, keeping room for the AEAD
    authentication tag.
    """
    used = self._buffer.tell() + self._packet_crypto.aead_tag_size
    return self._flight_capacity - used
|
||||
|
||||
def flush(self) -> Tuple[List[bytes], List[QuicSentPacket]]:
    """
    Ends any in-progress packet, finalizes the current datagram and
    returns the assembled datagrams with their packet metadata,
    resetting the builder's internal lists.
    """
    if self._packet is not None:
        self._end_packet()
    self._flush_current_datagram()

    # Hand the accumulated results to the caller and start afresh.
    datagrams, self._datagrams = self._datagrams, []
    packets, self._packets = self._packets, []
    return datagrams, packets
|
||||
|
||||
def start_frame(
    self,
    frame_type: int,
    capacity: int = 1,
    handler: Optional[QuicDeliveryHandler] = None,
    # NOTE: default changed from a mutable `[]` to an immutable `()` —
    # the default object is stored in `delivery_handlers` and shared
    # across every call, so a handler mutating it would leak state
    # between frames (flake8-bugbear B006). Iterating consumers are
    # unaffected by the list -> tuple change.
    handler_args: Sequence[Any] = (),
) -> Buffer:
    """
    Starts a new frame and returns the buffer to write its payload into.

    :param frame_type: the QUIC frame type to write.
    :param capacity: minimum number of bytes the frame requires; if the
        buffer — or, for in-flight frames, the flight budget — cannot
        hold it, :class:`QuicPacketBuilderStop` is raised.
    :param handler: optional callback invoked when the frame's delivery
        outcome is known.
    :param handler_args: positional arguments passed to `handler`.
    """
    if self.remaining_buffer_space < capacity or (
        frame_type not in NON_IN_FLIGHT_FRAME_TYPES
        and self.remaining_flight_space < capacity
    ):
        raise QuicPacketBuilderStop

    self._buffer.push_uint_var(frame_type)
    # Update the packet's bookkeeping flags according to the frame type.
    if frame_type not in NON_ACK_ELICITING_FRAME_TYPES:
        self._packet.is_ack_eliciting = True
    if frame_type not in NON_IN_FLIGHT_FRAME_TYPES:
        self._packet.in_flight = True
    if frame_type == QuicFrameType.CRYPTO:
        self._packet.is_crypto_packet = True
    if handler is not None:
        self._packet.delivery_handlers.append((handler, handler_args))
    return self._buffer
|
||||
|
||||
def start_packet(self, packet_type: QuicPacketType, crypto: CryptoPair) -> None:
    """
    Starts a new packet.

    Ends any previous packet, starts a fresh datagram when too little
    room remains, applies the caller-imposed byte budgets on datagram
    start, reserves space for the packet header and records the new
    packet's metadata.

    :param packet_type: one of INITIAL, HANDSHAKE, ZERO_RTT or ONE_RTT.
    :param crypto: the key material used later to seal the packet.
    :raises QuicPacketBuilderStop: if even the header does not fit.
    """
    assert packet_type in (
        QuicPacketType.INITIAL,
        QuicPacketType.HANDSHAKE,
        QuicPacketType.ZERO_RTT,
        QuicPacketType.ONE_RTT,
    ), "Invalid packet type"
    buf = self._buffer

    # Finish the previous packet, if any.
    if self._packet is not None:
        self._end_packet()

    # If there is too little space remaining, start a new datagram.
    # FIXME: the limit is arbitrary!
    packet_start = buf.tell()
    if self._buffer_capacity - packet_start < 128:
        self._flush_current_datagram()
        packet_start = 0

    # Initialize the datagram if needed: shrink the buffer and flight
    # capacities so the datagram honours max_total_bytes / max_flight_bytes.
    if self._datagram_init:
        if self.max_total_bytes is not None:
            remaining_total_bytes = self.max_total_bytes - self._total_bytes
            if remaining_total_bytes < self._buffer_capacity:
                self._buffer_capacity = remaining_total_bytes

        self._flight_capacity = self._buffer_capacity
        if self.max_flight_bytes is not None:
            remaining_flight_bytes = self.max_flight_bytes - self._flight_bytes
            if remaining_flight_bytes < self._flight_capacity:
                self._flight_capacity = remaining_flight_bytes
        self._datagram_flight_bytes = 0
        self._datagram_init = False
        self._datagram_needs_padding = False

    # Calculate the header size: long headers carry both connection IDs
    # (plus the token for INITIAL), short headers only the peer's CID.
    if packet_type != QuicPacketType.ONE_RTT:
        header_size = 11 + len(self._peer_cid) + len(self._host_cid)
        if packet_type == QuicPacketType.INITIAL:
            token_length = len(self._peer_token)
            header_size += size_uint_var(token_length) + token_length
    else:
        header_size = 3 + len(self._peer_cid)

    # Check we have enough space for at least the header.
    if packet_start + header_size >= self._buffer_capacity:
        raise QuicPacketBuilderStop

    # Determine the ACK epoch; ZERO_RTT acks belong to the 1-RTT space.
    if packet_type == QuicPacketType.INITIAL:
        epoch = Epoch.INITIAL
    elif packet_type == QuicPacketType.HANDSHAKE:
        epoch = Epoch.HANDSHAKE
    else:
        epoch = Epoch.ONE_RTT

    self._header_size = header_size
    self._packet = QuicSentPacket(
        epoch=epoch,
        in_flight=False,
        is_ack_eliciting=False,
        is_crypto_packet=False,
        packet_number=self._packet_number,
        packet_type=packet_type,
    )
    self._packet_crypto = crypto
    self._packet_start = packet_start
    self._packet_type = packet_type
    self.quic_logger_frames = self._packet.quic_logger_frames

    # Skip over the reserved header space; frames are written after it
    # and the header itself is back-filled in _end_packet().
    buf.seek(self._packet_start + self._header_size)
|
||||
|
||||
def _end_packet(self) -> None:
    """
    Ends the current packet.

    If the packet contains at least one frame: pad it, back-fill the
    header, encrypt in place and update the flight accounting.
    Otherwise the reserved header space is simply reclaimed.
    """
    buf = self._buffer
    packet_size = buf.tell() - self._packet_start
    if packet_size > self._header_size:
        # Padding to ensure a sufficient sample size for header
        # protection, given we send a truncated packet number.
        padding_size = (
            PACKET_NUMBER_MAX_SIZE
            - PACKET_NUMBER_SEND_SIZE
            + self._header_size
            - packet_size
        )

        # Padding for datagrams containing initial packets; see RFC 9000
        # section 14.1.
        if (
            self._is_client or self._packet.is_ack_eliciting
        ) and self._packet_type == QuicPacketType.INITIAL:
            self._datagram_needs_padding = True

        # For datagrams containing 1-RTT data, we *must* apply the padding
        # inside the packet, we cannot tack bytes onto the end of the
        # datagram.
        if (
            self._datagram_needs_padding
            and self._packet_type == QuicPacketType.ONE_RTT
        ):
            if self.remaining_flight_space > padding_size:
                padding_size = self.remaining_flight_space
            self._datagram_needs_padding = False

        # Write the padding; a padded packet always counts as in flight.
        if padding_size > 0:
            buf.push_bytes(bytes(padding_size))
            packet_size += padding_size
            self._packet.in_flight = True

            # Record the padding frame in the qlog trace, if enabled.
            if self._quic_logger is not None:
                self._packet.quic_logger_frames.append(
                    self._quic_logger.encode_padding_frame()
                )

        # Back-fill the header at the start of the packet.
        if self._packet_type != QuicPacketType.ONE_RTT:
            # Long header: the length field covers the packet number and
            # payload including the AEAD tag.
            length = (
                packet_size
                - self._header_size
                + PACKET_NUMBER_SEND_SIZE
                + self._packet_crypto.aead_tag_size
            )

            buf.seek(self._packet_start)
            buf.push_uint8(
                encode_long_header_first_byte(
                    self._version, self._packet_type, PACKET_NUMBER_SEND_SIZE - 1
                )
            )
            buf.push_uint32(self._version)
            buf.push_uint8(len(self._peer_cid))
            buf.push_bytes(self._peer_cid)
            buf.push_uint8(len(self._host_cid))
            buf.push_bytes(self._host_cid)
            if self._packet_type == QuicPacketType.INITIAL:
                buf.push_uint_var(len(self._peer_token))
                buf.push_bytes(self._peer_token)
            # 0x4000 forces a 2-byte varint encoding of the length.
            buf.push_uint16(length | 0x4000)
            # Truncated packet number (PACKET_NUMBER_SEND_SIZE bytes).
            buf.push_uint16(self._packet_number & 0xFFFF)
        else:
            # Short header: first byte carries the fixed bit, spin bit,
            # key phase and packet number length.
            buf.seek(self._packet_start)
            buf.push_uint8(
                PACKET_FIXED_BIT
                | (self._spin_bit << 5)
                | (self._packet_crypto.key_phase << 2)
                | (PACKET_NUMBER_SEND_SIZE - 1)
            )
            buf.push_bytes(self._peer_cid)
            buf.push_uint16(self._packet_number & 0xFFFF)

        # Encrypt in place: the plaintext packet is replaced by the
        # sealed packet (header protection included by encrypt_packet).
        plain = buf.data_slice(self._packet_start, self._packet_start + packet_size)
        buf.seek(self._packet_start)
        buf.push_bytes(
            self._packet_crypto.encrypt_packet(
                plain[0 : self._header_size],
                plain[self._header_size : packet_size],
                self._packet_number,
            )
        )
        self._packet.sent_bytes = buf.tell() - self._packet_start
        self._packets.append(self._packet)
        if self._packet.in_flight:
            self._datagram_flight_bytes += self._packet.sent_bytes

        # Short header packets cannot be coalesced, we need a new datagram.
        if self._packet_type == QuicPacketType.ONE_RTT:
            self._flush_current_datagram()

        self._packet_number += 1
    else:
        # "Cancel" the empty packet by rewinding over its header space.
        buf.seek(self._packet_start)

    self._packet = None
    self.quic_logger_frames = None
|
||||
|
||||
def _flush_current_datagram(self) -> None:
    """
    Finalizes the datagram currently in the buffer, if any: applies
    trailing padding when required, stores the datagram, updates the
    byte accounting and rewinds the buffer.
    """
    written = self._buffer.tell()
    if not written:
        return

    # Padding for datagrams containing initial packets; see RFC 9000
    # section 14.1.
    if self._datagram_needs_padding:
        pad = self._flight_capacity - self._buffer.tell()
        if pad > 0:
            self._buffer.push_bytes(bytes(pad))
            self._datagram_flight_bytes += pad
            written += pad

    self._datagrams.append(self._buffer.data)
    self._flight_bytes += self._datagram_flight_bytes
    self._total_bytes += written
    self._datagram_init = True
    self._buffer.seek(0)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user