#!/usr/bin/env python3
import requests
import json
import os
import sys
import argparse
from typing import Dict, List, Optional, Tuple
from openai import OpenAI


class SecurityHeadersAnalyzer:
    """Fetch HTTP response headers and ask an LLM for a security analysis."""

    def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None, model: Optional[str] = None):
        # Explicit arguments take precedence; otherwise fall back to environment variables.
        self.api_key = api_key or os.getenv('OPENROUTER_API_KEY') or os.getenv('OPENAI_API_KEY')
        self.base_url = base_url or os.getenv('OPENROUTER_BASE_URL', 'https://openrouter.ai/api/v1')
        self.model = model or os.getenv('LLM_MODEL', 'deepseek/deepseek-chat-v3.1:free')

        if not self.api_key:
            raise ValueError("API key is required. Set OPENROUTER_API_KEY or provide --api-key")

        self.client = OpenAI(base_url=self.base_url, api_key=self.api_key)

    def fetch_headers(self, url: str, timeout: int = 10) -> Tuple[Dict[str, str], int]:
        """Fetch HTTP headers from a URL."""
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url

        try:
            response = requests.get(url, timeout=timeout, allow_redirects=True)
            return dict(response.headers), response.status_code
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {url}: {e}")
            return {}, 0

    def analyze_headers(self, url: str, headers: Dict[str, str], status_code: int) -> str:
        """Analyze headers using the LLM."""
        prompt = f"""Analyze the HTTP security headers for {url} (Status: {status_code})

Headers:
{json.dumps(headers, indent=2)}

Provide a comprehensive security analysis including:
1. Security score (0-100) and overall assessment
2. Critical security issues that need immediate attention
3. Missing important security headers
4. Analysis of existing security headers and their effectiveness
5. Specific recommendations for improvement
6. Potential security risks based on current configuration

Focus on practical, actionable advice that follows current web security best practices.
Do not use ** or # in the response except where a specific reference requires them;
structure the output with numbers, Roman numerals, or letters, and keep the formatting clean."""

        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.2
            )
            return completion.choices[0].message.content
        except Exception as e:
            return f"Analysis failed: {e}"

    def analyze_url(self, url: str, timeout: int = 10) -> Dict:
        """Analyze a single URL."""
        print(f"\nAnalyzing: {url}")
        print("-" * 50)

        headers, status_code = self.fetch_headers(url, timeout)
        if not headers:
            return {"url": url, "error": "Failed to fetch headers"}

        print(f"Status Code: {status_code}")
        print(f"\nHTTP Headers ({len(headers)} found):")
        print("-" * 30)
        for key, value in headers.items():
            print(f"{key}: {value}")

        print("\nAnalyzing with AI...")
        analysis = self.analyze_headers(url, headers, status_code)

        print("\nSECURITY ANALYSIS")
        print("=" * 50)
        print(analysis)

        return {
            "url": url,
            "status_code": status_code,
            "headers_count": len(headers),
            "analysis": analysis,
            "raw_headers": headers
        }

    def analyze_multiple_urls(self, urls: List[str], timeout: int = 10) -> List[Dict]:
        """Analyze multiple URLs."""
        results = []
        for i, url in enumerate(urls, 1):
            print(f"\n[{i}/{len(urls)}]")
            result = self.analyze_url(url, timeout)
            results.append(result)
        return results

    def export_results(self, results: List[Dict], filename: str):
        """Export results to a JSON file."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)
        print(f"\nResults exported to: {filename}")


def main():
    parser = argparse.ArgumentParser(
        description='Analyze HTTP security headers using AI',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''Examples:
  python security_headers.py https://example.com
  python security_headers.py example.com google.com
  python security_headers.py example.com --export results.json

Environment Variables:
  OPENROUTER_API_KEY - API key for OpenRouter
  OPENAI_API_KEY - API key for OpenAI
  LLM_MODEL - Model to use (default: deepseek/deepseek-chat-v3.1:free)'''
    )

    parser.add_argument('urls', nargs='+', help='URLs to analyze')
    parser.add_argument('--api-key', help='API key for the LLM service')
    parser.add_argument('--base-url', help='Base URL for the LLM API')
    parser.add_argument('--model', help='LLM model to use')
    parser.add_argument('--timeout', type=int, default=10, help='Request timeout in seconds (default: 10)')
    parser.add_argument('--export', help='Export results to a JSON file')

    args = parser.parse_args()

    try:
        analyzer = SecurityHeadersAnalyzer(
            api_key=args.api_key,
            base_url=args.base_url,
            model=args.model
        )

        results = analyzer.analyze_multiple_urls(args.urls, args.timeout)

        if args.export:
            analyzer.export_results(results, args.export)

    except ValueError as e:
        print(f"Error: {e}")
        return 1
    except KeyboardInterrupt:
        print("\nAnalysis interrupted by user")
        return 1

    return 0


if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
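
# Usage sketch (illustrative only, not part of the CLI): the analyzer can also be
# driven programmatically, assuming the module is saved as security_headers.py and
# OPENROUTER_API_KEY is set in the environment.
#
#     from security_headers import SecurityHeadersAnalyzer
#
#     analyzer = SecurityHeadersAnalyzer()
#     report = analyzer.analyze_url('https://example.com', timeout=15)
#     analyzer.export_results([report], 'example_report.json')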