diff --git a/day-01/system_health.py b/day-01/system_health.py new file mode 100644 index 00000000..d51f2975 --- /dev/null +++ b/day-01/system_health.py @@ -0,0 +1,15 @@ +import psutil + +def check_system_health(): + cpu_threshodld = input("Enter CPU usage threshold (in percentage): ") + print("current cpu threshold is : ", cpu_threshodld) + + current_cpu = psutil.cpu_percent(interval=1) + print("Current CPU %: ", current_cpu) + + if current_cpu > int(cpu_threshodld): + print("CPU usage is above the threshold! sending email aert....") + else: + print("cpu is in normal state") + +check_system_health() \ No newline at end of file diff --git a/day-02/mypractice/api.py b/day-02/mypractice/api.py new file mode 100644 index 00000000..1dc49a42 --- /dev/null +++ b/day-02/mypractice/api.py @@ -0,0 +1,2 @@ +# here we will learn how to use API in python + \ No newline at end of file diff --git a/day-02/mypractice/dict_ex.py b/day-02/mypractice/dict_ex.py new file mode 100644 index 00000000..2933d966 --- /dev/null +++ b/day-02/mypractice/dict_ex.py @@ -0,0 +1,19 @@ +info = { + "name": "Rahul kumar", + "age" : 24, + "city": "Bangalore", + "package" : 3.5, + "married" : False, + "favourites" : ["python", "linux", "devops"] + +} + + +print(info["city"]) +print("I am married: ", info["married"]) + +info.update({"village": "alwalpur"}) +print(info) + +for items, value in info.items(): + print(items,value) \ No newline at end of file diff --git a/day-02/mypractice/jokes_api.py b/day-02/mypractice/jokes_api.py new file mode 100644 index 00000000..4d9e1a1c --- /dev/null +++ b/day-02/mypractice/jokes_api.py @@ -0,0 +1,14 @@ +import requests + +url = "https://official-joke-api.appspot.com/random_joke" + +# this code is for practice purpose only to import API request and get the jokes from the API and print the setup and punchline of the joke. 
+ +def get_jokes(): + + response = requests.get(url=url) + for item,value in response.json().items(): + if item == "setup" or item == "punchline": + print(item,value) + +get_jokes() \ No newline at end of file diff --git a/day-02/mypractice/lists_ex.py b/day-02/mypractice/lists_ex.py new file mode 100644 index 00000000..65fa9c8c --- /dev/null +++ b/day-02/mypractice/lists_ex.py @@ -0,0 +1,26 @@ +a=[100,200,3.1,True] + +print(type(a)) +print(a) +print(a[0]) +print(a[1]) +print(a[2]) +print(a[3]) + + +a = [100, 200, 3.1, True,"Rahul"] +a.append(500) +for item in a: + print(item) + + + +clouds=list() +clouds.append("aws") +clouds.append("azure") + +#for item in clouds: +print(clouds) +print("length of cloud list:", len(clouds)) +print(dir(clouds)) +print(clouds.count.__doc__) \ No newline at end of file diff --git a/day-02/mypractice/stock_market_api.py b/day-02/mypractice/stock_market_api.py new file mode 100644 index 00000000..0cbc8a2c --- /dev/null +++ b/day-02/mypractice/stock_market_api.py @@ -0,0 +1,33 @@ +import requests + +API_KEY = "DAZR72VR6C6AVTHW" + +API_URL = "https://www.alphavantage.co/" + +#symbol = "IBM" + +def get_stock_market_data(symbol,is_time_series): + + + + query = f"query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={API_KEY}" + + #print(API_URL+query) + + response = requests.get(url=API_URL+query) + for item,value in response.json().items(): + if is_time_series == "yes": + + print(item,value) + + + else : + if item == "Meta Data": + print(item,value) + +symbol = input("Enter the stock symbol(IBM,GOGL,AMZN): ") +is_time_series = input("Do you want to see the time series data? 
(yes/no): ") + + +get_stock_market_data(symbol,is_time_series) + \ No newline at end of file diff --git a/day-02/practice/stock_market_api.py b/day-02/practice/stock_market_api.py index ac2ed527..3266a381 100644 --- a/day-02/practice/stock_market_api.py +++ b/day-02/practice/stock_market_api.py @@ -14,7 +14,7 @@ def get_stock_market_data(symbol,is_timeseries): for key, value in response.json().items(): if is_timeseries: - print(key,value) + print(key,value) else: if key == "Time Series (Daily)": continue diff --git a/day-03/solution.py b/day-03/solution.py new file mode 100644 index 00000000..4f8a635c --- /dev/null +++ b/day-03/solution.py @@ -0,0 +1,75 @@ +import requests + +API_KEY = "DAZR72VR6C6AVTHW" +API_URL = "https://www.alphavantage.co/" + +def get_stock_market_data(symbol, is_time_series): + try: + query = f"query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={API_KEY}" + response = requests.get(url=API_URL + query, timeout=10) + response.raise_for_status() + + except requests.exceptions.ConnectionError: + print("❌ No internet connection. Please check your network.") + return + except requests.exceptions.Timeout: + print("❌ Request timed out. The server took too long to respond.") + return + except requests.exceptions.HTTPError as e: + print(f"❌ HTTP error occurred: {e}") + return + + try: + data = response.json() + + except requests.exceptions.JSONDecodeError: + print("❌ Failed to parse response. The API may have returned invalid data.") + return + + try: + # ✅ Check FIRST before doing anything else + if "Error Message" in data: + print(f"❌ Invalid stock symbol '{symbol}'. Please enter a valid symbol like IBM, GOOGL, AMZN.") + return + + if "Note" in data: + print(f"⚠️ API Limit Reached: {data['Note']}") + return + + if "Meta Data" not in data: + print("❌ Unexpected response from API. 
No data found.") + return + + # ✅ Only reaches here if symbol is valid + print(f"\n✅ Fetching data for: {symbol}") + + for item, value in data.items(): + if is_time_series == "yes": + print(f"\n{item}:") + print(value) + else: + if item == "Meta Data": + print(f"\n{item}:") + print(value) + + except KeyError as e: + print(f"❌ Unexpected data format. Missing key: {e}") + except Exception as e: + print(f"❌ An unexpected error occurred: {e}") + + +# --- Main Program --- +try: + symbol = input("Enter the stock symbol (IBM, GOOGL, AMZN): ").strip().upper() + if not symbol: + raise ValueError("Stock symbol cannot be empty.") + + is_time_series = input("Do you want to see the time series data? (yes/no): ").strip().lower() + if is_time_series not in ("yes", "no"): + raise ValueError("Please enter only 'yes' or 'no'.") + +except ValueError as e: + print(f"❌ Invalid input: {e}") + +else: + get_stock_market_data(symbol, is_time_series) \ No newline at end of file diff --git a/day-04/log_analyzer.py b/day-04/log_analyzer.py new file mode 100644 index 00000000..b15ada36 --- /dev/null +++ b/day-04/log_analyzer.py @@ -0,0 +1,17 @@ + +import json + +with open("app.log", "r") as log_file: + content = log_file.read() + + counts = { + "INFO": content.count("INFO"), + "WARNING": content.count("WARNING"), + "ERROR": content.count("ERROR"), + } + + print(counts) + + + with open("output.json", "w") as file: + json.dump(counts, file) diff --git a/day-04/output.json b/day-04/output.json new file mode 100644 index 00000000..4bcc4030 --- /dev/null +++ b/day-04/output.json @@ -0,0 +1 @@ +{"INFO": 10, "WARNING": 2, "ERROR": 3} \ No newline at end of file diff --git a/day-05/mysolution.py b/day-05/mysolution.py new file mode 100644 index 00000000..e8ea0fdb --- /dev/null +++ b/day-05/mysolution.py @@ -0,0 +1,43 @@ +import json + + +class LogAnalyzer: + + + def __init__(self, log_path, output_path): + + self.log_path = log_path + self.output_path = output_path + self.counts = {} # An empty 
dictionary to hold our results later + + + def analyze(self): + + with open(self.log_path, "r") as log_file: + content = log_file.read() + + + self.counts = { + "INFO": content.count("INFO"), + "WARNING": content.count("WARNING"), + "ERROR": content.count("ERROR"), + } + + def show_results(self): + + print(self.counts) + + + def save_to_file(self): + + with open(self.output_path, "w") as file: + json.dump(self.counts, file) + + + +my_analyzer = LogAnalyzer("app.log", "output.json") + + +my_analyzer.analyze() +my_analyzer.show_results() +my_analyzer.save_to_file() \ No newline at end of file diff --git a/day-05/output.json b/day-05/output.json new file mode 100644 index 00000000..4bcc4030 --- /dev/null +++ b/day-05/output.json @@ -0,0 +1 @@ +{"INFO": 10, "WARNING": 2, "ERROR": 3} \ No newline at end of file diff --git a/day-06/app.log b/day-06/app.log new file mode 100644 index 00000000..056314af --- /dev/null +++ b/day-06/app.log @@ -0,0 +1,21 @@ +2025-01-10 09:00:01 INFO Application started successfully +2025-01-10 09:00:05 INFO Connecting to database +2025-01-10 09:00:07 INFO Database connection established + +2025-01-10 09:05:12 WARNING High memory usage detected +2025-01-10 09:05:15 INFO Memory usage back to normal + +2025-01-10 09:10:22 ERROR Failed to fetch user data +2025-01-10 09:10:25 ERROR Database timeout occurred + +2025-01-10 09:15:30 INFO Retrying database connection +2025-01-10 09:15:32 INFO Database connection successful + +2025-01-10 09:20:45 WARNING Disk usage above 75% +2025-01-10 09:20:50 INFO Disk cleanup initiated + +2025-01-10 09:25:10 ERROR Unable to write logs to disk +2025-01-10 09:25:15 INFO Log rotation completed + +2025-01-10 09:30:00 INFO Application shutdown initiated +2025-01-10 09:30:05 INFO Application stopped diff --git a/day-06/solution.py b/day-06/solution.py new file mode 100644 index 00000000..b14c1b87 --- /dev/null +++ b/day-06/solution.py @@ -0,0 +1,72 @@ +import json +import argparse + + +class LogAnalyzer: + + + def 
__init__(self, log_path, output_path=None, level=None): + self.log_path = log_path + self.output_path = output_path + self.level = level + self.counts = {} + + def analyze(self): + """Reads the log file, handles missing files, and counts occurrences.""" + try: + with open(self.log_path, "r") as log_file: + content = log_file.read() + + + if self.level: + self.counts = {self.level: content.count(self.level)} + else: + + self.counts = { + "INFO": content.count("INFO"), + "WARNING": content.count("WARNING"), + "ERROR": content.count("ERROR"), + } + return True + + except FileNotFoundError: + + print(f"❌ Error: The file '{self.log_path}' was not found. Please check the path.") + return False + + def show_results(self): + """Prints the counts to the terminal.""" + print("\n📊 Log Analysis Summary:") + for key, value in self.counts.items(): + print(f" - {key}: {value}") + + def save_to_file(self): + """Saves the dictionary to a JSON file if an output path was provided.""" + if self.output_path: + with open(self.output_path, "w") as file: + json.dump(self.counts, file, indent=4) + print(f"\n✅ Results successfully saved to: {self.output_path}") + + +if __name__ == "__main__": + + + parser = argparse.ArgumentParser(description="A DevOps CLI tool to analyze server logs.") + + + parser.add_argument("--file", required=True, help="Path to the input log file (e.g., app.log)") + parser.add_argument("--out", required=False, help="Path to save the JSON output (e.g., summary.json)") + parser.add_argument("--level", required=False, choices=["INFO", "WARNING", "ERROR"], help="Filter by a specific log level") + + + args = parser.parse_args() + + my_analyzer = LogAnalyzer(log_path=args.file, output_path=args.out, level=args.level) + + success = my_analyzer.analyze() + + if success: + my_analyzer.show_results() + + if args.out: + my_analyzer.save_to_file() \ No newline at end of file diff --git a/day-06/summary.json b/day-06/summary.json new file mode 100644 index 00000000..018a055f --- 
/dev/null +++ b/day-06/summary.json @@ -0,0 +1,3 @@ +{ + "INFO": 10 +} \ No newline at end of file diff --git a/day-07/solution.md b/day-07/solution.md new file mode 100644 index 00000000..320db0de --- /dev/null +++ b/day-07/solution.md @@ -0,0 +1,60 @@ +I have picked the code from day -06 where we have modify the code from day -05 + +1)Understanding what problem that script is solving + +In my previous script everything was fine but we were assigned our specific file that we have toc heck the logs from that one log file only. What if we have new log files and we have to check the logs for that then we have to manually edit the code for analyzing different log file + + +2)What is the problem? + +we were not able to automate the code for any log file because it was name specific code for app.log + +3)What input does it need? + +Here we are using the concept of argument where we are passing the argument making the script more functional + +4)What output should it give? + +It will analyze the file which we pass in argument + +________________________________________________________________________________________________________________________________________________________________________________________ + + +🎯 1. What problem am I solving? + +Here I am automating the process of reading log files instead of manually counting how many are INFO,WARNING,and ERROR messages . My scripts can do same thing in milliseconds even with passing arguments as file name + + +📥 2. What input does my script need? + +It needs command-line arguments (flags) passed directly through the terminal via argparse. + +Required Input: The path to the log file you want to read (e.g., --file app.log). + +Optional Input 1: A specific log level to filter by (e.g., --level ERROR). + +Optional Input 2: The name of a file to save the results into (e.g., --out summary.json). + +📤 3. What output should my script give? 
+ +The script provides three types of output depending on what the user asks for: + +Terminal Output (Always): A clean, easy-to-read summary printed directly to the console showing the count of each log level. + +File Output (Conditional): If the user provided the --out flag, it generates a physical .json file on the hard drive containing the dictionary of counts. + +Validation Output: If the user types a file name that doesn't exist, it outputs a friendly error message instead of crashing the program. + +⚙️ 4. What are the main steps? + +When you press "Enter" in the terminal, your code executes these exact steps in order: + +Catch the Flags (argparse): reads the terminal command, validates that the inputs are allowed, and passes them into the args object. + +Build the Machine (Initialization): The script creates the my_analyzer object from your LogAnalyzer class and feeds it the file paths from argparse. + +Read and Count (analyze): The machine safely opens the text file, scans the content, counts the requested log levels, and stores the numbers in its internal self.counts memory. + +Display (show_results): The machine prints its internal memory to the screen. + +Export (save_to_file): If an --out destination was provided, the machine formats its internal memory into JSON and saves it to the disk.